diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 0000000000..1ef771b3d9 --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,113 @@ +# This is the default config used by nextest. It is embedded in the binary at +# build time. It may be used as a template for .config/nextest.toml. + +[store] +# The directory under the workspace root at which nextest-related files are +# written. Profile-specific storage is currently written to dir/. +dir = "target/nextest" + +# This section defines the default nextest profile. Custom profiles are layered +# on top of the default profile. +[profile.default] +# "retries" defines the number of times a test should be retried. If set to a +# non-zero value, tests that succeed on a subsequent attempt will be marked as +# non-flaky. Can be overridden through the `--retries` option. +# Examples +# * retries = 3 +# * retries = { backoff = "fixed", count = 2, delay = "1s" } +# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } +retries = 0 + +# The number of threads to run tests with. Supported values are either an integer or +# the string "num-cpus". Can be overridden through the `--test-threads` option. +test-threads = 8 + +# The number of threads required for each test. This is generally used in overrides to +# mark certain tests as heavier than others. However, it can also be set as a global parameter. +threads-required = 1 + +# Show these test statuses in the output. +# +# The possible values this can take are: +# * none: no output +# * fail: show failed (including exec-failed) tests +# * retry: show flaky and retried tests +# * slow: show slow tests +# * pass: show passed tests +# * skip: show skipped tests (most useful for CI) +# * all: all of the above +# +# Each value includes all the values above it; for example, "slow" includes +# failed and retried tests. +# +# Can be overridden through the `--status-level` flag. 
+status-level = "pass" + +# Similar to status-level, show these test statuses at the end of the run. +final-status-level = "flaky" + +# "failure-output" defines when standard output and standard error for failing tests are produced. +# Accepted values are +# * "immediate": output failures as soon as they happen +# * "final": output failures at the end of the test run +# * "immediate-final": output failures as soon as they happen and at the end of +# the test run; combination of "immediate" and "final" +# * "never": don't output failures at all +# +# For large test suites and CI it is generally useful to use "immediate-final". +# +# Can be overridden through the `--failure-output` option. +failure-output = "immediate" + +# "success-output" controls production of standard output and standard error on success. This should +# generally be set to "never". +success-output = "never" + +# Cancel the test run on the first failure. For CI runs, consider setting this +# to false. +fail-fast = true + +# Treat a test that takes longer than the configured 'period' as slow, and print a message. +# See for more information. +# +# Optional: specify the parameter 'terminate-after' with a non-zero integer, +# which will cause slow tests to be terminated after the specified number of +# periods have passed. +# Example: slow-timeout = { period = "60s", terminate-after = 2 } +slow-timeout = { period = "120s" } + +# Treat a test as leaky if after the process is shut down, standard output and standard error +# aren't closed within this duration. +# +# This usually happens in case of a test that creates a child process and lets it inherit those +# handles, but doesn't clean the child process up (especially when it fails). +# +# See for more information. +leak-timeout = "100ms" + +[profile.default.junit] +# Output a JUnit report into the given file inside 'store.dir/'. +# If unspecified, JUnit is not written out. 
+ +# path = "junit.xml" + +# The name of the top-level "report" element in JUnit report. If aggregating +# reports across different test runs, it may be useful to provide separate names +# for each report. +report-name = "lighthouse-run" + +# Whether standard output and standard error for passing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +store-success-output = false + +# Whether standard output and standard error for failing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +# +# Note that if a description can be extracted from the output, it is always stored in the +# element. +store-failure-output = true + +# This profile is activated if MIRI_SYSROOT is set. +[profile.default-miri] +# Miri tests take up a lot of memory, so only run 1 test at a time by default. +test-threads = 4 diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml deleted file mode 100644 index a96431fafb..0000000000 --- a/.github/workflows/docker-antithesis.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: docker antithesis - -on: - push: - branches: - - unstable - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} - ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} - ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }} - REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }} - IMAGE_NAME: lighthouse - TAG: libvoidstar - -jobs: - build-docker: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v3 - - name: Update Rust - run: rustup update stable - - name: Dockerhub login - run: | - echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin - - name: Build AMD64 dockerfile (with push) - run: | - docker build \ - --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \ - 
--file ./testing/antithesis/Dockerfile.libvoidstar . - docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 21ca4940d9..007070dbb5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -17,6 +17,8 @@ env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} IMAGE_NAME: ${{ github.repository_owner}}/lighthouse LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli + # Enable self-hosted runners for the sigp repo only. + SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} jobs: # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX @@ -48,7 +50,8 @@ jobs: VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: name: build-docker-${{ matrix.binary }}${{ matrix.features.version_suffix }} - runs-on: ubuntu-22.04 + # Use self-hosted runners only on the sigp repo. + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }} strategy: matrix: binary: [aarch64, @@ -64,14 +67,13 @@ jobs: needs: [extract-version] env: - # We need to enable experimental docker features in order to use `docker buildx` - DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} steps: - uses: actions/checkout@v3 - name: Update Rust + if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable - name: Dockerhub login run: | @@ -80,16 +82,14 @@ jobs: run: | cargo install cross env CROSS_PROFILE=${{ matrix.profile }} CROSS_FEATURES=${{ matrix.features.env }} make build-${{ matrix.binary }} + - name: Make bin dir + run: mkdir ./bin - name: Move cross-built binary into Docker scope (if ARM) if: startsWith(matrix.binary, 'aarch64') - run: | - mkdir ./bin; - mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile 
}}/lighthouse ./bin; + run: mv ./target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin - name: Move cross-built binary into Docker scope (if x86_64) if: startsWith(matrix.binary, 'x86_64') - run: | - mkdir ./bin; - mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin; + run: mv ./target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ./bin - name: Map aarch64 to arm64 short arch if: startsWith(matrix.binary, 'aarch64') run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV @@ -99,17 +99,24 @@ jobs: - name: Set modernity suffix if: endsWith(matrix.binary, '-portable') != true run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV; - # Install dependencies for emulation. Have to create a new builder to pick up emulation support. - - name: Build Dockerfile and push - run: | - docker run --privileged --rm tonistiigi/binfmt --install ${SHORT_ARCH} - docker buildx create --use --name cross-builder - docker buildx build \ - --platform=linux/${SHORT_ARCH} \ - --file ./Dockerfile.cross . \ - --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ - --provenance=false \ - --push + + - name: Install QEMU + if: env.SELF_HOSTED_RUNNERS == 'false' + run: sudo apt-get update && sudo apt-get install -y qemu-user-static + + - name: Set up Docker Buildx + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + file: ./Dockerfile.cross + context: . 
+ platforms: linux/${{ env.SHORT_ARCH }} + push: true + tags: ${{ env.IMAGE_NAME }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}${{ env.MODERNITY_SUFFIX }}${{ env.FEATURE_SUFFIX }} + build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} runs-on: ubuntu-22.04 @@ -118,20 +125,22 @@ jobs: matrix: modernity: ["", "-modern"] env: - # We need to enable experimental docker features in order to use `docker manifest` - DOCKER_CLI_EXPERIMENTAL: enabled VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Create and push multiarch manifest run: | - docker manifest create ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \ - --amend ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \ - --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; - docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} + docker buildx imagetools create -t ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \ + ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \ + ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; + build-docker-lcli: runs-on: ubuntu-22.04 needs: [extract-version] diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 1269aee627..74a9071eab 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -20,6 +20,9 @@ jobs: - ubuntu-22.04 - macos-12 runs-on: ${{ matrix.os }} + env: + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + FEATURES: portable,jemalloc steps: - uses: actions/checkout@v3 diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml index e38b03daf7..26ef781b68 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,6 +14,8 @@ env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} REPO_NAME: ${{ github.repository_owner }}/lighthouse IMAGE_NAME: ${{ github.repository_owner }}/lighthouse + # Enable self-hosted runners for the sigp repo only. + SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} jobs: extract-version: @@ -38,36 +40,37 @@ jobs: x86_64-windows-portable] include: - arch: aarch64-unknown-linux-gnu - platform: ubuntu-latest + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - arch: aarch64-unknown-linux-gnu-portable - platform: ubuntu-latest + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - arch: x86_64-unknown-linux-gnu - platform: ubuntu-latest + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - arch: x86_64-unknown-linux-gnu-portable - platform: ubuntu-latest + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - arch: x86_64-apple-darwin - platform: macos-latest + runner: macos-latest profile: maxperf - arch: x86_64-apple-darwin-portable - platform: macos-latest + runner: macos-latest profile: maxperf - arch: x86_64-windows - platform: windows-2019 + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} profile: maxperf - arch: x86_64-windows-portable - platform: windows-2019 + runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} 
profile: maxperf - runs-on: ${{ matrix.platform }} + runs-on: ${{ matrix.runner }} needs: extract-version steps: - name: Checkout sources uses: actions/checkout@v3 - name: Get latest version of stable Rust + if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable # ============================== @@ -75,7 +78,7 @@ jobs: # ============================== - uses: KyleMayes/install-llvm-action@v1 - if: startsWith(matrix.arch, 'x86_64-windows') + if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows') with: version: "15.0" directory: ${{ runner.temp }}/llvm @@ -279,9 +282,6 @@ jobs: | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | ENDBODY ) - assets=() - for asset in ./lighthouse-*.tar.gz*; do - assets+=("-a" "$asset/$asset") - done + assets=(./lighthouse-*.tar.gz*) tag_name="${{ env.VERSION }}" - echo "$body" | hub release create --draft "${assets[@]}" -F "-" "$tag_name" + echo "$body" | gh release create --draft -F "-" "$tag_name" "${assets[@]}" diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index debc45bc75..4559a89bd0 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -18,14 +18,16 @@ env: # Deny warnings in CI # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" - # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Self-hosted runners need to reference a different host for `./watch` tests. 
WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + TEST_FEATURES: portable jobs: target-branch-check: name: target-branch-check @@ -34,147 +36,191 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - extract-msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Extract Minimum Supported Rust Version (MSRV) - run: | - metadata=$(cargo metadata --no-deps --format-version 1) - msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "MSRV=$msrv" >> $GITHUB_OUTPUT - id: extract_msrv - outputs: - MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - cargo-fmt: - name: cargo-fmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Check formatting with cargo fmt - run: make cargo-fmt release-tests-ubuntu: name: release-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
- runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats release-tests-windows: name: release-tests-windows - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows"]') || 'windows-2019' }} - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable - - name: Use Node.js - uses: actions/setup-node@v2 + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 with: - node-version: '14' - - name: Install windows build tools - run: | - choco install python visualstudio2019-workload-vctools -y - npm config set msvs_version 2019 + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 - - name: Install make - run: choco install -y make - - 
uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == false with: - version: "15.0" - directory: ${{ runner.temp }}/llvm + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d + - name: Install make + if: env.SELF_HOSTED_RUNNERS == 'false' + run: choco install -y make +# - uses: KyleMayes/install-llvm-action@v1 +# if: env.SELF_HOSTED_RUNNERS == 'false' +# with: +# version: "15.0" +# directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats op-pool-tests: name: op-pool-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run operation_pool tests for all known 
forks run: make test-op-pool + network-tests: + name: network-tests + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v3 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run network tests for all known forks + run: make test-network slasher-tests: name: slasher-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run slasher tests for all supported backends run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + bins: cargo-nextest - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug - run: make test-debug + run: make nextest-debug + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - 
name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "small"]') || 'ubuntu-latest' }} - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run consensus-spec-tests with blst, milagro and fake_crypto - run: make test-ef + run: make nextest-ef + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats dockerfile-ubuntu: name: dockerfile-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - name: Build the root Dockerfile run: docker build --build-arg FEATURES=portable -t lighthouse:local . 
- name: Test the built image @@ -182,35 +228,45 @@ jobs: eth1-simulator-ubuntu: name: eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: name: merge-transition-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim fallback-simulator-ubuntu: @@ -232,32 +288,45 @@ jobs: syncing-simulator-ubuntu: name: syncing-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: 
nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + env: + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + FEATURES: jemalloc,portable steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install geth + if: env.SELF_HOSTED_RUNNERS == 'false' run: | sudo add-apt-repository -y ppa:ethereum/ethereum sudo apt-get update sudo apt-get install ethereum - - name: Install lighthouse and lcli + - name: Install lighthouse run: | make - make install-lcli + - name: Install lcli + if: env.SELF_HOSTED_RUNNERS == 'false' + run: make install-lcli - name: Run the doppelganger protection failure test script run: | cd scripts/tests @@ -268,90 +337,76 @@ jobs: ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.20' - - uses: actions/setup-dotnet@v3 - with: - dotnet-version: '6.0.201' - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Add go compiler to $PATH + if: env.SELF_HOSTED_RUNNERS == 'true' + run: 
echo "/usr/local/go/bin" >> $GITHUB_PATH - name: Run exec engine integration tests in release run: make test-exec-engine - check-benchmarks: - name: check-benchmarks + check-code: + name: check-code runs-on: ubuntu-latest - needs: cargo-fmt + env: + CARGO_INCREMENTAL: 1 steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck benchmark code without running it - run: make check-benches - clippy: - name: clippy - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + components: rustfmt,clippy + bins: cargo-audit + - name: Check formatting with cargo fmt + run: make cargo-fmt - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + - name: Typecheck benchmark code without running it + run: make check-benches + - name: Validate state_processing feature arbitrary-fuzz + run: make arbitrary-fuzz + - name: Run cargo audit + run: make audit-CI + - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose + run: CARGO_HOME=$(readlink -f $HOME) make vendor check-msrv: name: check-msrv runs-on: ubuntu-latest - needs: [cargo-fmt, extract-msrv] steps: - uses: actions/checkout@v3 - - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Rust at Minimum Supported Rust Version (MSRV) + run: | + metadata=$(cargo metadata --no-deps --format-version 1) + msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + rustup override set $msrv - name: Run cargo check run: cargo check --workspace - arbitrary-check: - name: arbitrary-check - runs-on: ubuntu-latest - 
needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Validate state_processing feature arbitrary-fuzz - run: make arbitrary-fuzz - cargo-audit: - name: cargo-audit - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - run: make audit - cargo-vendor: - name: cargo-vendor - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose - run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: name: cargo-udeps runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - run: rustup toolchain install $PINNED_NIGHTLY - - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force + - name: Get latest version of nightly Rust + uses: moonrepo/setup-rust@v1 + with: + channel: nightly + bins: cargo-udeps + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/.gitignore b/.gitignore index 1b7e5dbb88..e63e218a3b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target/ +vendor/ **/*.rs.bk *.pk *.sk @@ -9,7 +10,11 @@ perf.data* /bin genesis.ssz /clippy.toml +/.cargo # IntelliJ /*.iml -.idea \ No newline at end of file +.idea + +# VSCode +/.vscode diff --git a/Cargo.lock b/Cargo.lock index 116f8081fa..af9d0a0ad8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,6 +33,7 @@ dependencies = [ "serde", "serde_json", "slashing_protection", + "slog", "slot_clock", "tempfile", "tokio", @@ -48,11 +49,10 @@ dependencies = [ "eth2_keystore", 
"eth2_wallet", "filesystem", - "rand 0.8.5", + "rand", "regex", "rpassword", "serde", - "serde_derive", "serde_yaml", "slog", "types", @@ -62,9 +62,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -97,12 +97,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "ctr", "opaque-debug", ] +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + [[package]] name = "aes-gcm" version = "0.9.4" @@ -110,8 +121,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ "aead", - "aes", - "cipher", + "aes 0.7.5", + "cipher 0.3.0", "ctr", "ghash", "subtle", @@ -123,7 +134,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.10", + "getrandom", "once_cell", "version_check", ] @@ -141,9 +152,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -157,7 +168,7 @@ checksum = 
"0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "amcl" version = "0.3.0" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" [[package]] name = "android-tzdata" @@ -183,26 +194,17 @@ dependencies = [ "winapi", ] -[[package]] -name = "anvil-rpc" -version = "0.1.0" -source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "a2e1373abdaa212b704512ec2bd8b26bd0b7d5c3f70117411a5d9a451383c859" dependencies = [ "derive_arbitrary", ] @@ -213,15 +215,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "array-init" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayref" version = "0.3.7" @@ -234,6 +227,45 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "asn1_der" version = "0.7.6" @@ -247,14 +279,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", - "autocfg 1.1.0", + "autocfg", "cfg-if", "concurrent-queue", "futures-lite", "log", "parking", "polling", - "rustix 0.37.23", + "rustix 0.37.26", "slab", "socket2 0.4.9", "waker-fn", @@ -262,44 +294,22 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ "event-listener", ] -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite 0.2.10", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.28", -] - [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -310,7 +320,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -323,7 +333,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite", ] [[package]] @@ -338,6 +348,17 @@ dependencies = [ "wildmatch", ] +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -361,15 +382,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -378,9 +390,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" 
dependencies = [ "async-trait", "axum-core", @@ -395,7 +407,7 @@ dependencies = [ "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.10", + "pin-project-lite", "rustversion", "serde", "serde_json", @@ -427,9 +439,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -466,9 +478,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -476,34 +488,17 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "beacon-api-client" -version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" -dependencies = [ - "ethereum-consensus", - "http", - "itertools", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec 0.20.4", + "bitvec 1.0.1", "bls", "derivative", "environment", "eth1", "eth2", + "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -516,6 +511,7 @@ dependencies = [ "hex", "int_to_bytes", "itertools", + "kzg", "lazy_static", "lighthouse_metrics", "logging", @@ -526,23 +522,24 @@ dependencies = [ "operation_pool", "parking_lot 0.12.1", 
"proto_array", - "rand 0.8.5", + "rand", "rayon", "safe_arith", "sensitive_url", "serde", - "serde_derive", "serde_json", "slasher", "slog", + "slog-async", + "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "store", "strum", - "superstruct 0.5.0", + "superstruct", "task_executor", "tempfile", "tokio", @@ -550,12 +547,11 @@ dependencies = [ "tree_hash", "tree_hash_derive", "types", - "unused_port", ] [[package]] name = "beacon_node" -version = "4.3.0" +version = "4.5.0" dependencies = [ "beacon_chain", "clap", @@ -578,6 +574,7 @@ dependencies = [ "node_test_rig", "sensitive_url", "serde", + "serde_json", "slasher", "slog", "store", @@ -641,6 +638,29 @@ dependencies = [ "shlex", ] +[[package]] +name = "bindgen" +version = "0.66.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" +dependencies = [ + "bitflags 2.4.1", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.38", + "which", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -649,9 +669,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitvec" @@ -723,9 +743,8 @@ dependencies = [ "ethereum_ssz", "hex", "milagro_bls", - "rand 0.7.3", + "rand", "serde", - "serde_derive", "tree_hash", "zeroize", ] @@ -744,18 +763,17 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.41.0" +version = "1.42.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +checksum = "ed59b5c00048f48d7af971b71f800fdf23e858844a6f9e4d32ca72e9399e7864" dependencies = [ - "chrono", "serde", "serde_with", ] [[package]] name = "boot_node" -version = "4.3.0" +version = "4.5.0" dependencies = [ "beacon_node", "clap", @@ -767,7 +785,6 @@ dependencies = [ "log", "logging", "serde", - "serde_derive", "serde_json", "serde_yaml", "slog", @@ -794,16 +811,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "builder_client" version = "0.1.0" @@ -818,9 +825,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byte-slice-cast" @@ -830,15 +837,15 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" dependencies = [ "serde", ] @@ -864,6 +871,20 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "0.1.0" +source = 
"git+https://github.com/ethereum/c-kzg-4844?rev=748283cced543c486145d5f3f38684becdfe3e1b#748283cced543c486145d5f3f38684becdfe3e1b" +dependencies = [ + "bindgen 0.66.1", + "blst", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "cached_tree_hash" version = "0.1.0" @@ -874,7 +895,7 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tree_hash", ] @@ -890,9 +911,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -905,7 +926,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.18", + "semver", "serde", "serde_json", "thiserror", @@ -919,9 +940,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "jobserver", + "libc", +] [[package]] name = "cexpr" @@ -929,7 +954,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.3", + "nom", ] [[package]] @@ -945,7 +970,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "zeroize", ] @@ -958,25 +983,21 @@ checksum = 
"a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", - "cipher", + "cipher 0.3.0", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", - "serde", - "time 0.1.45", - "wasm-bindgen", - "winapi", + "windows-targets 0.48.5", ] [[package]] @@ -988,6 +1009,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clang-sys" version = "1.6.1" @@ -1058,7 +1089,6 @@ dependencies = [ "parking_lot 0.12.1", "sensitive_url", "serde", - "serde_derive", "serde_yaml", "slasher", "slasher_service", @@ -1067,7 +1097,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.24", + "time", "timer", "tokio", "types", @@ -1087,6 +1117,7 @@ name = "compare_fields" version = "0.2.0" dependencies = [ "compare_fields_derive", + "itertools", ] [[package]] @@ -1099,18 +1130,24 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" 
+checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" @@ -1224,7 +1261,7 @@ version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ - "autocfg 1.1.0", + "autocfg", "cfg-if", "crossbeam-utils", "memoffset 0.9.0", @@ -1253,19 +1290,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -1302,9 +1339,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -1314,9 +1351,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] @@ -1327,47 +1364,47 @@ 
version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] name = "ctrlc" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" +checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" dependencies = [ - "nix 0.26.2", - "windows-sys", + "nix 0.27.1", + "windows-sys 0.48.0", ] [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.0.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "packed_simd_2", - "platforms 3.0.2", + "platforms 3.1.2", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "darling" version = "0.13.4" @@ -1458,6 +1495,7 @@ dependencies = [ "clap", "clap_utils", "environment", + "hex", "logging", "slog", "sloggers", @@ -1480,7 +1518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.8", + "tokio-util 0.7.9", ] [[package]] @@ -1492,7 +1530,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.10.7", + "sha2 0.9.9", "tree_hash", "types", ] @@ -1509,9 +1547,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "pem-rfc7468", @@ -1519,10 +1557,27 @@ dependencies = [ ] [[package]] -name = "deranged" -version = "0.3.6" +name = "der-parser" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +dependencies = [ + "powerfmt", +] [[package]] name = "derivative" @@ -1543,7 +1598,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1555,17 +1610,17 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn 1.0.109", ] [[package]] name = "diesel" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" +checksum = 
"2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "byteorder", "diesel_derives", "itoa", @@ -1575,14 +1630,14 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74398b79d81e52e130d991afeed9c86034bb1b7735f46d2f5bf7deb261d80303" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1602,7 +1657,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -1682,11 +1737,11 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" dependencies = [ - "aes", + "aes 0.7.5", "aes-gcm", "arrayvec", "delay_map", - "enr 0.9.0", + "enr", "fnv", "futures", "hashlink 0.7.0", @@ -1698,9 +1753,9 @@ dependencies = [ "lru 0.7.8", "more-asserts", "parking_lot 0.11.2", - "rand 0.8.5", + "rand", "rlp", - "smallvec 1.11.0", + "smallvec", "socket2 0.4.9", "tokio", "tracing", @@ -1709,6 +1764,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "dtoa" version = "1.0.9" @@ -1739,9 +1805,9 @@ version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.7", + "der 0.7.8", "digest 0.10.7", - 
"elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "rfc6979 0.4.0", "signature 2.1.0", "spki 0.7.2", @@ -1749,18 +1815,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature 1.6.4", -] - -[[package]] -name = "ed25519" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8 0.10.2", "signature 2.1.0", @@ -1768,29 +1825,15 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519 1.5.3", - "rand 0.7.3", + "curve25519-dalek", + "ed25519", + "rand_core", "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "ed25519-dalek" -version = "2.0.0-pre.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd577ba9d4bcab443cac60003d8fd32c638e7024a3ec92c200d7af5d2c397ed" -dependencies = [ - "curve25519-dalek 4.0.0-rc.1", - "ed25519 2.2.1", - "rand_core 0.6.4", - "serde", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -1804,16 +1847,19 @@ dependencies = [ "compare_fields", "compare_fields_derive", "derivative", + "eth2_network_config", "ethereum-types 0.14.1", + "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", "fork_choice", "fs2", "hex", + "kzg", "rayon", "serde", - "serde_derive", + "serde_json", "serde_repr", "serde_yaml", "snap", @@ -1844,8 +1890,7 @@ dependencies = [ "ff 0.12.1", 
"generic-array", "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", + "rand_core", "sec1 0.3.0", "subtle", "zeroize", @@ -1853,19 +1898,19 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.2", + "crypto-bigint 0.5.3", "digest 0.10.7", "ff 0.13.0", "generic-array", "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core 0.6.4", + "rand_core", "sec1 0.7.3", "subtle", "zeroize", @@ -1873,48 +1918,28 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] [[package]] name = "enr" -version = "0.6.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" +checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", + "base64 0.21.4", "bytes", - "hex", - "k256 0.11.6", - "log", - "rand 0.8.5", - "rlp", - "serde", - "sha3 0.10.8", - "zeroize", -] - -[[package]] -name = "enr" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" -dependencies = [ - "base64 0.21.2", - "bytes", - "ed25519-dalek 2.0.0-pre.0", + "ed25519-dalek", "hex", "k256 0.13.1", "log", - "rand 0.8.5", + "rand", "rlp", "serde", - "serde-hex", "sha3 0.10.8", "zeroize", ] @@ -1932,10 +1957,22 @@ dependencies = [ ] 
[[package]] -name = "env_logger" -version = "0.7.1" +name = "enum-as-inner" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.38", +] + +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ "log", "regex", @@ -1965,7 +2002,6 @@ dependencies = [ "futures", "logging", "serde", - "serde_derive", "slog", "slog-async", "slog-json", @@ -1983,24 +2019,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "errno" -version = "0.3.2" +name = "erased-serde" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", + "serde", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "errno" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "cc", "libc", + "windows-sys 0.48.0", ] [[package]] @@ -2037,7 +2071,7 @@ dependencies = [ "slog", "sloggers", "state_processing", - "superstruct 0.5.0", + "superstruct", "task_executor", "tokio", "tree_hash", @@ -2076,7 +2110,7 @@ dependencies = [ "mediatype", "mime", "pretty_reqwest_error", - "procinfo", + "procfs", "proto_array", "psutil", "reqwest", @@ 
-2085,8 +2119,10 @@ dependencies = [ "serde", "serde_json", "slashing_protection", + "ssz_types", "store", "tokio", + "tree_hash", "types", ] @@ -2109,7 +2145,6 @@ dependencies = [ "lazy_static", "num-bigint", "serde", - "serde_derive", "serde_yaml", ] @@ -2121,7 +2156,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.10.7", + "sha2 0.9.9", "zeroize", ] @@ -2129,13 +2164,13 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes", + "aes 0.7.5", "bls", "eth2_key_derivation", "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.8.5", + "rand", "scrypt", "serde", "serde_json", @@ -2151,12 +2186,22 @@ dependencies = [ name = "eth2_network_config" version = "0.2.0" dependencies = [ + "bytes", "discv5", "eth2_config", "ethereum_ssz", + "logging", + "pretty_reqwest_error", + "reqwest", + "sensitive_url", + "serde_json", "serde_yaml", + "sha2 0.9.9", + "slog", "tempfile", + "tokio", "types", + "url", "zip", ] @@ -2167,7 +2212,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.8.5", + "rand", "serde", "serde_json", "serde_repr", @@ -2245,30 +2290,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "ethereum-consensus" -version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" -dependencies = [ - "async-stream", - "blst", - "bs58 0.4.0", - "enr 0.6.2", - "hex", - "integer-sqrt", - "multiaddr 0.14.0", - "multihash 0.16.3", - "rand 0.8.5", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.9.9", - "ssz_rs", - "thiserror", - "tokio", - "tokio-stream", -] - [[package]] name = "ethereum-types" version = "0.12.1" @@ -2294,7 +2315,7 @@ dependencies = [ "impl-codec 0.6.0", "impl-rlp", "impl-serde 0.4.0", - "primitive-types 0.12.1", + "primitive-types 0.12.2", "scale-info", "uint", ] @@ -2308,7 +2329,7 @@ dependencies = [ "cpufeatures", "lazy_static", "ring", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -2332,7 +2353,7 @@ 
checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -2377,7 +2398,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom 0.2.10", + "getrandom", "hex", "proc-macro2", "quote", @@ -2423,7 +2444,7 @@ dependencies = [ "k256 0.11.6", "once_cell", "open-fastrlp", - "rand 0.8.5", + "rand", "rlp", "rlp-derive", "serde", @@ -2445,11 +2466,10 @@ dependencies = [ "auto_impl", "base64 0.13.1", "ethers-core", - "futures-channel", "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.10", + "getrandom", "hashers", "hex", "http", @@ -2461,7 +2481,6 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tokio-tungstenite 0.17.2", "tracing", "tracing-futures", "url", @@ -2506,12 +2525,13 @@ dependencies = [ name = "execution_layer" version = "0.1.0" dependencies = [ + "arc-swap", "async-trait", "builder_client", "bytes", "environment", "eth2", - "ethereum-consensus", + "eth2_network_config", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", @@ -2523,24 +2543,23 @@ dependencies = [ "hex", "jsonwebtoken", "keccak-hash", + "kzg", "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-rs", "parking_lot 0.12.1", "pretty_reqwest_error", - "rand 0.8.5", + "rand", "reqwest", "sensitive_url", "serde", "serde_json", "slog", "slot_clock", - "ssz_rs", "ssz_types", "state_processing", "strum", - "superstruct 0.6.0", + "superstruct", "task_executor", "tempfile", "tokio", @@ -2595,9 +2614,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -2605,7 +2624,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2615,7 +2634,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2627,9 +2646,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] name = "field-offset" @@ -2638,7 +2657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset 0.9.0", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -2649,6 +2668,12 @@ dependencies = [ "windows-acl", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fixed-hash" version = "0.7.0" @@ -2656,7 +2681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -2669,16 +2694,16 @@ checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "arbitrary", "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "libz-sys", @@ -2767,6 +2792,16 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +dependencies = [ + "futures-timer", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.28" @@ -2812,7 +2847,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.10", + "pin-project-lite", "waker-fn", ] @@ -2824,18 +2859,17 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls", ] [[package]] @@ -2880,7 +2914,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite", "pin-utils", "slab", ] @@ -2926,19 +2960,6 @@ dependencies = [ "types", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - [[package]] name = "getrandom" version = "0.2.10" @@ -2948,7 +2969,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -2964,9 +2985,9 @@ 
dependencies = [ [[package]] name = "gimli" -version = "0.27.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "git-version" @@ -3003,7 +3024,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -3014,15 +3035,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -3033,7 +3054,7 @@ dependencies = [ "indexmap 1.9.3", "slab", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tracing", ] @@ -3078,18 +3099,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash 0.8.3", -] - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" dependencies = [ "ahash 0.8.3", "allocator-api2", @@ -3115,21 +3127,20 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.3" +version 
= "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.1", ] [[package]] name = "headers" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64 0.21.4", "bytes", "headers-core", "http", @@ -3164,9 +3175,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -3229,6 +3240,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3259,7 +3279,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.10", + "pin-project-lite", ] [[package]] @@ -3305,7 +3325,6 @@ dependencies = [ "tokio-stream", "tree_hash", "types", - "unused_port", "warp", "warp_utils", ] @@ -3339,9 +3358,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -3365,7 +3384,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.10", + "pin-project-lite", "socket2 0.4.9", "tokio", "tower-service", @@ -3382,9 +3401,9 @@ dependencies = [ "futures-util", "http", "hyper", - "rustls 0.21.5", + "rustls", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", ] [[package]] @@ -3402,16 +3421,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core", ] [[package]] @@ -3483,9 +3502,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "bbb892e5777fe09e16f3d44de7802f4daa7267ecbe8c466f19d94e25bb0c303e" dependencies = [ "async-io", "core-foundation", @@ -3497,7 +3516,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows 0.34.0", + "windows", ] [[package]] @@ -3506,9 +3525,28 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" dependencies = [ - "attohttpc", + "attohttpc 0.16.3", "log", - "rand 0.8.5", + "rand", + "url", + "xmltree", +] + +[[package]] +name = "igd-next" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +dependencies = [ + "async-trait", + "attohttpc 0.24.1", + "bytes", + "futures", + "http", + "hyper", + "log", + "rand", + "tokio", "url", "xmltree", ] @@ -3528,7 +3566,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.4", + "parity-scale-codec 3.6.5", ] [[package]] @@ -3581,18 +3619,27 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.1.0", + "autocfg", "hashbrown 0.12.3", ] [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", ] [[package]] @@ -3631,9 +3678,9 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3642,10 +3689,10 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.4", "widestring 1.0.2", - "windows-sys", - "winreg 0.50.0", + "windows-sys 0.48.0", + 
"winreg", ] [[package]] @@ -3700,6 +3747,15 @@ dependencies = [ "libc", ] +[[package]] +name = "jobserver" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.64" @@ -3715,7 +3771,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", "pem", "ring", "serde", @@ -3732,7 +3788,7 @@ dependencies = [ "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", - "sha2 0.10.7", + "sha2 0.10.8", "sha3 0.10.8", ] @@ -3744,9 +3800,9 @@ checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "once_cell", - "sha2 0.10.7", + "sha2 0.10.8", "signature 2.1.0", ] @@ -3765,10 +3821,26 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" dependencies = [ - "primitive-types 0.12.1", + "primitive-types 0.12.2", "tiny-keccak", ] +[[package]] +name = "kzg" +version = "0.1.0" +dependencies = [ + "arbitrary", + "c-kzg", + "derivative", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", + "hex", + "serde", + "tree_hash", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3786,7 +3858,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.3.0" +version = "4.5.0" dependencies = [ "account_utils", "beacon_chain", @@ -3847,9 +3919,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.147" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libflate" @@ -3883,15 +3955,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.1.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - -[[package]] -name = "libm" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmdbx" @@ -3910,14 +3976,15 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.52.1" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" +checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464" dependencies = [ "bytes", + "either", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -3930,12 +3997,15 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-plaintext", + "libp2p-quic", "libp2p-swarm", "libp2p-tcp", - "libp2p-websocket", + "libp2p-upnp", "libp2p-yamux", - "multiaddr 0.18.0", + "multiaddr", "pin-project", + "rw-stream-sink", + "thiserror", ] [[package]] @@ -3964,9 +4034,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" +checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" dependencies = [ "either", "fnv", @@ -3975,51 +4045,52 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.18.0", - 
"multihash 0.19.0", + "multiaddr", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", "pin-project", "quick-protobuf", - "rand 0.8.5", + "rand", "rw-stream-sink", - "smallvec 1.11.0", + "smallvec", "thiserror", - "unsigned-varint 0.7.1", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-dns" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4394c81c0c06d7b4a60f3face7e8e8a9b246840f98d2c80508d0721b032147" +checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" dependencies = [ + "async-trait", "futures", "libp2p-core", "libp2p-identity", "log", "parking_lot 0.12.1", - "smallvec 1.11.0", + "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.45.0" +version = "0.45.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e378da62e8c9251f6e885ed173a561663f29b251e745586cf6ae6150b295c37" +checksum = "f1f9624e2a843b655f1c1b8262b8d5de6f309413fca4d66f01bb0662429f84dc" dependencies = [ "asynchronous-codec", - "base64 0.21.2", + "base64 0.21.4", "byteorder", "bytes", "either", "fnv", "futures", "futures-ticker", - "getrandom 0.2.10", + "getrandom", "hex_fmt", "instant", "libp2p-core", @@ -4029,53 +4100,55 @@ dependencies = [ "prometheus-client", "quick-protobuf", "quick-protobuf-codec", - "rand 0.8.5", + "rand", "regex", - "sha2 0.10.7", - "smallvec 1.11.0", - "unsigned-varint 0.7.1", + "sha2 0.10.8", + "smallvec", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-identify" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a29675a32dbcc87790db6cf599709e64308f1ae9d5ecea2d259155889982db8" +checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" dependencies = [ "asynchronous-codec", "either", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", 
"log", - "lru 0.10.1", + "lru 0.12.0", "quick-protobuf", "quick-protobuf-codec", - "smallvec 1.11.0", + "smallvec", "thiserror", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.2" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" +checksum = "cdd6317441f361babc74c2989c6484eb0726045399b6648de039e1805ea96972" dependencies = [ "asn1_der", "bs58 0.5.0", - "ed25519-dalek 1.0.1", + "ed25519-dalek", + "hkdf", "libsecp256k1", "log", - "multihash 0.19.0", + "multihash", "p256", "quick-protobuf", - "rand 0.8.5", + "rand", "sec1 0.7.3", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "void", "zeroize", @@ -4094,19 +4167,19 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand 0.8.5", - "smallvec 1.11.0", - "socket2 0.5.3", + "rand", + "smallvec", + "socket2 0.5.4", "tokio", - "trust-dns-proto", + "trust-dns-proto 0.22.0", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ "instant", "libp2p-core", @@ -4119,23 +4192,42 @@ dependencies = [ ] [[package]] -name = "libp2p-noise" -version = "0.43.0" +name = "libp2p-mplex" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" +checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" dependencies = [ + "asynchronous-codec", "bytes", - "curve25519-dalek 3.2.0", "futures", "libp2p-core", "libp2p-identity", "log", - "multiaddr 0.18.0", - "multihash 0.19.0", + "nohash-hasher", + "parking_lot 0.12.1", + "rand", + "smallvec", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "libp2p-noise" +version 
= "0.43.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" +dependencies = [ + "bytes", + "curve25519-dalek", + "futures", + "libp2p-core", + "libp2p-identity", + "log", + "multiaddr", + "multihash", "once_cell", "quick-protobuf", - "rand 0.8.5", - "sha2 0.10.7", + "rand", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -4145,9 +4237,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37266c683a757df713f7dcda0cdcb5ad4681355ffa1b37b77c113c176a531195" +checksum = "53cc5390cc2f77b7de2452fb6105892d0bb64e3cafa3bb346abb603f4cc93a09" dependencies = [ "asynchronous-codec", "bytes", @@ -4156,14 +4248,38 @@ dependencies = [ "libp2p-identity", "log", "quick-protobuf", - "unsigned-varint 0.7.1", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "libp2p-quic" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "log", + "parking_lot 0.12.1", + "quinn", + "rand", + "ring", + "rustls", + "socket2 0.5.4", + "thiserror", + "tokio", ] [[package]] name = "libp2p-swarm" -version = "0.43.2" +version = "0.43.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" +checksum = "48ff0e918a45fec0b6f27b30b0547a57c6c214aa8b13be3647b7701bfd8b8797" dependencies = [ "either", "fnv", @@ -4176,8 +4292,8 @@ dependencies = [ "log", "multistream-select", "once_cell", - "rand 0.8.5", - "smallvec 1.11.0", + "rand", + "smallvec", "tokio", "void", ] @@ -4192,14 +4308,14 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 
2.0.28", + "syn 2.0.38", ] [[package]] name = "libp2p-tcp" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bfdfb6f945c5c014b87872a0bdb6e0aef90e92f380ef57cd9013f118f9289d" +checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" dependencies = [ "futures", "futures-timer", @@ -4208,35 +4324,50 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.3", + "socket2 0.5.4", "tokio", ] [[package]] -name = "libp2p-websocket" -version = "0.42.0" +name = "libp2p-tls" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" +checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" dependencies = [ - "either", "futures", "futures-rustls", "libp2p-core", "libp2p-identity", + "rcgen", + "ring", + "rustls", + "rustls-webpki", + "thiserror", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-upnp" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", "log", - "parking_lot 0.12.1", - "quicksink", - "rw-stream-sink", - "soketto", - "url", - "webpki-roots 0.23.1", + "tokio", + "void", ] [[package]] name = "libp2p-yamux" -version = "0.44.0" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", "libp2p-core", @@ -4258,7 +4389,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.5", + "rand", "serde", "sha2 0.9.9", "typenum", @@ -4317,7 +4448,7 @@ dependencies = [ 
[[package]] name = "lighthouse" -version = "4.3.0" +version = "4.5.0" dependencies = [ "account_manager", "account_utils", @@ -4383,6 +4514,8 @@ dependencies = [ "hex", "lazy_static", "libp2p", + "libp2p-mplex", + "libp2p-quic", "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", @@ -4391,19 +4524,18 @@ dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand 0.8.5", + "rand", "regex", "serde", - "serde_derive", - "sha2 0.10.7", + "sha2 0.9.9", "slog", "slog-async", "slog-term", - "smallvec 1.11.0", + "smallvec", "snap", "ssz_types", "strum", - "superstruct 0.5.0", + "superstruct", "task_executor", "tempfile", "tiny-keccak", @@ -4433,6 +4565,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -4441,9 +4579,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lmdb-rkv" @@ -4468,11 +4606,11 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "autocfg 1.1.0", + "autocfg", "scopeguard", ] @@ -4486,9 +4624,9 @@ dependencies = [ [[package]] name = "log" 
-version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "logging" @@ -4519,11 +4657,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" +checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.1", ] [[package]] @@ -4540,6 +4678,7 @@ name = "lru_cache" version = "0.1.0" dependencies = [ "fnv", + "mock_instant", ] [[package]] @@ -4592,22 +4731,17 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest 0.10.7", ] @@ -4616,7 +4750,7 @@ name = "mdbx-sys" version = "0.11.6-4" source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ - "bindgen", + "bindgen 0.59.2", "cc", "cmake", "libc", @@ -4630,9 +4764,9 @@ 
checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -4640,7 +4774,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -4649,7 +4783,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -4683,31 +4817,10 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] -[[package]] -name = "mev-rs" -version = "0.3.0" -source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" -dependencies = [ - "anvil-rpc", - "async-trait", - "axum", - "beacon-api-client", - "ethereum-consensus", - "hyper", - "parking_lot 0.12.1", - "reqwest", - "serde", - "serde_json", - "ssz_rs", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "migrations_internals" version = "2.1.0" @@ -4715,7 +4828,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" dependencies = [ "serde", - "toml 0.7.6", + "toml 0.7.8", ] [[package]] @@ -4731,13 +4844,13 @@ dependencies = [ [[package]] name = "milagro_bls" -version = "1.4.2" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +version = "1.5.1" +source = 
"git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" dependencies = [ "amcl", "hex", "lazy_static", - "rand 0.7.3", + "rand", "zeroize", ] @@ -4779,10 +4892,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "wasi", + "windows-sys 0.48.0", ] +[[package]] +name = "mock_instant" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" + [[package]] name = "monitoring_api" version = "0.1.0" @@ -4795,7 +4914,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_derive", "serde_json", "slog", "store", @@ -4809,24 +4927,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" -[[package]] -name = "multiaddr" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" -dependencies = [ - "arrayref", - "bs58 0.4.0", - "byteorder", - "data-encoding", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.1", - "url", -] - [[package]] name = "multiaddr" version = "0.18.0" @@ -4838,11 +4938,11 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.0", + "multihash", "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.1", + "unsigned-varint 0.7.2", "url", ] @@ -4859,57 +4959,12 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.3" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" +checksum = 
"076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", - "digest 0.10.7", - "multihash-derive", - "sha2 0.10.7", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "multihash" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" -dependencies = [ - "core2", - "unsigned-varint 0.7.1", -] - -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", + "unsigned-varint 0.7.2", ] [[package]] @@ -4922,8 +4977,8 @@ dependencies = [ "futures", "log", "pin-project", - "smallvec 1.11.0", - "unsigned-varint 0.7.1", + "smallvec", + "unsigned-varint 0.7.2", ] [[package]] @@ -5020,6 +5075,7 @@ dependencies = [ "derivative", "environment", "error-chain", + "eth2", "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", @@ -5035,19 +5091,20 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", + "lru 0.7.8", "lru_cache", "matches", "num_cpus", "operation_pool", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rlp", "slog", "slog-async", "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "store", "strum", @@ -5084,14 +5141,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.2" +version = "0.27.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cfg-if", "libc", - "static_assertions", ] [[package]] @@ -5110,24 +5166,12 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" - [[package]] name = "nom" version = "7.1.3" @@ -5159,31 +5203,30 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-bigint-dig" -version = "0.6.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d51546d704f52ef14b3c962b5776e53d5b862e5790e40a350d366c209bd7f7a" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" dependencies = [ - "autocfg 0.1.8", "byteorder", "lazy_static", - "libm 0.2.7", + "libm", "num-integer", "num-iter", "num-traits", - "rand 0.7.3", + "rand", "serde", - "smallvec 1.11.0", + "smallvec", "zeroize", ] @@ -5193,7 +5236,7 @@ version = "0.1.45" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-traits", ] @@ -5203,18 +5246,18 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ - "autocfg 1.1.0", + "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -5223,7 +5266,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", ] @@ -5238,13 +5281,22 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.18.0" @@ -5297,11 +5349,11 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = 
"bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -5318,7 +5370,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -5329,18 +5381,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.26.0+1.1.1u" +version = "300.1.5+3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" +checksum = "559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ "cc", "libc", @@ -5363,10 +5415,9 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rayon", "serde", - "serde_derive", "state_processing", "store", "tokio", @@ -5386,19 +5437,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", "primeorder", - "sha2 0.10.7", -] - -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm 0.1.4", + "sha2 0.10.8", ] [[package]] @@ -5417,15 +5458,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.4", + "parity-scale-codec-derive 3.6.5", "serde", ] @@ -5443,9 +5484,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5455,9 +5496,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -5477,7 +5518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -5490,21 +5531,32 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.11.0", + "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", - "smallvec 1.11.0", - "windows-targets", + "redox_syscall 0.4.1", + "smallvec", + 
"windows-targets 0.48.5", +] + +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core", + "subtle", ] [[package]] @@ -5513,15 +5565,6 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - [[package]] name = "pbkdf2" version = "0.8.0" @@ -5531,6 +5574,18 @@ dependencies = [ "crypto-mac 0.11.1", ] +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.8", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -5568,7 +5623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -5591,35 +5646,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "pin-project-lite" -version = "0.1.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -5643,7 +5692,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.7", + "der 0.7.8", "spki 0.7.2", ] @@ -5661,9 +5710,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "platforms" -version = "3.0.2" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" [[package]] name = "plotters" @@ -5699,14 +5748,14 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ - "autocfg 1.1.0", + "autocfg", "bitflags 1.3.2", "cfg-if", "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.10", - "windows-sys", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] @@ -5734,33 +5783,39 @@ dependencies = [ [[package]] name = "postgres-protocol" -version = "0.6.5" +version = "0.6.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", "byteorder", "bytes", "fallible-iterator", "hmac 0.12.1", "md-5", "memchr", - "rand 0.8.5", - "sha2 0.10.7", + "rand", + "sha2 0.10.8", "stringprep", ] [[package]] name = "postgres-types" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" dependencies = [ "bytes", "fallible-iterator", "postgres-protocol", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -5784,13 +5839,23 @@ dependencies = [ "sensitive_url", ] +[[package]] +name = "prettyplease" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +dependencies = [ + "proc-macro2", + "syn 2.0.38", +] + [[package]] name = "primeorder" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "elliptic-curve 0.13.5", + "elliptic-curve 0.13.6", ] [[package]] @@ -5808,9 +5873,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ 
"fixed-hash 0.8.0", "impl-codec 0.6.0", @@ -5822,12 +5887,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "thiserror", - "toml 0.5.11", + "once_cell", + "toml_edit", ] [[package]] @@ -5862,34 +5927,37 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro-warning" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] [[package]] -name = "procinfo" -version = "0.4.2" +name = "procfs" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" +checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" dependencies = [ + "bitflags 1.3.2", "byteorder", - "libc", - "nom 2.2.1", - "rustc_version 0.2.3", + "chrono", + "flate2", + "hex", + "lazy_static", + "rustix 0.36.16", ] [[package]] @@ -5921,13 +5989,13 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] @@ -5938,9 +6006,8 @@ dependencies = [ "ethereum_ssz_derive", "safe_arith", "serde", - "serde_derive", "serde_yaml", - "superstruct 0.5.0", + "superstruct", "types", ] @@ -5994,26 +6061,25 @@ dependencies = [ "bytes", "quick-protobuf", "thiserror", - "unsigned-varint 0.7.1", + "unsigned-varint 0.7.2", ] [[package]] name = "quickcheck" -version = "0.9.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger 0.7.1", + "env_logger 0.8.4", "log", - "rand 0.7.3", - "rand_core 0.5.1", + "rand", ] [[package]] name = "quickcheck_macros" -version = "0.9.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" +checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2", "quote", @@ -6021,21 +6087,58 @@ dependencies = [ ] [[package]] -name = "quicksink" -version = "0.1.2" +name = "quinn" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.12", + "bytes", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" +dependencies = [ + "bytes", + "rand", + "ring", + "rustc-hash", + "rustls", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.4", + "tracing", + "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -6073,19 +6176,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -6093,18 +6183,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -6114,16 +6194,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -6132,16 +6203,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] @@ -6150,14 +6212,14 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -6165,14 +6227,24 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", +] + +[[package]] +name = "rcgen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +dependencies = [ + "pem", + "ring", + "time", + "yasna", ] [[package]] @@ -6193,27 +6265,36 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.9.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.4", - "regex-syntax 0.7.4", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -6227,13 +6308,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -6244,17 +6325,17 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = 
"c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -6272,24 +6353,25 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.10", - "rustls 0.21.5", + "pin-project-lite", + "rustls", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.1", - "tokio-util 0.7.8", + "tokio-rustls", + "tokio-util 0.7.9", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.22.6", - "winreg 0.10.1", + "webpki-roots", + "winreg", ] [[package]] @@ -6399,9 +6481,9 @@ dependencies = [ "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.3", + "hashlink 0.8.4", "libsqlite3-sys", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -6422,86 +6504,75 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.18", + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", ] [[package]] name = "rustix" -version = "0.37.23" +version = "0.36.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "6da3636faa25820d8648e0e31c5d519bbb01f72fdf57131f0f5f7da5fed36eab" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", +] + +[[package]] +name = "rustix" +version = "0.37.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f3f8f960ed3b5a59055428714943298bf3fa2d4a1d53135084e0544829d995" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.5", - "windows-sys", + "linux-raw-sys 0.4.10", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.19.1" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.1", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", - "sct 
0.7.0", - "webpki 0.22.0", -] - -[[package]] -name = "rustls" -version = "0.21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.2", - "sct 0.7.0", + "rustls-webpki", + "sct", ] [[package]] @@ -6510,24 +6581,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", ] [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", @@ -6560,19 +6621,13 @@ checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" name = "safe_arith" version = "0.1.0" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "salsa20" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -6592,7 +6647,7 @@ checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.6.4", + "parity-scale-codec 3.6.5", "scale-info-derive", ] @@ 
-6614,7 +6669,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -6650,16 +6705,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -6691,7 +6736,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.7", + "der 0.7.8", "generic-array", "pkcs8 0.10.2", "subtle", @@ -6723,28 +6768,13 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6761,24 +6791,13 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.180" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" +checksum = 
"8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hex" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" -dependencies = [ - "array-init", - "serde", - "smallvec 0.6.14", -] - [[package]] name = "serde_array_query" version = "0.1.0" @@ -6801,20 +6820,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.180" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -6839,7 +6858,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -6897,35 +6916,11 @@ dependencies = [ "yaml-rust", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - [[package]] 
name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -6947,9 +6942,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -6980,18 +6975,18 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook-registry" @@ -7009,7 +7004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -7019,7 +7014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -7031,7 +7026,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.24", + "time", ] 
[[package]] @@ -7054,17 +7049,17 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -7086,11 +7081,10 @@ dependencies = [ "lru 0.7.8", "maplit", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "rayon", "safe_arith", "serde", - "serde_derive", "slog", "sloggers", "strum", @@ -7130,7 +7124,6 @@ dependencies = [ "rayon", "rusqlite", "serde", - "serde_derive", "serde_json", "tempfile", "types", @@ -7141,12 +7134,15 @@ name = "slog" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" +dependencies = [ + "erased-serde", +] [[package]] name = "slog-async" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "766c59b252e62a34651412870ff55d8c4e6d04df19b43eecb2703e417b097ffe" +checksum = "72c8038f898a2c79507940990f05386455b3a317d8f18d4caea7cbc3d5096b84" dependencies = [ "crossbeam-channel", "slog", @@ -7163,7 +7159,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.24", + "time", ] [[package]] @@ -7208,14 +7204,14 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.24", + "time", ] [[package]] name = "sloggers" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e20d36cb80da75a9c5511872f15247ddad14ead8c1dd97a86b56d1be9f5d4a0e" +checksum = "7a0a4d8569a69ee56f277bffc2f6eee637b98ed468448e8a5a84fa63efe4de9d" dependencies = [ "chrono", "libc", @@ -7247,18 +7243,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "0.6.14" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "snap" @@ -7268,18 +7255,18 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "ring", - "rustc_version 0.4.0", - "sha2 0.10.7", + "rustc_version", + "sha2 0.10.8", "subtle", ] @@ -7295,27 +7282,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", - "windows-sys", -] - -[[package]] -name = "soketto" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" 
-dependencies = [ - "base64 0.13.1", - "bytes", - "futures", - "httparse", - "log", - "rand 0.8.5", - "sha-1 0.9.8", + "windows-sys 0.48.0", ] [[package]] @@ -7341,32 +7313,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.7", -] - -[[package]] -name = "ssz_rs" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" -dependencies = [ - "bitvec 1.0.1", - "hex", - "num-bigint", - "serde", - "sha2 0.9.9", - "ssz_rs_derive", -] - -[[package]] -name = "ssz_rs_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "der 0.7.8", ] [[package]] @@ -7382,7 +7329,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec 1.11.0", + "smallvec", "tree_hash", "typenum", ] @@ -7407,7 +7354,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tokio", "tree_hash", @@ -7448,7 +7395,6 @@ dependencies = [ "lru 0.7.8", "parking_lot 0.12.1", "serde", - "serde_derive", "slog", "sloggers", "state_processing", @@ -7459,10 +7405,11 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" dependencies = [ + "finl_unicode", "unicode-bidi", "unicode-normalization", ] @@ -7507,20 +7454,6 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" 
-[[package]] -name = "superstruct" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" -dependencies = [ - "darling", - "itertools", - "proc-macro2", - "quote", - "smallvec 1.11.0", - "syn 1.0.109", -] - [[package]] name = "superstruct" version = "0.6.0" @@ -7531,7 +7464,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] @@ -7557,9 +7490,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -7627,7 +7560,6 @@ dependencies = [ "lighthouse_network", "parking_lot 0.12.1", "serde", - "serde_derive", "serde_json", "sysinfo", "types", @@ -7673,15 +7605,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.4", - "windows-sys", + "rustix 0.38.20", + "windows-sys 0.48.0", ] [[package]] @@ -7697,9 +7629,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -7723,18 +7655,17 @@ dependencies = [ [[package]] name = "testcontainers" version = "0.14.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" dependencies = [ "bollard-stubs", "futures", "hex", "hmac 0.12.1", "log", - "rand 0.8.5", + "rand", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -7748,22 +7679,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -7787,25 +7718,15 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", "libc", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -7813,15 +7734,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" 
+version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -7839,17 +7760,17 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.8.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ "anyhow", - "hmac 0.8.1", + "hmac 0.12.1", "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", + "pbkdf2 0.11.0", + "rand", "rustc-hash", - "sha2 0.9.9", + "sha2 0.10.8", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -7892,22 +7813,20 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ - "autocfg 1.1.0", "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite 0.2.10", + "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.4", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -7916,7 +7835,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.10", + "pin-project-lite", "tokio", ] @@ -7928,7 +7847,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] @@ -7943,9 +7862,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" dependencies = [ "async-trait", "byteorder", @@ -7957,34 +7876,14 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "phf", - "pin-project-lite 0.2.10", + "pin-project-lite", "postgres-protocol", "postgres-types", - "socket2 0.5.3", + "rand", + "socket2 0.5.4", "tokio", - "tokio-util 0.7.8", -] - -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.8", - "tokio", - "webpki 0.22.0", + "tokio-util 0.7.9", + "whoami", ] [[package]] @@ -7993,7 +7892,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.5", + "rustls", "tokio", ] @@ -8004,38 +7903,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ 
"futures-core", - "pin-project-lite 0.2.10", + "pin-project-lite", "tokio", - "tokio-util 0.7.8", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" -dependencies = [ - "futures-util", - "log", - "pin-project", - "tokio", - "tungstenite 0.14.0", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" -dependencies = [ - "futures-util", - "log", - "rustls 0.20.8", - "tokio", - "tokio-rustls 0.23.4", - "tungstenite 0.17.3", - "webpki 0.22.0", - "webpki-roots 0.22.6", + "tokio-util 0.7.9", ] [[package]] @@ -8049,21 +7919,21 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.10", + "pin-project-lite", "slab", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.10", + "pin-project-lite", "slab", "tokio", "tracing", @@ -8080,9 +7950,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", @@ -8101,11 +7971,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -8121,7 +7991,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.10", + "pin-project-lite", "tokio", "tower-layer", "tower-service", @@ -8142,33 +8012,32 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", - "pin-project-lite 0.2.10", + "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -8206,7 +8075,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec 1.11.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -8240,7 +8109,7 @@ checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec 1.11.0", 
+ "smallvec", ] [[package]] @@ -8273,15 +8142,15 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner", + "enum-as-inner 0.5.1", "futures-channel", "futures-io", "futures-util", "idna 0.2.3", "ipnet", "lazy_static", - "rand 0.8.5", - "smallvec 1.11.0", + "rand", + "smallvec", "socket2 0.4.9", "thiserror", "tinyvec", @@ -8291,23 +8160,49 @@ dependencies = [ ] [[package]] -name = "trust-dns-resolver" -version = "0.22.0" +name = "trust-dns-proto" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "559ac980345f7f5020883dd3bcacf176355225e01916f8c2efecad7534f682c6" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner 0.6.0", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "smallvec", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c723b0e608b24ad04c73b2607e0241b2c98fd79795a95e98b068b6966138a29d" dependencies = [ "cfg-if", "futures-util", "ipconfig", - "lazy_static", "lru-cache", + "once_cell", "parking_lot 0.12.1", + "rand", "resolv-conf", - "smallvec 1.11.0", + "smallvec", "thiserror", "tokio", "tracing", - "trust-dns-proto", + "trust-dns-proto 0.23.1", ] [[package]] @@ -8316,60 +8211,11 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] -name = "tungstenite" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - 
"thiserror", - "url", - "utf-8", -] - -[[package]] -name = "tungstenite" -version = "0.17.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "rustls 0.20.8", - "sha-1 0.10.1", - "thiserror", - "url", - "utf-8", - "webpki 0.22.0", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "types" @@ -8392,6 +8238,7 @@ dependencies = [ "hex", "int_to_bytes", "itertools", + "kzg", "lazy_static", "log", "maplit", @@ -8399,23 +8246,21 @@ dependencies = [ "metastruct", "parking_lot 0.12.1", "paste", - "rand 0.8.5", + "rand", "rand_xorshift", "rayon", "regex", "rusqlite", "safe_arith", "serde", - "serde_derive", "serde_json", - "serde_with", "serde_yaml", "slog", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "strum", - "superstruct 0.6.0", + "superstruct", "swap_or_not_shuffle", "tempfile", "test_random_derive", @@ -8445,9 +8290,9 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] @@ -8460,9 +8305,9 @@ checksum = 
"92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -8475,9 +8320,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -8507,9 +8352,9 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ "asynchronous-codec", "bytes", @@ -8532,28 +8377,22 @@ dependencies = [ [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna 0.4.0", "percent-encoding", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.10", + "getrandom", 
"serde", ] @@ -8588,13 +8427,12 @@ dependencies = [ "malloc_utils", "monitoring_api", "parking_lot 0.12.1", - "rand 0.8.5", + "rand", "reqwest", "ring", "safe_arith", "sensitive_url", "serde", - "serde_derive", "serde_json", "slashing_protection", "slog", @@ -8625,7 +8463,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.8.5", + "rand", "tempfile", "tree_hash", "types", @@ -8688,15 +8526,15 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -8713,8 +8551,8 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" -source = "git+https://github.com/macladson/warp?rev=7e75acc368229a46a236a8c991bf251fe7fe50ef#7e75acc368229a46a236a8c991bf251fe7fe50ef" +version = "0.3.6" +source = "git+https://github.com/seanmonstar/warp.git#efe8548a19172e69918396d0fdbc369df9d0eb17" dependencies = [ "bytes", "futures-channel", @@ -8725,18 +8563,17 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls", "tokio-stream", - "tokio-tungstenite 0.15.0", - "tokio-util 0.6.10", + "tokio-util 0.7.9", "tower-service", "tracing", ] @@ -8759,18 +8596,6 @@ dependencies = [ "warp", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -8798,7 +8623,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -8832,7 +8657,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8845,9 +8670,9 @@ checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" dependencies = [ "futures-util", "js-sys", @@ -8889,9 +8714,10 @@ dependencies = [ "http_api", "hyper", "log", + "logging", "network", "r2d2", - "rand 0.7.3", + "rand", "reqwest", "serde", "serde_json", @@ -8929,7 +8755,6 @@ dependencies = [ "parking_lot 0.12.1", "reqwest", "serde", - "serde_derive", "serde_json", "serde_yaml", "slot_clock", @@ -8942,42 +8767,32 @@ dependencies = [ "zip", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ - "webpki 0.22.0", + "either", + "home", + "once_cell", + "rustix 0.38.20", ] [[package]] -name = "webpki-roots" -version = "0.23.1" +name = "whoami" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" dependencies = [ - "rustls-webpki 0.100.1", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -9016,9 +8831,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -9031,24 +8846,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 
0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", -] - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets", + "windows-core", + "windows-targets 0.48.5", ] [[package]] @@ -9063,120 +8866,156 @@ dependencies = [ "winapi", ] +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] 
+name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" -version = 
"0.34.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.3" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" +checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.50.0" @@ -9184,7 +9023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -9198,7 +9037,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version", "send_wrapper", "thiserror", "wasm-bindgen", @@ -9223,20 +9062,38 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "1.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", + "curve25519-dalek", + "rand_core", + 
"serde", "zeroize", ] [[package]] -name = "xml-rs" -version = "0.8.16" +name = "x509-parser" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "xml-rs" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" [[package]] name = "xmltree" @@ -9258,18 +9115,28 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.8.5", + "pin-project", + "rand", "static_assertions", ] +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -9287,19 +9154,54 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.38", ] [[package]] name = "zip" -version = "0.5.13" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ + "aes 0.8.3", "byteorder", "bzip2", + 
"constant_time_eq", "crc32fast", + "crossbeam-utils", "flate2", - "thiserror", - "time 0.1.45", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "sha1", + "time", + "zstd", +] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 15906a0306..9adb913ff5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ members = [ "consensus/swap_or_not_shuffle", "crypto/bls", + "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", @@ -89,9 +90,139 @@ members = [ ] resolver = "2" -[patch] -[patch.crates-io] -warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } +[workspace.package] +edition = "2021" + +[workspace.dependencies] +arbitrary = { version = "1", features = ["derive"] } +bincode = "1" +bitvec = "1" +byteorder = "1" +bytes = "1" +clap = "2" +compare_fields_derive = { path = "common/compare_fields_derive" } +criterion = "0.3" +delay_map = "0.3" +derivative = "2" +dirs = "3" +discv5 = { version = "0.3", features = ["libp2p"] } +env_logger = "0.9" +error-chain = "0.12" +ethereum-types = "0.14" +ethereum_hashing = "1.0.0-beta.2" +ethereum_serde_utils = "0.5" +ethereum_ssz = "0.5" +ethereum_ssz_derive = "0.5" +ethers-core = "1" +ethers-providers = { version = "1", 
default-features = false } +exit-future = "0.2" +fnv = "1" +fs2 = "0.4" +futures = "0.3" +hex = "0.4" +hyper = "0.14" +itertools = "0.10" +lazy_static = "1" +libsecp256k1 = "0.7" +log = "0.4" +lru = "0.7" +maplit = "1" +num_cpus = "1" +parking_lot = "0.12" +paste = "1" +quickcheck = "1" +quickcheck_macros = "1" +quote = "1" +r2d2 = "0.8" +rand = "0.8" +rayon = "1.7" +regex = "1" +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } +ring = "0.16" +rusqlite = { version = "0.28", features = ["bundled"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_repr = "0.1" +serde_yaml = "0.8" +sha2 = "0.9" +slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] } +slog-async = "2" +slog-term = "2" +sloggers = { version = "2", features = ["json"] } +smallvec = "1" +snap = "1" +ssz_types = "0.5" +strum = { version = "0.24", features = ["derive"] } +superstruct = "0.6" +syn = "1" +sysinfo = "0.26" +tempfile = "3" +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } +tokio-stream = { version = "0.1", features = ["sync"] } +tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } +tree_hash = "0.5" +tree_hash_derive = "0.5" +url = "2" +uuid = { version = "0.8", features = ["serde", "v4"] } +# TODO update to warp 0.3.6 after released. +warp = { git = "https://github.com/seanmonstar/warp.git", default-features = false, features = ["tls"] } +zeroize = { version = "1", features = ["zeroize_derive"] } +zip = "0.6" + +# Local crates. 
+account_utils = { path = "common/account_utils" } +beacon_chain = { path = "beacon_node/beacon_chain" } +beacon_node = { path = "beacon_node" } +beacon_processor = { path = "beacon_node/beacon_processor" } +bls = { path = "crypto/bls" } +cached_tree_hash = { path = "consensus/cached_tree_hash" } +clap_utils = { path = "common/clap_utils" } +compare_fields = { path = "common/compare_fields" } +deposit_contract = { path = "common/deposit_contract" } +directory = { path = "common/directory" } +environment = { path = "lighthouse/environment" } +eth1 = { path = "beacon_node/eth1" } +eth1_test_rig = { path = "testing/eth1_test_rig" } +eth2 = { path = "common/eth2" } +eth2_config = { path = "common/eth2_config" } +eth2_key_derivation = { path = "crypto/eth2_key_derivation" } +eth2_keystore = { path = "crypto/eth2_keystore" } +eth2_network_config = { path = "common/eth2_network_config" } +eth2_wallet = { path = "crypto/eth2_wallet" } +execution_layer = { path = "beacon_node/execution_layer" } +filesystem = { path = "common/filesystem" } +fork_choice = { path = "consensus/fork_choice" } +genesis = { path = "beacon_node/genesis" } +http_api = { path = "beacon_node/http_api" } +int_to_bytes = { path = "consensus/int_to_bytes" } +kzg = { path = "crypto/kzg" } +lighthouse_metrics = { path = "common/lighthouse_metrics" } +lighthouse_network = { path = "beacon_node/lighthouse_network" } +lighthouse_version = { path = "common/lighthouse_version" } +lockfile = { path = "common/lockfile" } +logging = { path = "common/logging" } +lru_cache = { path = "common/lru_cache" } +malloc_utils = { path = "common/malloc_utils" } +merkle_proof = { path = "consensus/merkle_proof" } +monitoring_api = { path = "common/monitoring_api" } +network = { path = "beacon_node/network" } +operation_pool = { path = "beacon_node/operation_pool" } +pretty_reqwest_error = { path = "common/pretty_reqwest_error" } +proto_array = { path = "consensus/proto_array" } +safe_arith = {path = "consensus/safe_arith"} 
+sensitive_url = { path = "common/sensitive_url" } +slasher = { path = "slasher" } +slashing_protection = { path = "validator_client/slashing_protection" } +slot_clock = { path = "common/slot_clock" } +state_processing = { path = "consensus/state_processing" } +store = { path = "beacon_node/store" } +swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } +task_executor = { path = "common/task_executor" } +types = { path = "consensus/types" } +unused_port = { path = "common/unused_port" } +validator_client = { path = "validator_client/" } +validator_dir = { path = "common/validator_dir" } +warp_utils = { path = "common/warp_utils" } [profile.maxperf] inherits = "release" diff --git a/Cross.toml b/Cross.toml index d5f7a5d506..871391253d 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,5 +1,5 @@ [target.x86_64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +pre-build = ["apt-get install -y cmake clang-5.0"] [target.aarch64-unknown-linux-gnu] -pre-build = ["apt-get install -y cmake clang-3.9"] +pre-build = ["apt-get install -y cmake clang-5.0"] diff --git a/Dockerfile b/Dockerfile index f07c42dd85..878a3602bd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,12 @@ -FROM rust:1.68.2-bullseye AS builder +FROM rust:1.69.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES ARG PROFILE=release +ARG CARGO_USE_GIT_CLI=true ENV FEATURES $FEATURES ENV PROFILE $PROFILE +ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI RUN cd lighthouse && make FROM ubuntu:22.04 diff --git a/Makefile b/Makefile index cb447e26a5..e3d889bfea 100644 --- a/Makefile +++ b/Makefile @@ -31,12 +31,15 @@ CROSS_PROFILE ?= release # List of features to use when running EF tests. EF_TEST_FEATURES ?= +# List of features to use when running CI tests. +TEST_FEATURES ?= + # Cargo profile for regular builds. PROFILE ?= release # List of all hard forks. 
This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair merge capella +FORKS=phase0 altair merge capella deneb # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -71,13 +74,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-x86_64-portable: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64-portable: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -106,12 +109,26 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --release --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network + +# Runs the full workspace tests in **release**, without downloading any additional +# test vectors, using nextest. +nextest-release: + cargo nextest run --workspace --release --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --workspace --exclude ef_tests --exclude beacon_chain + cargo test --workspace --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude network + +# Runs the full workspace tests in **debug**, without downloading any additional test +# vectors, using nextest. +nextest-debug: + cargo nextest run --workspace --features "$(TEST_FEATURES)" \ + --exclude ef_tests --exclude beacon_chain --exclude network # Runs cargo-fmt (linter). cargo-fmt: @@ -119,7 +136,7 @@ cargo-fmt: # Typechecks benchmark code check-benches: - cargo check --workspace --benches + cargo check --workspace --benches --features "$(TEST_FEATURES)" # Runs only the ef-test vectors. 
run-ef-tests: @@ -129,25 +146,41 @@ run-ef-tests: cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests +# Runs EF test vectors with nextest +nextest-run-ef-tests: + rm -rf $(EF_TESTS)/.accessed_file_log.txt + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" + ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests + # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) test-op-pool-%: - env FORK_NAME=$* cargo test --release \ - --features 'beacon_chain/fork_from_env'\ + env FORK_NAME=$* cargo nextest run --release \ + --features "beacon_chain/fork_from_env,$(TEST_FEATURES)"\ -p operation_pool +# Run the tests in the `network` crate for all known forks. +test-network: $(patsubst %,test-network-%,$(FORKS)) + +test-network-%: + env FORK_NAME=$* cargo nextest run --release \ + --features "fork_from_env,$(TEST_FEATURES)" \ + -p network + # Run the tests in the `slasher` crate for all supported database backends. 
test-slasher: - cargo test --release -p slasher --features lmdb - cargo test --release -p slasher --no-default-features --features mdbx - cargo test --release -p slasher --features lmdb,mdbx # both backends enabled + cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)" + cargo nextest run --release -p slasher --no-default-features --features "mdbx,$(TEST_FEATURES)" + cargo nextest run --release -p slasher --features "lmdb,mdbx,$(TEST_FEATURES)" # both backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -156,6 +189,9 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Downloads and runs the EF test vectors with nextest. +nextest-ef: make-ef-tests nextest-run-ef-tests + # Runs tests checking interop between Lighthouse and execution clients. test-exec-engine: make -C $(EXECUTION_ENGINE_INTEGRATION) test @@ -170,8 +206,9 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
lint: - cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \ + cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ + -D clippy::manual_let_else \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ -A clippy::from-over-into \ @@ -201,13 +238,17 @@ make-ef-tests: # Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check -p state_processing --features arbitrary-fuzz - cargo check -p slashing_protection --features arbitrary-fuzz + cargo check -p state_processing --features arbitrary-fuzz,$(TEST_FEATURES) + cargo check -p slashing_protection --features arbitrary-fuzz,$(TEST_FEATURES) # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) -audit: +audit: install-audit audit-CI + +install-audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093 + +audit-CI: + cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: @@ -215,7 +256,7 @@ vendor: # Runs `cargo udeps` to check for unused dependencies udeps: - cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release + cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release --features "$(TEST_FEATURES)" # Performs a `cargo` clean and cleans the `ef_tests` directory. 
clean: diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7d90cbb427..0fab7b31fe 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,31 +1,35 @@ [package] name = "account_manager" version = "0.3.5" -authors = ["Paul Hauner ", "Luke Anderson "] -edition = "2021" +authors = [ + "Paul Hauner ", + "Luke Anderson ", +] +edition = { workspace = true } [dependencies] -bls = { path = "../crypto/bls" } -clap = "2.33.3" -types = { path = "../consensus/types" } -environment = { path = "../lighthouse/environment" } -eth2_network_config = { path = "../common/eth2_network_config" } -clap_utils = { path = "../common/clap_utils" } -directory = { path = "../common/directory" } -eth2_wallet = { path = "../crypto/eth2_wallet" } +bls = { workspace = true } +clap = { workspace = true } +types = { workspace = true } +environment = { workspace = true } +eth2_network_config = { workspace = true } +clap_utils = { workspace = true } +directory = { workspace = true } +eth2_wallet = { workspace = true } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -validator_dir = { path = "../common/validator_dir" } -tokio = { version = "1.14.0", features = ["full"] } -eth2_keystore = { path = "../crypto/eth2_keystore" } -account_utils = { path = "../common/account_utils" } -slashing_protection = { path = "../validator_client/slashing_protection" } -eth2 = {path = "../common/eth2"} -safe_arith = {path = "../consensus/safe_arith"} -slot_clock = { path = "../common/slot_clock" } -filesystem = { path = "../common/filesystem" } -sensitive_url = { path = "../common/sensitive_url" } -serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.58" +validator_dir = { workspace = true } +tokio = { workspace = true } +eth2_keystore = { workspace = true } +account_utils = { workspace = true } +slashing_protection = { workspace = true } +eth2 = { workspace = true } +safe_arith = { workspace = true } +slot_clock = { workspace = true } 
+filesystem = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 5755a355f3..bc9e0ee1dd 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -14,7 +14,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::path::{Path, PathBuf}; use std::time::Duration; use tokio::time::sleep; -use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit}; +use types::{ChainSpec, Epoch, EthSpec, VoluntaryExit}; pub const CMD: &str = "exit"; pub const KEYSTORE_FLAG: &str = "keystore"; @@ -123,10 +123,8 @@ async fn publish_voluntary_exit( ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config - .beacon_state::() - .as_ref() - .expect("network should have valid genesis state") - .genesis_validators_root(); + .genesis_validators_root::()? + .ok_or("Genesis state is unknown")?; // Verify that the beacon node and validator being exited are on the same network. if genesis_data.genesis_validators_root != testnet_genesis_root { @@ -148,7 +146,6 @@ async fn publish_voluntary_exit( .ok_or("Failed to get current epoch. 
Please check your system time")?; let validator_index = get_validator_index_for_exit(client, &keypair.pk, epoch, spec).await?; - let fork = get_beacon_state_fork(client).await?; let voluntary_exit = VoluntaryExit { epoch, validator_index, @@ -175,12 +172,8 @@ async fn publish_voluntary_exit( if confirmation == CONFIRMATION_PHRASE { // Sign and publish the voluntary exit to network - let signed_voluntary_exit = voluntary_exit.sign( - &keypair.sk, - &fork, - genesis_data.genesis_validators_root, - spec, - ); + let signed_voluntary_exit = + voluntary_exit.sign(&keypair.sk, genesis_data.genesis_validators_root, spec); client .post_beacon_pool_voluntary_exits(&signed_voluntary_exit) .await @@ -318,16 +311,6 @@ async fn is_syncing(client: &BeaconNodeHttpClient) -> Result { .is_syncing) } -/// Get fork object for the current state by querying the beacon node client. -async fn get_beacon_state_fork(client: &BeaconNodeHttpClient) -> Result { - Ok(client - .get_beacon_states_fork(StateId::Head) - .await - .map_err(|e| format!("Failed to get get fork: {:?}", e))? - .ok_or("Failed to get fork, state not found")? - .data) -} - /// Calculates the current epoch from the genesis time and current time. 
fn get_current_epoch(genesis_time: u64, spec: &ChainSpec) -> Option { let slot_clock = SystemTimeSlotClock::new( diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index f25bbd8159..c6d81275a5 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -7,7 +7,7 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; @@ -81,20 +81,13 @@ pub fn cli_run( validator_base_dir: PathBuf, ) -> Result<(), String> { let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); - let eth2_network_config = env .eth2_network_config .ok_or("Unable to get testnet configuration from the environment")?; let genesis_validators_root = eth2_network_config - .beacon_state::() - .map(|state: BeaconState| state.genesis_validators_root()) - .map_err(|e| { - format!( - "Unable to get genesis state, has genesis occurred? Detail: {:?}", - e - ) - })?; + .genesis_validators_root::()? + .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f55c724dc3..276faaecdd 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,8 +1,11 @@ [package] name = "beacon_node" -version = "4.3.0" -authors = ["Paul Hauner ", "Age Manning ", + "Age Manning ", "Age Manning "] -edition = "2021" +edition = { workspace = true } autotests = false # using a single test binary compiles faster [features] @@ -10,63 +10,67 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. 
participation_metrics = [] # Exposes validator participation metrics to Prometheus. fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable +portable = ["bls/supranational-portable"] [dev-dependencies] -maplit = "1.0.2" -environment = { path = "../../lighthouse/environment" } -serde_json = "1.0.58" +maplit = { workspace = true } +environment = { workspace = true } +serde_json = { workspace = true } [dependencies] -merkle_proof = { path = "../../consensus/merkle_proof" } -store = { path = "../store" } -parking_lot = "0.12.0" -lazy_static = "1.4.0" -smallvec = "1.6.1" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -operation_pool = { path = "../operation_pool" } -rayon = "1.4.1" -serde = "1.0.116" -serde_derive = "1.0.116" -ethereum_serde_utils = "0.5.0" -slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = { version = "2.1.1", features = ["json"] } -slot_clock = { path = "../../common/slot_clock" } -ethereum_hashing = "1.0.0-beta.2" -ethereum_ssz = "0.5.0" -ssz_types = "0.5.3" -ethereum_ssz_derive = "0.5.0" -state_processing = { path = "../../consensus/state_processing" } -tree_hash_derive = "0.5.0" -tree_hash = "0.5.0" -types = { path = "../../consensus/types" } -tokio = "1.14.0" -tokio-stream = "0.1.3" -eth1 = { path = "../eth1" } -futures = "0.3.7" -genesis = { path = "../genesis" } -int_to_bytes = { path = "../../consensus/int_to_bytes" } -rand = "0.8.5" -proto_array = { path = "../../consensus/proto_array" } -lru = "0.7.1" -tempfile = "3.1.0" -bitvec = "0.20.4" -bls = { path = "../../crypto/bls" } -safe_arith = { path = "../../consensus/safe_arith" } -fork_choice = { path = "../../consensus/fork_choice" } -task_executor = { path = "../../common/task_executor" } -derivative = "2.1.1" -itertools = "0.10.0" -slasher = { path = "../../slasher" } -eth2 = { path = "../../common/eth2" } -strum = { version = "0.24.0", features = ["derive"] } -logging = { path = "../../common/logging" } 
-execution_layer = { path = "../execution_layer" } -sensitive_url = { path = "../../common/sensitive_url" } -superstruct = "0.5.0" -hex = "0.4.2" -exit-future = "0.2.0" -unused_port = {path = "../../common/unused_port"} -oneshot_broadcast = { path = "../../common/oneshot_broadcast" } +serde_json = { workspace = true } +eth2_network_config = { workspace = true } +merkle_proof = { workspace = true } +store = { workspace = true } +parking_lot = { workspace = true } +lazy_static = { workspace = true } +smallvec = { workspace = true } +lighthouse_metrics = { workspace = true } +operation_pool = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +ethereum_serde_utils = { workspace = true } +slog = { workspace = true } +sloggers = { workspace = true } +slot_clock = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +ssz_types = { workspace = true } +ethereum_ssz_derive = { workspace = true } +state_processing = { workspace = true } +tree_hash_derive = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +eth1 = { workspace = true } +futures = { workspace = true } +genesis = { workspace = true } +int_to_bytes = { workspace = true } +rand = { workspace = true } +proto_array = { workspace = true } +lru = { workspace = true } +tempfile = { workspace = true } +bitvec = { workspace = true } +bls = { workspace = true } +kzg = { workspace = true } +safe_arith = { workspace = true } +fork_choice = { workspace = true } +task_executor = { workspace = true } +derivative = { workspace = true } +itertools = { workspace = true } +slasher = { workspace = true } +eth2 = { workspace = true } +strum = { workspace = true } +logging = { workspace = true } +execution_layer = { workspace = true } +sensitive_url = { workspace = true } +superstruct = { workspace = true } +hex = { workspace = true } +exit-future = { 
workspace = true } +oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } +slog-term = { workspace = true } +slog-async = { workspace = true } [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 94bd28f98f..992c7a479e 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -5,6 +5,7 @@ use participation_cache::ParticipationCache; use safe_arith::SafeArith; use serde_utils::quoted_u64::Quoted; use slog::debug; +use state_processing::per_epoch_processing::altair::process_inactivity_updates; use state_processing::{ common::altair::BaseRewardPerIncrement, per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, @@ -50,9 +51,10 @@ impl BeaconChain { match state { BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - self.compute_attestation_rewards_altair(state, validators) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => self.compute_attestation_rewards_altair(state, validators), } } @@ -123,6 +125,7 @@ impl BeaconChain { // Calculate ideal_rewards let participation_cache = ParticipationCache::new(&state, spec)?; + process_inactivity_updates(&mut state, &participation_cache, spec)?; let previous_epoch = state.previous_epoch(); @@ -189,6 +192,7 @@ impl BeaconChain { let mut head_reward = 0i64; let mut target_reward = 0i64; let mut source_reward = 0i64; + let mut inactivity_penalty = 0i64; if eligible { let effective_balance = state.get_effective_balance(*validator_index)?; @@ -214,6 +218,14 @@ impl BeaconChain { head_reward = 0; } else if flag_index == TIMELY_TARGET_FLAG_INDEX { target_reward = *penalty; + + let penalty_numerator = effective_balance + 
.safe_mul(state.get_inactivity_score(*validator_index)?)?; + let penalty_denominator = spec + .inactivity_score_bias + .safe_mul(spec.inactivity_penalty_quotient_for_state(&state))?; + inactivity_penalty = + -(penalty_numerator.safe_div(penalty_denominator)? as i64); } else if flag_index == TIMELY_SOURCE_FLAG_INDEX { source_reward = *penalty; } @@ -225,8 +237,7 @@ impl BeaconChain { target: target_reward, source: source_reward, inclusion_delay: None, - // TODO: altair calculation logic needs to be updated to include inactivity penalty - inactivity: 0, + inactivity: inactivity_penalty, }); } @@ -249,7 +260,6 @@ impl BeaconChain { target: 0, source: 0, inclusion_delay: None, - // TODO: altair calculation logic needs to be updated to include inactivity penalty inactivity: 0, }); match *flag_index { diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 5535fec37c..d7a8bca4d0 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -55,7 +55,7 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; @@ -1049,10 +1049,21 @@ pub fn verify_propagation_slot_range( } // Taking advantage of saturating subtraction on `Slot`. - let earliest_permissible_slot = slot_clock + let one_epoch_prior = slot_clock .now_with_past_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)? 
- E::slots_per_epoch(); + + let current_fork = + spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); + let earliest_permissible_slot = match current_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => one_epoch_prior, + // EIP-7045 + ForkName::Deneb => one_epoch_prior + .epoch(E::slots_per_epoch()) + .start_slot(E::slots_per_epoch()), + }; + if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { attestation_slot, diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 786402c997..8cbeae371e 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -64,19 +64,19 @@ impl BeaconChain { self.compute_beacon_block_attestation_reward_base(block, block_root, state) .map_err(|e| { error!( - self.log, - "Error calculating base block attestation reward"; - "error" => ?e + self.log, + "Error calculating base block attestation reward"; + "error" => ?e ); BeaconChainError::BlockRewardAttestationError })? } else { - self.compute_beacon_block_attestation_reward_altair(block, state) + self.compute_beacon_block_attestation_reward_altair_deneb(block, state) .map_err(|e| { error!( - self.log, - "Error calculating altair block attestation reward"; - "error" => ?e + self.log, + "Error calculating altair block attestation reward"; + "error" => ?e ); BeaconChainError::BlockRewardAttestationError })? 
@@ -173,7 +173,9 @@ impl BeaconChain { Ok(block_attestation_reward) } - fn compute_beacon_block_attestation_reward_altair>( + fn compute_beacon_block_attestation_reward_altair_deneb< + Payload: AbstractExecPayload, + >( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, @@ -192,6 +194,7 @@ impl BeaconChain { for attestation in block.body().attestations() { let data = &attestation.data; let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + // [Modified in Deneb:EIP7045] let participation_flag_indices = get_attestation_participation_flag_indices( state, data, diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index e43f2a8dd8..9312d4511d 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -3,7 +3,7 @@ use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, Logger}; use std::collections::HashMap; use std::sync::Arc; -use store::DatabaseBlock; +use store::{DatabaseBlock, ExecutionPayloadDeneb}; use task_executor::TaskExecutor; use tokio::sync::{ mpsc::{self, UnboundedSender}, @@ -97,6 +97,7 @@ fn reconstruct_default_header_block( let payload: ExecutionPayload = match fork { ForkName::Merge => ExecutionPayloadMerge::default().into(), ForkName::Capella => ExecutionPayloadCapella::default().into(), + ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::PayloadReconstruction(format!( "Block with fork variant {} has execution payload", @@ -714,19 +715,21 @@ mod tests { } #[tokio::test] - async fn check_all_blocks_from_altair_to_capella() { + async fn check_all_blocks_from_altair_to_deneb() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 8; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; + let deneb_fork_epoch = 6usize; let 
num_blocks_produced = num_epochs * slots_per_epoch; let mut spec = test_spec::(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); - let harness = get_harness(VALIDATOR_COUNT, spec); + let harness = get_harness(VALIDATOR_COUNT, spec.clone()); // go to bellatrix fork harness .extend_slots(bellatrix_fork_epoch * slots_per_epoch) @@ -833,17 +836,19 @@ mod tests { } #[tokio::test] - async fn check_fallback_altair_to_capella() { + async fn check_fallback_altair_to_deneb() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 8; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; + let deneb_fork_epoch = 6usize; let num_blocks_produced = num_epochs * slots_per_epoch; let mut spec = test_spec::(); spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); + spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); let harness = get_harness(VALIDATOR_COUNT, spec); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 987ea9e7c3..9f59733934 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,14 +7,22 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; +use crate::block_verification::POS_PANDA_BANNER; use 
crate::block_verification::{ - check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root, + check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, - IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, + IntoExecutionPendingBlock, +}; +use crate::block_verification_types::{ + AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, }; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; use crate::chain_config::ChainConfig; +use crate::data_availability_checker::{ + Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, +}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; @@ -41,6 +49,7 @@ use crate::observed_aggregates::{ use crate::observed_attesters::{ ObservedAggregators, ObservedAttesters, ObservedSyncAggregators, ObservedSyncContributors, }; +use crate::observed_blob_sidecars::ObservedBlobSidecars; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; @@ -57,8 +66,11 @@ use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; -use eth2::types::{EventKind, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; +use crate::{ + kzg_utils, metrics, AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, + BeaconSnapshot, CachedHead, +}; +use eth2::types::{EventKind, SseBlobSidecar, SseBlock, SseExtendedPayloadAttributes, SyncDuty}; use execution_layer::{ 
BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, @@ -70,6 +82,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; +use kzg::Kzg; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; use proto_array::{DoNotReOrg, ProposerHeadError}; @@ -106,12 +119,14 @@ use task_executor::{ShutdownReason, TaskExecutor}; use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; +use types::blob_sidecar::{BlobSidecarList, FixedBlobSidecarList}; +use types::sidecar::BlobItems; use types::*; pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. -type HashBlockTuple = (Hash256, Arc>); +type HashBlockTuple = (Hash256, RpcBlock); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. @@ -170,6 +185,34 @@ pub enum WhenSlotSkipped { Prev, } +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum AvailabilityProcessingStatus { + MissingComponents(Slot, Hash256), + Imported(Hash256), +} + +impl TryInto for AvailabilityProcessingStatus { + type Error = (); + + fn try_into(self) -> Result { + match self { + AvailabilityProcessingStatus::Imported(hash) => Ok(hash.into()), + _ => Err(()), + } + } +} + +impl TryInto for AvailabilityProcessingStatus { + type Error = (); + + fn try_into(self) -> Result { + match self { + AvailabilityProcessingStatus::Imported(hash) => Ok(hash), + _ => Err(()), + } + } +} + /// The result of a chain segment processing. pub enum ChainSegmentResult { /// Processing this chain segment finished successfully. @@ -192,9 +235,13 @@ pub enum ProduceBlockVerification { pub struct PrePayloadAttributes { pub proposer_index: u64, pub prev_randao: Hash256, + /// The block number of the block being built upon (same block as fcU `headBlockHash`). 
+ /// /// The parent block number is not part of the payload attributes sent to the EL, but *is* /// sent to builders via SSE. pub parent_block_number: u64, + /// The block root of the block being built upon (same block as fcU `headBlockHash`). + pub parent_beacon_block_root: Hash256, } /// Information about a state/block at a specific slot. @@ -359,6 +406,8 @@ pub struct BeaconChain { pub(crate) observed_sync_aggregators: RwLock>, /// Maintains a record of which validators have proposed blocks for each slot. pub observed_block_producers: RwLock>, + /// Maintains a record of blob sidecars seen over the gossip network. + pub(crate) observed_blob_sidecars: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -428,9 +477,18 @@ pub struct BeaconChain { pub validator_monitor: RwLock>, /// The slot at which blocks are downloaded back to. pub genesis_backfill_slot: Slot, + /// Provides a KZG verification and temporary storage for blocks and blobs as + /// they are collected and combined. + pub data_availability_checker: Arc>, + /// The KZG trusted setup used by this chain. + pub kzg: Option>, } -type BeaconBlockAndState = (BeaconBlock, BeaconState); +type BeaconBlockAndState = ( + BeaconBlock, + BeaconState, + Option>::Sidecar>>, +); impl FinalizationAndCanonicity { pub fn is_finalized(self) -> bool { @@ -543,11 +601,11 @@ impl BeaconChain { spec: &ChainSpec, log: &Logger, ) -> Result>, Error> { - let persisted_fork_choice = - match store.get_item::(&FORK_CHOICE_DB_KEY)? { - Some(fc) => fc, - None => return Ok(None), - }; + let Some(persisted_fork_choice) = + store.get_item::(&FORK_CHOICE_DB_KEY)? 
+ else { + return Ok(None); + }; let fc_store = BeaconForkChoiceStore::from_persisted(persisted_fork_choice.fork_choice_store, store)?; @@ -590,6 +648,13 @@ impl BeaconChain { Ok(()) } + pub fn persist_data_availability_checker(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::PERSIST_DATA_AVAILABILITY_CHECKER); + self.data_availability_checker.persist_all()?; + + Ok(()) + } + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is /// unavailable. /// @@ -670,10 +735,10 @@ impl BeaconChain { start_slot, end_slot, || { - ( + Ok(( head.beacon_state.clone_with_only_committee_caches(), head.beacon_block_root, - ) + )) }, &self.spec, )?; @@ -767,10 +832,10 @@ impl BeaconChain { start_slot, end_slot, || { - ( + Ok(( head.beacon_state.clone_with_only_committee_caches(), head.beacon_state_root(), - ) + )) }, &self.spec, )?; @@ -809,10 +874,10 @@ impl BeaconChain { /// /// May return a database error. pub fn state_root_at_slot(&self, request_slot: Slot) -> Result, Error> { - if request_slot > self.slot()? { - return Ok(None); - } else if request_slot == self.spec.genesis_slot { + if request_slot == self.spec.genesis_slot { return Ok(Some(self.genesis_state_root)); + } else if request_slot > self.slot()? { + return Ok(None); } // Check limits w.r.t historic state bounds. @@ -889,10 +954,10 @@ impl BeaconChain { /// /// May return a database error. fn block_root_at_slot_skips_none(&self, request_slot: Slot) -> Result, Error> { - if request_slot > self.slot()? { - return Ok(None); - } else if request_slot == self.spec.genesis_slot { + if request_slot == self.spec.genesis_slot { return Ok(Some(self.genesis_block_root)); + } else if request_slot > self.slot()? { + return Ok(None); } let prev_slot = request_slot.saturating_sub(1_u64); @@ -952,10 +1017,10 @@ impl BeaconChain { /// /// May return a database error. 
fn block_root_at_slot_skips_prev(&self, request_slot: Slot) -> Result, Error> { - if request_slot > self.slot()? { - return Ok(None); - } else if request_slot == self.spec.genesis_slot { + if request_slot == self.spec.genesis_slot { return Ok(Some(self.genesis_block_root)); + } else if request_slot > self.slot()? { + return Ok(None); } // Try an optimized path of reading the root directly from the head state. @@ -1040,6 +1105,15 @@ impl BeaconChain { ) } + pub fn get_blobs_checking_early_attester_cache( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.early_attester_cache + .get_blobs(*block_root) + .map_or_else(|| self.get_blobs(block_root), Ok) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -1104,6 +1178,17 @@ impl BeaconChain { .map(Some) } + /// Returns the blobs at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { + match self.store.get_blobs(block_root)? { + Some(blobs) => Ok(blobs), + None => Ok(BlobSidecarList::default()), + } + } + pub fn get_blinded_block( &self, block_root: &Hash256, @@ -1923,6 +2008,21 @@ impl BeaconChain { }) } + pub fn verify_blob_sidecar_for_gossip( + self: &Arc, + blob_sidecar: SignedBlobSidecar, + subnet_id: u64, + ) -> Result, GossipBlobError> { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); + let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); + blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self).map( + |v| { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); + v + }, + ) + } + /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it pub fn verify_optimistic_update_for_gossip( self: &Arc, @@ -2428,7 +2528,7 @@ impl BeaconChain { /// This method is potentially long-running and should not run on the core executor. 
pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>>, + chain_segment: Vec>, ) -> Result>, ChainSegmentResult> { // This function will never import any blocks. let imported_blocks = 0; @@ -2445,14 +2545,14 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. - if let Err(e) = block.fork_name(&self.spec) { + if let Err(e) = block.as_block().fork_name(&self.spec) { return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), }); } - let block_root = get_block_root(&block); + let block_root = block.block_root(); if let Some((child_parent_root, child_slot)) = children.get(i) { // If this block has a child in this chain segment, ensure that its parent root matches @@ -2476,7 +2576,7 @@ impl BeaconChain { } } - match check_block_relevancy(&block, block_root, self) { + match check_block_relevancy(block.as_block(), block_root, self) { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. @@ -2534,7 +2634,7 @@ impl BeaconChain { /// `Self::process_block`. pub async fn process_chain_segment( self: &Arc, - chain_segment: Vec>>, + chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2558,7 +2658,7 @@ impl BeaconChain { while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. - let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let start_epoch = block.epoch(); // The `last_index` indicates the position of the first block in an epoch greater // than the current epoch: partitioning the blocks into a run of blocks in the same @@ -2566,9 +2666,7 @@ impl BeaconChain { // the same `BeaconState`. 
let last_index = filtered_chain_segment .iter() - .position(|(_root, block)| { - block.slot().epoch(T::EthSpec::slots_per_epoch()) > start_epoch - }) + .position(|(_root, block)| block.epoch() > start_epoch) .unwrap_or(filtered_chain_segment.len()); let mut blocks = filtered_chain_segment.split_off(last_index); @@ -2608,7 +2706,24 @@ impl BeaconChain { ) .await { - Ok(_) => imported_blocks += 1, + Ok(status) => { + match status { + AvailabilityProcessingStatus::Imported(_) => { + // The block was imported successfully. + imported_blocks += 1; + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + warn!(self.log, "Blobs missing in response to range request"; + "block_root" => ?block_root, "slot" => slot); + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::AvailabilityCheck( + AvailabilityCheckError::MissingBlobs, + ), + }; + } + } + } Err(error) => { return ChainSegmentResult::Failed { imported_blocks, @@ -2646,12 +2761,14 @@ impl BeaconChain { match GossipVerifiedBlock::new(block, &chain) { Ok(verified) => { + let commitments_formatted = verified.block.commitments_formatted(); debug!( chain.log, "Successfully verified gossip block"; "graffiti" => graffiti_string, "slot" => slot, "root" => ?verified.block_root(), + "commitments" => commitments_formatted, ); Ok(verified) @@ -2676,6 +2793,117 @@ impl BeaconChain { .map_err(BeaconChainError::TokioJoin)? } + /// Cache the blob in the processing cache, process it, then evict it from the cache if it was + /// imported or errors. + pub async fn process_gossip_blob( + self: &Arc, + blob: GossipVerifiedBlob, + ) -> Result> { + let block_root = blob.block_root(); + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its blobs again. 
+ if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown); + } + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + event_handler.register(EventKind::BlobSidecar(SseBlobSidecar::from_blob_sidecar( + blob.as_blob(), + ))); + } + } + + self.data_availability_checker + .notify_gossip_blob(blob.as_blob().slot, block_root, &blob); + let r = self.check_gossip_blob_availability_and_import(blob).await; + self.remove_notified(&block_root, r) + } + + /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was + /// imported or errors. + pub async fn process_rpc_blobs( + self: &Arc, + slot: Slot, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result> { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its blobs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown); + } + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_blob_sidecar_subscribers() { + for blob in blobs.iter().filter_map(|maybe_blob| maybe_blob.as_ref()) { + event_handler.register(EventKind::BlobSidecar( + SseBlobSidecar::from_blob_sidecar(blob), + )); + } + } + } + + self.data_availability_checker + .notify_rpc_blobs(slot, block_root, &blobs); + let r = self + .check_rpc_blob_availability_and_import(slot, block_root, blobs) + .await; + self.remove_notified(&block_root, r) + } + + /// Remove any block components from the *processing cache* if we no longer require them. If the + /// block was imported full or erred, we no longer require them. 
+ fn remove_notified( + &self, + block_root: &Hash256, + r: Result>, + ) -> Result> { + let has_missing_components = + matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _))); + if !has_missing_components { + self.data_availability_checker.remove_notified(block_root); + } + r + } + + /// Wraps `process_block` in logic to cache the block's commitments in the processing cache + /// and evict if the block was imported or erred. + pub async fn process_block_with_early_caching>( + self: &Arc, + block_root: Hash256, + unverified_block: B, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result> { + if let Ok(commitments) = unverified_block + .block() + .message() + .body() + .blob_kzg_commitments() + { + self.data_availability_checker.notify_block_commitments( + unverified_block.block().slot(), + block_root, + commitments.clone(), + ); + }; + let r = self + .process_block(block_root, unverified_block, notify_execution_layer, || { + Ok(()) + }) + .await; + self.remove_notified(&block_root, r) + } + /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// @@ -2683,6 +2911,7 @@ impl BeaconChain { /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` + /// - `RpcBlock` /// /// ## Errors /// @@ -2694,15 +2923,14 @@ impl BeaconChain { unverified_block: B, notify_execution_layer: NotifyExecutionLayer, publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, - ) -> Result> { + ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - // Clone the block so we can provide it to the event handler. - let block = unverified_block.block().clone(); + let block_slot = unverified_block.block().slot(); // A small closure to group the verification and import errors. 
let chain = self.clone(); @@ -2713,26 +2941,42 @@ impl BeaconChain { notify_execution_layer, )?; publish_fn()?; - chain - .import_execution_pending_block(execution_pending) - .await + let executed_block = chain.into_executed_block(execution_pending).await?; + match executed_block { + ExecutedBlock::Available(block) => { + self.import_available_block(Box::new(block)).await + } + ExecutedBlock::AvailabilityPending(block) => { + self.check_block_availability_and_import(block).await + } + } }; // Verify and import the block. match import_block.await { // The block was successfully verified and imported. Yay. - Ok(block_root) => { + Ok(status @ AvailabilityProcessingStatus::Imported(block_root)) => { trace!( self.log, "Beacon block imported"; "block_root" => ?block_root, - "block_slot" => %block.slot(), + "block_slot" => block_slot, ); // Increment the Prometheus counter for block processing successes. metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - Ok(block_root) + Ok(status) + } + Ok(status @ AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + trace!( + self.log, + "Beacon block awaiting blobs"; + "block_root" => ?block_root, + "block_slot" => slot, + ); + + Ok(status) } Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { debug!( @@ -2764,36 +3008,27 @@ impl BeaconChain { } } - /// Accepts a fully-verified block and imports it into the chain without performing any - /// additional verification. + /// Accepts a fully-verified block and awaits on it's payload verification handle to + /// get a fully `ExecutedBlock` /// - /// An error is returned if the block was unable to be imported. It may be partially imported - /// (i.e., this function is not atomic). - pub async fn import_execution_pending_block( + /// An error is returned if the verification handle couldn't be awaited. 
+ pub async fn into_executed_block( self: Arc, execution_pending_block: ExecutionPendingBlock, - ) -> Result> { + ) -> Result, BlockError> { let ExecutionPendingBlock { block, - block_root, - state, - parent_block, - confirmed_state_roots, + import_data, payload_verification_handle, - parent_eth1_finalization_data, - consensus_context, } = execution_pending_block; - let PayloadVerificationOutcome { - payload_verification_status, - is_valid_merge_transition_block, - } = payload_verification_handle + let payload_verification_outcome = payload_verification_handle .await .map_err(BeaconChainError::TokioJoin)? .ok_or(BeaconChainError::RuntimeShutdown)??; // Log the PoS pandas if a merge transition just occurred. - if is_valid_merge_transition_block { + if payload_verification_outcome.is_valid_merge_transition_block { info!(self.log, "{}", POS_PANDA_BANNER); info!( self.log, @@ -2821,9 +3056,101 @@ impl BeaconChain { .into_root() ); } + Ok(ExecutedBlock::new( + block, + import_data, + payload_verification_outcome, + )) + } + /* Import methods */ + + /// Checks if the block is available, and imports immediately if so, otherwise caches the block + /// in the data availability checker. + async fn check_block_availability_and_import( + self: &Arc, + block: AvailabilityPendingExecutedBlock, + ) -> Result> { + let slot = block.block.slot(); + let availability = self + .data_availability_checker + .put_pending_executed_block(block)?; + self.process_availability(slot, availability).await + } + + /// Checks if the provided blob can make any cached blocks available, and imports immediately + /// if so, otherwise caches the blob in the data availability checker. 
+ async fn check_gossip_blob_availability_and_import( + self: &Arc, + blob: GossipVerifiedBlob, + ) -> Result> { + let slot = blob.slot(); + let availability = self.data_availability_checker.put_gossip_blob(blob)?; + + self.process_availability(slot, availability).await + } + + /// Checks if the provided blobs can make any cached blocks available, and imports immediately + /// if so, otherwise caches the blob in the data availability checker. + async fn check_rpc_blob_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result> { + let availability = self + .data_availability_checker + .put_rpc_blobs(block_root, blobs)?; + + self.process_availability(slot, availability).await + } + + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` + /// + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). 
+ async fn process_availability( + self: &Arc, + slot: Slot, + availability: Availability, + ) -> Result> { + match availability { + Availability::Available(block) => { + // This is the time since start of the slot where all the components of the block have become available + let delay = + get_slot_delay_ms(timestamp_now(), block.block.slot(), &self.slot_clock); + metrics::observe_duration(&metrics::BLOCK_AVAILABILITY_DELAY, delay); + // Block is fully available, import into fork choice + self.import_available_block(block).await + } + Availability::MissingComponents(block_root) => Ok( + AvailabilityProcessingStatus::MissingComponents(slot, block_root), + ), + } + } + + pub async fn import_available_block( + self: &Arc, + block: Box>, + ) -> Result> { + let AvailableExecutedBlock { + block, + import_data, + payload_verification_outcome, + } = *block; + + let BlockImportData { + block_root, + state, + parent_block, + parent_eth1_finalization_data, + confirmed_state_roots, + consensus_context, + } = import_data; + + // import let chain = self.clone(); - let block_hash = self + let block_root = self .spawn_blocking_handle( move || { chain.import_block( @@ -2831,7 +3158,7 @@ impl BeaconChain { block_root, state, confirmed_state_roots, - payload_verification_status, + payload_verification_outcome.payload_verification_status, parent_block, parent_eth1_finalization_data, consensus_context, @@ -2840,11 +3167,10 @@ impl BeaconChain { "payload_verification_handle", ) .await??; - - Ok(block_hash) + Ok(AvailabilityProcessingStatus::Imported(block_root)) } - /// Accepts a fully-verified block and imports it into the chain without performing any + /// Accepts a fully-verified and available block and imports it into the chain without performing any /// additional verification. /// /// An error is returned if the block was unable to be imported. 
It may be partially imported @@ -2852,7 +3178,7 @@ impl BeaconChain { #[allow(clippy::too_many_arguments)] fn import_block( &self, - signed_block: Arc>, + signed_block: AvailableBlock, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, @@ -2905,7 +3231,9 @@ impl BeaconChain { let mut fork_choice = self.canonical_head.fork_choice_write_lock(); // Do not import a block that doesn't descend from the finalized root. - check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, &signed_block)?; + let signed_block = + check_block_is_finalized_checkpoint_or_descendant(self, &fork_choice, signed_block)?; + let block = signed_block.message(); // Register the new block with the fork choice service. { @@ -3016,6 +3344,8 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 + let (_, signed_block, blobs) = signed_block.deconstruct(); + let block = signed_block.message(); ops.extend( confirmed_state_roots .into_iter() @@ -3023,9 +3353,21 @@ impl BeaconChain { ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); + + if let Some(blobs) = blobs { + if !blobs.is_empty() { + debug!( + self.log, "Writing blobs to store"; + "block_root" => %block_root, + "count" => blobs.len(), + ); + ops.push(StoreOp::PutBlobs(block_root, blobs)); + } + } + let txn_lock = self.store.hot_db.begin_rw_transaction(); - if let Err(e) = self.store.do_atomically(ops) { + if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { error!( self.log, "Database write failed!"; @@ -3143,9 +3485,7 @@ impl BeaconChain { state: &BeaconState, ) -> Result<(), BlockError> { // Only perform the weak subjectivity check if it was configured. 
- let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint { - checkpoint - } else { + let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else { return Ok(()); }; // Note: we're using the finalized checkpoint from the head state, rather than fork @@ -3226,7 +3566,7 @@ impl BeaconChain { // Sync aggregate. if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot - let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let duty_epoch = block.epoch(); match self.sync_committee_at_epoch(duty_epoch) { Ok(sync_committee) => { @@ -3507,7 +3847,7 @@ impl BeaconChain { parent_block_slot: Slot, ) { // Do not write to eth1 finalization cache for blocks older than 5 epochs. - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch { + if block.epoch() + 5 < current_epoch { return; } @@ -3638,6 +3978,9 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + // Part 1/2 (blocking) // // Load the parent state from disk. 
@@ -3672,9 +4015,6 @@ impl BeaconChain { self: &Arc, slot: Slot, ) -> Result<(BeaconState, Option), BlockProductionError> { - metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); - let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); - let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); @@ -3877,10 +4217,10 @@ impl BeaconChain { let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); let head_block_root = cached_head.head_block_root(); - let parent_block_root = cached_head.parent_block_root(); + let head_parent_block_root = cached_head.parent_block_root(); // The proposer head must be equal to the canonical head or its parent. - if proposer_head != head_block_root && proposer_head != parent_block_root { + if proposer_head != head_block_root && proposer_head != head_parent_block_root { warn!( self.log, "Unable to compute payload attributes"; @@ -3959,7 +4299,7 @@ impl BeaconChain { // Get the `prev_randao` and parent block number. let head_block_number = cached_head.head_block_number()?; - let (prev_randao, parent_block_number) = if proposer_head == parent_block_root { + let (prev_randao, parent_block_number) = if proposer_head == head_parent_block_root { ( cached_head.parent_random()?, head_block_number.saturating_sub(1), @@ -3972,6 +4312,7 @@ impl BeaconChain { proposer_index, prev_randao, parent_block_number, + parent_beacon_block_root: proposer_head, })) } @@ -4353,9 +4694,14 @@ impl BeaconChain { // allows it to run concurrently with things like attestation packing. 
let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Merge(_) | BeaconState::Capella(_) => { - let prepare_payload_handle = - get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; + BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => { + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + parent_root, + proposer_index, + builder_params, + )?; Some(prepare_payload_handle) } }; @@ -4559,90 +4905,145 @@ impl BeaconChain { bls_to_execution_changes, } = partial_beacon_block; - let inner_block = match &state { - BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyBase { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - _phantom: PhantomData, - }, - }), - BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - _phantom: PhantomData, - }, - }), - BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: 
deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .to_payload() - .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - }, - }), - BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyCapella { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .to_payload() - .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), - }, - }), + let (inner_block, blobs_opt, proofs_opt) = match &state { + BeaconState::Base(_) => ( + BeaconBlock::Base(BeaconBlockBase { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + _phantom: PhantomData, + }, + }), + None, + None, + ), + BeaconState::Altair(_) => ( + BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + 
deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + None, + None, + ), + BeaconState::Merge(_) => { + let (payload, _, _, _) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); + ( + BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + }, + }), + None, + None, + ) + } + BeaconState::Capella(_) => { + let (payload, _, _, _) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); + ( + BeaconBlock::Capella(BeaconBlockCapella { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + }, + }), + None, + None, + ) + } + BeaconState::Deneb(_) => { + let (payload, kzg_commitments, blobs, proofs) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
+ .deconstruct(); + ( + BeaconBlock::Deneb(BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + blob_kzg_commitments: kzg_commitments + .ok_or(BlockProductionError::InvalidPayloadFork)?, + }, + }), + blobs, + proofs, + ) + } }; let block = SignedBeaconBlock::from_block( @@ -4656,6 +5057,7 @@ impl BeaconChain { self.log, "Produced block on state"; "block_size" => block_size, + "slot" => block.slot(), ); metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); @@ -4669,8 +5071,10 @@ impl BeaconChain { ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao, ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification, }; + // Use a context without block root or proposer index so that both are checked. 
let mut ctxt = ConsensusContext::new(block.slot()); + per_block_processing( &mut state, &block, @@ -4689,6 +5093,57 @@ impl BeaconChain { let (mut block, _) = block.deconstruct(); *block.state_root_mut() = state_root; + let blobs_verification_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES); + let maybe_sidecar_list = match (blobs_opt, proofs_opt) { + (Some(blobs_or_blobs_roots), Some(proofs)) => { + let expected_kzg_commitments = + block.body().blob_kzg_commitments().map_err(|_| { + BlockProductionError::InvalidBlockVariant( + "deneb block does not contain kzg commitments".to_string(), + ) + })?; + + if expected_kzg_commitments.len() != blobs_or_blobs_roots.len() { + return Err(BlockProductionError::MissingKzgCommitment(format!( + "Missing KZG commitment for slot {}. Expected {}, got: {}", + block.slot(), + blobs_or_blobs_roots.len(), + expected_kzg_commitments.len() + ))); + } + + let kzg_proofs = Vec::from(proofs); + + if let Some(blobs) = blobs_or_blobs_roots.blobs() { + let kzg = self + .kzg + .as_ref() + .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, + ) + .map_err(BlockProductionError::KzgError)?; + } + + Some( + Sidecar::build_sidecar( + blobs_or_blobs_roots, + &block, + expected_kzg_commitments, + kzg_proofs, + ) + .map_err(BlockProductionError::FailedToBuildBlobSidecars)?, + ) + } + _ => None, + }; + + drop(blobs_verification_timer); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); trace!( @@ -4699,7 +5154,7 @@ impl BeaconChain { "slot" => block.slot() ); - Ok((block, state)) + Ok((block, state, maybe_sidecar_list)) } /// This method must be called whenever an execution engine indicates that a payload is @@ -4879,14 +5334,11 @@ impl BeaconChain { ) .await??; - let (forkchoice_update_params, pre_payload_attributes) = - if let Some((fcu, Some(pre_payload))) = maybe_prep_data { - (fcu, 
pre_payload) - } else { - // Appropriate log messages have already been logged above and in - // `get_pre_payload_attributes`. - return Ok(()); - }; + let Some((forkchoice_update_params, Some(pre_payload_attributes))) = maybe_prep_data else { + // Appropriate log messages have already been logged above and in + // `get_pre_payload_attributes`. + return Ok(()); + }; // If the execution layer doesn't have any proposer data for this validator then we assume // it's not connected to this BN and no action is required. @@ -4899,7 +5351,7 @@ impl BeaconChain { return Ok(()); } - // Fetch payoad attributes from the execution layer's cache, or compute them from scratch + // Fetch payload attributes from the execution layer's cache, or compute them from scratch // if no matching entry is found. This saves recomputing the withdrawals which can take // considerable time to compute if a state load is required. let head_root = forkchoice_update_params.head_root; @@ -4909,9 +5361,10 @@ impl BeaconChain { { payload_attributes } else { - let withdrawals = match self.spec.fork_name_at_slot::(prepare_slot) { + let prepare_slot_fork = self.spec.fork_name_at_slot::(prepare_slot); + let withdrawals = match prepare_slot_fork { ForkName::Base | ForkName::Altair | ForkName::Merge => None, - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { let chain = self.clone(); self.spawn_blocking_handle( move || { @@ -4924,6 +5377,11 @@ impl BeaconChain { } }; + let parent_beacon_block_root = match prepare_slot_fork { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => None, + ForkName::Deneb => Some(pre_payload_attributes.parent_beacon_block_root), + }; + let payload_attributes = PayloadAttributes::new( self.slot_clock .start_of(prepare_slot) @@ -4932,6 +5390,7 @@ impl BeaconChain { pre_payload_attributes.prev_randao, execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals.map(Into::into), + parent_beacon_block_root, ); execution_layer @@ 
-4972,23 +5431,20 @@ impl BeaconChain { } } - let till_prepare_slot = - if let Some(duration) = self.slot_clock.duration_to_slot(prepare_slot) { - duration - } else { - // `SlotClock::duration_to_slot` will return `None` when we are past the start - // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case, - // it's too late. - // - // This scenario might occur on an overloaded/under-resourced node. - warn!( - self.log, - "Delayed proposer preparation"; - "prepare_slot" => prepare_slot, - "validator" => proposer, - ); - return Ok(()); - }; + let Some(till_prepare_slot) = self.slot_clock.duration_to_slot(prepare_slot) else { + // `SlotClock::duration_to_slot` will return `None` when we are past the start + // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case, + // it's too late. + // + // This scenario might occur on an overloaded/under-resourced node. + warn!( + self.log, + "Delayed proposer preparation"; + "prepare_slot" => prepare_slot, + "validator" => proposer, + ); + return Ok(()); + }; // If we are close enough to the proposal slot, send an fcU, which will have payload // attributes filled in by the execution layer cache we just primed. @@ -5571,14 +6027,16 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let state_root = head_block.state_root; - let state = self + let block_state_root = head_block.state_root; + let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let (state_root, state) = self .store .get_inconsistent_state_for_attestation_verification_only( - &state_root, - Some(head_block.slot), + &head_block_root, + max_slot, + block_state_root, )? 
- .ok_or(Error::MissingBeaconState(head_block.state_root))?; + .ok_or(Error::MissingBeaconState(block_state_root))?; (state, state_root) }; @@ -5925,6 +6383,12 @@ impl BeaconChain { gossip_attested || block_attested || aggregated || produced_block } + + /// The epoch at which we require a data availability check in block processing. + /// `None` if the `Deneb` fork is disabled. + pub fn data_availability_boundary(&self) -> Option { + self.data_availability_checker.data_availability_boundary() + } } impl Drop for BeaconChain { @@ -5932,6 +6396,7 @@ impl Drop for BeaconChain { let drop = || -> Result<(), Error> { self.persist_head_and_fork_choice()?; self.persist_op_pool()?; + self.persist_data_availability_checker()?; self.persist_eth1_cache() }; diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 9b2edbd8b5..2a42b49b42 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -321,9 +321,17 @@ where .deconstruct() .0; - let state = self + let max_slot = self + .justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + let (_, state) = self .store - .get_state(&justified_block.state_root(), Some(justified_block.slot())) + .get_advanced_hot_state( + self.justified_checkpoint.root, + max_slot, + justified_block.state_root(), + ) .map_err(Error::FailedToReadState)? 
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?; diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 7d89df9829..afb1324776 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,4 +1,4 @@ -use serde_derive::Serialize; +use serde::Serialize; use std::sync::Arc; use types::{ beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs new file mode 100644 index 0000000000..ca69d2ab6f --- /dev/null +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -0,0 +1,548 @@ +use derivative::Derivative; +use slot_clock::SlotClock; +use std::sync::Arc; + +use crate::beacon_chain::{ + BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, +}; +use crate::block_verification::cheap_state_advance_to_obtain_committees; +use crate::data_availability_checker::AvailabilityCheckError; +use crate::kzg_utils::{validate_blob, validate_blobs}; +use crate::{metrics, BeaconChainError}; +use kzg::{Kzg, KzgCommitment}; +use slog::{debug, warn}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash::TreeHash; +use types::blob_sidecar::BlobIdentifier; +use types::{ + BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256, + SignedBlobSidecar, Slot, +}; + +/// An error occurred while validating a gossip blob. +#[derive(Debug)] +pub enum GossipBlobError { + /// The blob sidecar is from a slot that is later than the current slot (with respect to the + /// gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. 
+ FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + + /// There was an error whilst processing the blob. It is not known if it is + /// valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this blob due to an internal error. It's + /// unclear if the blob is valid. + BeaconChainError(BeaconChainError), + + /// The `BlobSidecar` was gossiped over an incorrect subnet. + /// + /// ## Peer scoring + /// + /// The blob is invalid or the peer is faulty. + InvalidSubnet { expected: u64, received: u64 }, + + /// The sidecar corresponds to a slot older than the finalized head slot. + /// + /// ## Peer scoring + /// + /// It's unclear if this blob is valid, but this blob is for a finalized slot and is + /// therefore useless to us. + PastFinalizedSlot { + blob_slot: Slot, + finalized_slot: Slot, + }, + + /// The proposer index specified in the sidecar does not match the locally computed + /// proposer index. + /// + /// ## Peer scoring + /// + /// The blob is invalid and the peer is faulty. + ProposerIndexMismatch { sidecar: usize, local: usize }, + + /// The proposal signature in invalid. + /// + /// ## Peer scoring + /// + /// The blob is invalid and the peer is faulty. + ProposerSignatureInvalid, + + /// The proposal_index corresponding to blob.beacon_block_root is not known. + /// + /// ## Peer scoring + /// + /// The blob is invalid and the peer is faulty. + UnknownValidator(u64), + + /// The provided blob is not from a later slot than its parent. + /// + /// ## Peer scoring + /// + /// The blob is invalid and the peer is faulty. + BlobIsNotLaterThanParent { blob_slot: Slot, parent_slot: Slot }, + + /// The provided blob's parent block is unknown. + /// + /// ## Peer scoring + /// + /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty. 
+ BlobParentUnknown(Arc>), + + /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple + /// over gossip or no gossip sources. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, but we do not forward it over gossip. + RepeatBlob { + proposer: u64, + slot: Slot, + index: u64, + }, +} + +impl std::fmt::Display for GossipBlobError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GossipBlobError::BlobParentUnknown(blob_sidecar) => { + write!( + f, + "BlobParentUnknown(parent_root:{})", + blob_sidecar.block_parent_root + ) + } + other => write!(f, "{:?}", other), + } + } +} + +impl From for GossipBlobError { + fn from(e: BeaconChainError) -> Self { + GossipBlobError::BeaconChainError(e) + } +} + +impl From for GossipBlobError { + fn from(e: BeaconStateError) -> Self { + GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + } +} + +pub type GossipVerifiedBlobList = VariableList< + GossipVerifiedBlob, + <::EthSpec as EthSpec>::MaxBlobsPerBlock, +>; + +/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on +/// the p2p network. +#[derive(Debug)] +pub struct GossipVerifiedBlob { + blob: SignedBlobSidecar, +} + +impl GossipVerifiedBlob { + pub fn new( + blob: SignedBlobSidecar, + chain: &BeaconChain, + ) -> Result> { + let blob_index = blob.message.index; + validate_blob_sidecar_for_gossip(blob, blob_index, chain) + } + /// Construct a `GossipVerifiedBlob` that is assumed to be valid. + /// + /// This should ONLY be used for testing. 
+ pub fn __assumed_valid(blob: SignedBlobSidecar) -> Self { + Self { blob } + } + pub fn id(&self) -> BlobIdentifier { + self.blob.message.id() + } + pub fn block_root(&self) -> Hash256 { + self.blob.message.block_root + } + pub fn to_blob(self) -> Arc> { + self.blob.message + } + pub fn as_blob(&self) -> &BlobSidecar { + &self.blob.message + } + pub fn signed_blob(&self) -> SignedBlobSidecar { + self.blob.clone() + } + pub fn slot(&self) -> Slot { + self.blob.message.slot + } + pub fn index(&self) -> u64 { + self.blob.message.index + } + pub fn kzg_commitment(&self) -> KzgCommitment { + self.blob.message.kzg_commitment + } + pub fn proposer_index(&self) -> u64 { + self.blob.message.proposer_index + } +} + +pub fn validate_blob_sidecar_for_gossip( + signed_blob_sidecar: SignedBlobSidecar, + subnet: u64, + chain: &BeaconChain, +) -> Result, GossipBlobError> { + let blob_slot = signed_blob_sidecar.message.slot; + let blob_index = signed_blob_sidecar.message.index; + let block_parent_root = signed_blob_sidecar.message.block_parent_root; + let blob_proposer_index = signed_blob_sidecar.message.proposer_index; + let block_root = signed_blob_sidecar.message.block_root; + let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch()); + + // Verify that the blob_sidecar was received on the correct subnet. + if blob_index != subnet { + return Err(GossipBlobError::InvalidSubnet { + expected: blob_index, + received: subnet, + }); + } + + let blob_root = get_blob_root(&signed_blob_sidecar); + + // Verify that the sidecar is not from a future slot. 
+ let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > latest_permissible_slot { + return Err(GossipBlobError::FutureSlot { + message_slot: blob_slot, + latest_permissible_slot, + }); + } + + // Verify that the sidecar slot is greater than the latest finalized slot + let latest_finalized_slot = chain + .head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + if blob_slot <= latest_finalized_slot { + return Err(GossipBlobError::PastFinalizedSlot { + blob_slot, + finalized_slot: latest_finalized_slot, + }); + } + + // Verify that this is the first blob sidecar received for the (sidecar.block_root, sidecar.index) tuple + if chain + .observed_blob_sidecars + .read() + .is_known(&signed_blob_sidecar.message) + .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? + { + return Err(GossipBlobError::RepeatBlob { + proposer: blob_proposer_index, + slot: blob_slot, + index: blob_index, + }); + } + + // We have already verified that the blob is past finalization, so we can + // just check fork choice for the block's parent. + let Some(parent_block) = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_parent_root) + else { + return Err(GossipBlobError::BlobParentUnknown( + signed_blob_sidecar.message, + )); + }; + + if parent_block.slot >= blob_slot { + return Err(GossipBlobError::BlobIsNotLaterThanParent { + blob_slot, + parent_slot: parent_block.slot, + }); + } + + // Note: We check that the proposer_index matches against the shuffling first to avoid + // signature verification against an invalid proposer_index. 
+ let proposer_shuffling_root = + if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch { + parent_block + .next_epoch_shuffling_id + .shuffling_decision_block + } else { + parent_block.root + }; + + let proposer_opt = chain + .beacon_proposer_cache + .lock() + .get_slot::(proposer_shuffling_root, blob_slot); + + let (proposer_index, fork) = if let Some(proposer) = proposer_opt { + (proposer.index, proposer.fork) + } else { + debug!( + chain.log, + "Proposer shuffling cache miss for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + if let Some(mut snapshot) = chain + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_cloned(block_parent_root, CloneConfig::committee_caches_only()) + }) + { + if snapshot.beacon_state.slot() == blob_slot { + debug!( + chain.log, + "Cloning snapshot cache state for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + ( + snapshot + .beacon_state + .get_beacon_proposer_index(blob_slot, &chain.spec)?, + snapshot.beacon_state.fork(), + ) + } else { + debug!( + chain.log, + "Cloning and advancing snapshot cache state for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + let state = + cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut snapshot.beacon_state, + Some(snapshot.beacon_block_root), + blob_slot, + &chain.spec, + )?; + ( + state.get_beacon_proposer_index(blob_slot, &chain.spec)?, + state.fork(), + ) + } + } + // Need to advance the state to get the proposer index + else { + warn!( + chain.log, + "Snapshot cache miss for blob verification"; + "block_root" => %block_root, + "index" => %blob_index, + ); + + let parent_block = chain + .get_blinded_block(&block_parent_root) + .map_err(GossipBlobError::BeaconChainError)? 
+ .ok_or_else(|| { + GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root)) + })?; + + let mut parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot()))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state {:?}", + parent_block.state_root() + )) + })?; + let state = cheap_state_advance_to_obtain_committees::<_, GossipBlobError>( + &mut parent_state, + Some(parent_block.state_root()), + blob_slot, + &chain.spec, + )?; + + let proposers = state.get_beacon_proposer_indices(&chain.spec)?; + let proposer_index = *proposers + .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) + .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + + // Prime the proposer shuffling cache with the newly-learned value. + chain.beacon_proposer_cache.lock().insert( + blob_epoch, + proposer_shuffling_root, + proposers, + state.fork(), + )?; + (proposer_index, state.fork()) + } + }; + + if proposer_index != blob_proposer_index as usize { + return Err(GossipBlobError::ProposerIndexMismatch { + sidecar: blob_proposer_index as usize, + local: proposer_index, + }); + } + + // Signature verification + let signature_is_valid = { + let pubkey_cache = chain + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) + .map_err(GossipBlobError::BeaconChainError)?; + + let pubkey = pubkey_cache + .get(proposer_index) + .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; + + signed_blob_sidecar.verify_signature( + Some(blob_root), + pubkey, + &fork, + chain.genesis_validators_root, + &chain.spec, + ) + }; + + if !signature_is_valid { + return Err(GossipBlobError::ProposerSignatureInvalid); + } + + // Now the signature is valid, store the proposal so we don't accept another blob sidecar + // with the same `BlobIdentifier`. 
+    // It's important to double-check that the proposer still hasn't been observed so we don't
+    // have a race-condition when verifying two blob sidecars simultaneously.
+    //
+    // Note: If this BlobSidecar goes on to fail full verification, we do not evict it from the seen_cache
+    // as alternate blob_sidecars for the same identifier can still be retrieved
+    // over rpc. Evicting them from this cache would allow faster propagation over gossip. So we allow
+    // retrieval of potentially valid blobs over rpc, but try to punish the proposer for signing
+    // invalid messages. See this issue for more background:
+    // https://github.com/ethereum/consensus-specs/issues/3261
+    if chain
+        .observed_blob_sidecars
+        .write()
+        .observe_sidecar(&signed_blob_sidecar.message)
+        .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
+    {
+        return Err(GossipBlobError::RepeatBlob {
+            proposer: proposer_index as u64,
+            slot: blob_slot,
+            index: blob_index,
+        });
+    }
+
+    Ok(GossipVerifiedBlob {
+        blob: signed_blob_sidecar,
+    })
+}
+
+/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
+/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`.
+#[derive(Debug, Derivative, Clone, Encode, Decode)] +#[derivative(PartialEq, Eq)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgVerifiedBlob { + blob: Arc>, +} + +impl PartialOrd for KzgVerifiedBlob { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for KzgVerifiedBlob { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.blob.cmp(&other.blob) + } +} + +impl KzgVerifiedBlob { + pub fn to_blob(self) -> Arc> { + self.blob + } + pub fn as_blob(&self) -> &BlobSidecar { + &self.blob + } + pub fn clone_blob(&self) -> Arc> { + self.blob.clone() + } + pub fn block_root(&self) -> Hash256 { + self.blob.block_root + } + pub fn blob_index(&self) -> u64 { + self.blob.index + } +} + +#[cfg(test)] +impl KzgVerifiedBlob { + pub fn new(blob: BlobSidecar) -> Self { + Self { + blob: Arc::new(blob), + } + } +} + +/// Complete kzg verification for a `GossipVerifiedBlob`. +/// +/// Returns an error if the kzg verification check fails. +pub fn verify_kzg_for_blob( + blob: Arc>, + kzg: &Kzg, +) -> Result, AvailabilityCheckError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); + if validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof) + .map_err(AvailabilityCheckError::Kzg)? + { + Ok(KzgVerifiedBlob { blob }) + } else { + Err(AvailabilityCheckError::KzgVerificationFailed) + } +} + +/// Complete kzg verification for a list of `BlobSidecar`s. +/// Returns an error if any of the `BlobSidecar`s fails kzg verification. +/// +/// Note: This function should be preferred over calling `verify_kzg_for_blob` +/// in a loop since this function kzg verifies a list of blobs more efficiently. 
+pub fn verify_kzg_for_blob_list( + blob_list: &BlobSidecarList, + kzg: &Kzg, +) -> Result<(), AvailabilityCheckError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); + let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list + .iter() + .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) + .unzip(); + if validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) + .map_err(AvailabilityCheckError::Kzg)? + { + Ok(()) + } else { + Err(AvailabilityCheckError::KzgVerificationFailed) + } +} + +/// Returns the canonical root of the given `blob`. +/// +/// Use this function to ensure that we report the blob hashing time Prometheus metric. +pub fn get_blob_root(blob: &SignedBlobSidecar) -> Hash256 { + let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT); + + let blob_root = blob.message.tree_hash_root(); + + metrics::stop_timer(blob_root_timer); + + blob_root +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 0a82eae371..7f1a596ec3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -23,6 +23,7 @@ //! | //! â–¼ //! SignedBeaconBlock +//! | //! |--------------- //! | | //! | â–¼ @@ -47,6 +48,11 @@ // returned alongside. 
#![allow(clippy::result_large_err)] +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use crate::block_verification_types::{ + AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock, +}; +use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, @@ -64,15 +70,17 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::EventKind; +use eth2::types::{EventKind, SignedBlockContents}; use execution_layer::PayloadStatus; -use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; +pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, @@ -82,18 +90,19 @@ use state_processing::{ StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; +use std::fmt::Debug; use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::ExecPayload; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, - EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, 
- RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, + ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -141,7 +150,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(Arc>), + ParentUnknown(RpcBlock), /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -215,7 +224,7 @@ pub enum BlockError { /// /// The block is invalid and the peer is faulty. InvalidSignature, - /// The provided block is from an later slot than its parent. + /// The provided block is not from a later slot than its parent. /// /// ## Peer scoring /// @@ -284,6 +293,27 @@ pub enum BlockError { /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so /// we penalise them with a mid-tolerance error. Slashable, + /// The block and blob together failed validation. + /// + /// ## Peer scoring + /// + /// This error implies that the block satisfied all block validity conditions except consistency + /// with the corresponding blob that we received over gossip/rpc. This is because availability + /// checks are always done after all other checks are completed. + /// This implies that either: + /// 1. The block proposer is faulty + /// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t the block). + /// 3. It is an internal error + /// For all these cases, we cannot penalize the peer that gave us the block. + /// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob. 
+ /// https://github.com/sigp/lighthouse/issues/4546 + AvailabilityCheck(AvailabilityCheckError), +} + +impl From for BlockError { + fn from(e: AvailabilityCheckError) -> Self { + Self::AvailabilityCheck(e) + } } /// Returned when block validation failed due to some issue verifying @@ -459,6 +489,7 @@ impl From for BlockError { } /// Stores information about verifying a payload against an execution engine. +#[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct PayloadVerificationOutcome { pub payload_verification_status: PayloadVerificationStatus, pub is_valid_merge_transition_block: bool, @@ -528,7 +559,7 @@ fn process_block_slash_info( /// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error /// will be returned. pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, Arc>)>, + mut chain_segment: Vec<(Hash256, RpcBlock)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -545,7 +576,7 @@ pub fn signature_verify_chain_segment( .map(|(_, block)| block.slot()) .unwrap_or_else(|| slot); - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, highest_slot, @@ -561,12 +592,16 @@ pub fn signature_verify_chain_segment( let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(*block_root); - signature_verifier.include_all_signatures(block, &mut consensus_context)?; + signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; + + let maybe_available_block = chain + .data_availability_checker + .check_rpc_block_availability(block.clone())?; // Save the block and its consensus context. The context will have had its proposer index // and attesting indices filled in, which can be used to accelerate later block processing. 
signature_verified_blocks.push(SignatureVerifiedBlock { - block: block.clone(), + block: maybe_available_block, block_root: *block_root, parent: None, consensus_context, @@ -600,7 +635,7 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, @@ -617,52 +652,74 @@ type PayloadVerificationHandle = /// - Signatures /// - State root check /// - Per block processing +/// - Blobs sidecar has been validated if present /// /// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid /// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. pub struct ExecutionPendingBlock { - pub block: Arc>, - pub block_root: Hash256, - pub state: BeaconState, - pub parent_block: SignedBeaconBlock>, - pub parent_eth1_finalization_data: Eth1FinalizationData, - pub confirmed_state_roots: Vec, - pub consensus_context: ConsensusContext, + pub block: MaybeAvailableBlock, + pub import_data: BlockImportData, pub payload_verification_handle: PayloadVerificationHandle, } -pub trait IntoGossipVerifiedBlock: Sized { +pub trait IntoGossipVerifiedBlockContents: Sized { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockError>; - fn inner(&self) -> Arc>; + ) -> Result, BlockContentsError>; + fn inner_block(&self) -> &SignedBeaconBlock; + fn inner_blobs(&self) -> Option>; } -impl IntoGossipVerifiedBlock for GossipVerifiedBlock { +impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { fn into_gossip_verified_block( self, _chain: &BeaconChain, - ) -> Result, BlockError> { + ) -> Result, BlockContentsError> { Ok(self) } - - fn inner(&self) -> Arc> { - 
self.block.clone() + fn inner_block(&self) -> &SignedBeaconBlock { + self.0.block.as_block() + } + fn inner_blobs(&self) -> Option> { + self.1.as_ref().map(|blobs| { + VariableList::from( + blobs + .into_iter() + .map(GossipVerifiedBlob::signed_blob) + .collect::>(), + ) + }) } } -impl IntoGossipVerifiedBlock for Arc> { +impl IntoGossipVerifiedBlockContents for SignedBlockContents { fn into_gossip_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockError> { - GossipVerifiedBlock::new(self, chain) + ) -> Result, BlockContentsError> { + let (block, blobs) = self.deconstruct(); + let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; + let gossip_verified_blobs = blobs + .map(|blobs| { + Ok::<_, GossipBlobError>(VariableList::from( + blobs + .into_iter() + .map(|blob| GossipVerifiedBlob::new(blob, chain)) + .collect::, GossipBlobError>>()?, + )) + }) + .transpose()?; + Ok((gossip_verified_block, gossip_verified_blobs)) } - fn inner(&self) -> Arc> { - self.clone() + fn inner_block(&self) -> &SignedBeaconBlock { + self.signed_block() + } + + fn inner_blobs(&self) -> Option> { + self.blobs_cloned() } } @@ -754,25 +811,23 @@ impl GossipVerifiedBlock { // reboot if the `observed_block_producers` cache is empty. In that case, without this // check, we will load the parent and state from disk only to find out later that we // already know this block. - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { + let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock(); + if fork_choice_read_lock.contains_block(&block_root) { return Err(BlockError::BlockIsAlreadyKnown); } // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. 
- check_block_is_finalized_checkpoint_or_descendant( + let block = check_block_is_finalized_checkpoint_or_descendant( chain, - &chain.canonical_head.fork_choice_read_lock(), - &block, + &fork_choice_read_lock, + block, )?; + drop(fork_choice_read_lock); let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let (parent_block, block) = verify_parent_block_is_known(chain, block)?; + let (parent_block, block) = verify_parent_block_is_known(block_root, chain, block)?; // Track the number of skip slots between the block and its parent. metrics::set_gauge( @@ -831,7 +886,7 @@ impl GossipVerifiedBlock { ); // The state produced is only valid for determining proposer/attester shuffling indices. - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -883,7 +938,9 @@ impl GossipVerifiedBlock { .observe_proposal(block_root, block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? { - SeenBlock::Slashable => return Err(BlockError::Slashable), + SeenBlock::Slashable => { + return Err(BlockError::Slashable); + } SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), SeenBlock::UniqueNonSlashable => {} }; @@ -901,7 +958,7 @@ impl GossipVerifiedBlock { // Having checked the proposer index and the block root we can cache them. let consensus_context = ConsensusContext::new(block.slot()) .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()); + .set_proposer_index(block.as_block().message().proposer_index()); Ok(Self { block, @@ -934,7 +991,7 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock &SignedBeaconBlock { - &self.block + self.block.as_block() } } @@ -944,12 +1001,13 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. 
pub fn new( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. block + .as_block() .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; @@ -958,7 +1016,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = load_parent(block_root, block, chain)?; - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -972,7 +1030,7 @@ impl SignatureVerifiedBlock { let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - signature_verifier.include_all_signatures(&block, &mut consensus_context)?; + signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { @@ -988,7 +1046,7 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { @@ -1008,7 +1066,7 @@ impl SignatureVerifiedBlock { load_parent(from.block_root, from.block, chain)? }; - let state = cheap_state_advance_to_obtain_committees( + let state = cheap_state_advance_to_obtain_committees::<_, BlockError>( &mut parent.pre_state, parent.beacon_state_root, block.slot(), @@ -1023,11 +1081,14 @@ impl SignatureVerifiedBlock { // signature. 
let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(&block, &mut consensus_context)?; + .include_all_signatures_except_proposal(block.as_ref(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { - block, + block: MaybeAvailableBlock::AvailabilityPending { + block_root: from.block_root, + block, + }, block_root: from.block_root, parent: Some(parent), consensus_context, @@ -1080,7 +1141,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } fn block(&self) -> &SignedBeaconBlock { - &self.block + self.block.as_block() } } @@ -1096,8 +1157,19 @@ impl IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for RpcBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( + self, + block_root: Hash256, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, BlockSlashInfo>> { + // Perform an early check to prevent wasting time on irrelevant blocks. + let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; + let maybe_available = chain + .data_availability_checker + .check_rpc_block_availability(self.clone()) + .map_err(|e| { + BlockSlashInfo::SignatureNotChecked( + self.signed_block_header(), + BlockError::AvailabilityCheck(e), + ) + })?; + SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)? + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) + } + + fn block(&self) -> &SignedBeaconBlock { + self.as_block() + } +} + impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. 
See /// the struct-level documentation for more information. @@ -1115,7 +1217,7 @@ impl ExecutionPendingBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: Arc>, + block: MaybeAvailableBlock, block_root: Hash256, parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, @@ -1151,14 +1253,14 @@ impl ExecutionPendingBlock { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(block)); + return Err(BlockError::ParentUnknown(block.into_rpc_block())); } /* * Perform cursory checks to see if the block is even worth processing. */ - check_block_relevancy(&block, block_root, chain)?; + check_block_relevancy(block.as_block(), block_root, chain)?; // Define a future that will verify the execution payload with an execution engine. // @@ -1166,7 +1268,7 @@ impl ExecutionPendingBlock { // with the payload verification. let payload_notifier = PayloadNotifier::new( chain.clone(), - block.clone(), + block.block_cloned(), &parent.pre_state, notify_execution_layer, )?; @@ -1261,7 +1363,7 @@ impl ExecutionPendingBlock { // Perform a sanity check on the pre-state. 
let parent_slot = parent.beacon_block.slot(); - if state.slot() < parent_slot || state.slot() > parent_slot + 1 { + if state.slot() < parent_slot || state.slot() > block.slot() { return Err(BeaconChainError::BadPreState { parent_root: parent.beacon_block_root, parent_slot, @@ -1316,7 +1418,9 @@ impl ExecutionPendingBlock { StoreOp::PutStateTemporaryFlag(state_root), ] }; - chain.store.do_atomically(state_batch)?; + chain + .store + .do_atomically_with_block_and_blobs_cache(state_batch)?; drop(txn_lock); confirmed_state_roots.push(state_root); @@ -1407,13 +1511,13 @@ impl ExecutionPendingBlock { &state, &chain.log, ); - write_block(&block, block_root, &chain.log); + write_block(block.as_block(), block_root, &chain.log); let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); if let Err(err) = per_block_processing( &mut state, - &block, + block.as_block(), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, StateProcessingStrategy::Accurate, @@ -1497,12 +1601,14 @@ impl ExecutionPendingBlock { Ok(Self { block, - block_root, - state, - parent_block: parent.beacon_block, - parent_eth1_finalization_data, - confirmed_state_roots, - consensus_context, + import_data: BlockImportData { + block_root, + state, + parent_block: parent.beacon_block, + parent_eth1_finalization_data, + confirmed_state_roots, + consensus_context, + }, payload_verification_handle, }) } @@ -1557,13 +1663,16 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. 
-pub fn check_block_is_finalized_checkpoint_or_descendant( +pub fn check_block_is_finalized_checkpoint_or_descendant< + T: BeaconChainTypes, + B: AsBlock, +>( chain: &BeaconChain, fork_choice: &BeaconForkChoice, - block: &Arc>, -) -> Result<(), BlockError> { + block: B, +) -> Result> { if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) { - Ok(()) + Ok(block) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1582,7 +1691,7 @@ pub fn check_block_is_finalized_checkpoint_or_descendant( block_parent_root: block.parent_root(), }) } else { - Err(BlockError::ParentUnknown(block.clone())) + Err(BlockError::ParentUnknown(block.into_rpc_block())) } } } @@ -1653,17 +1762,21 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { /// fork choice. #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( + block_root: Hash256, chain: &BeaconChain, block: Arc>, ) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = chain .canonical_head .fork_choice_read_lock() - .get_block(&block.message().parent_root()) + .get_block(&block.parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(block)) + Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( + Some(block_root), + block, + ))) } } @@ -1672,17 +1785,11 @@ fn verify_parent_block_is_known( /// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs /// whilst attempting the operation. #[allow(clippy::type_complexity)] -fn load_parent( +fn load_parent>( block_root: Hash256, - block: Arc>, + block: B, chain: &BeaconChain, -) -> Result< - ( - PreProcessingSnapshot, - Arc>, - ), - BlockError, -> { +) -> Result<(PreProcessingSnapshot, B), BlockError> { let spec = &chain.spec; // Reject any block if its parent is not known to fork choice. 
@@ -1700,7 +1807,7 @@ fn load_parent( .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(block)); + return Err(BlockError::ParentUnknown(block.into_rpc_block())); } let block_delay = chain @@ -1760,13 +1867,18 @@ fn load_parent( BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root())) })?; - // Load the parent blocks state from the database, returning an error if it is not found. + // Load the parent block's state from the database, returning an error if it is not found. // It is an error because if we know the parent block we should also know the parent state. - let parent_state_root = parent_block.state_root(); - let parent_state = chain - .get_state(&parent_state_root, Some(parent_block.slot()))? + // Retrieve any state that is advanced through to at most `block.slot()`: this is + // particularly important if `block` descends from the finalized/split block, but at a slot + // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + let (parent_state_root, parent_state) = chain + .store + .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) + BeaconChainError::DBInconsistent( + format!("Missing state for parent block {root:?}",), + ) })?; metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); @@ -1795,6 +1907,30 @@ fn load_parent( result } +/// This trait is used to unify `BlockError` and `BlobError` so +/// `cheap_state_advance_to_obtain_committees` can be re-used in gossip blob validation. 
+pub trait CheapStateAdvanceError: From + From + Debug { + fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self; +} + +impl CheapStateAdvanceError for BlockError { + fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self { + BlockError::BlockIsNotLaterThanParent { + block_slot, + parent_slot, + } + } +} + +impl CheapStateAdvanceError for GossipBlobError { + fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self { + GossipBlobError::BlobIsNotLaterThanParent { + blob_slot, + parent_slot, + } + } +} + /// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for /// `slot` can be obtained from `state`. /// @@ -1806,12 +1942,12 @@ fn load_parent( /// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply /// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never /// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build). 
-fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( +pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateAdvanceError>( state: &'a mut BeaconState, state_root_opt: Option, block_slot: Slot, spec: &ChainSpec, -) -> Result>, BlockError> { +) -> Result>, Err> { let block_epoch = block_slot.epoch(E::slots_per_epoch()); if state.current_epoch() == block_epoch { @@ -1822,10 +1958,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( Ok(Cow::Borrowed(state)) } else if state.slot() > block_slot { - Err(BlockError::BlockIsNotLaterThanParent { - block_slot, - parent_slot: state.slot(), - }) + Err(Err::not_later_than_parent_error(block_slot, state.slot())) } else { let mut state = state.clone_with(CloneConfig::committee_caches_only()); let target_slot = block_epoch.start_slot(E::slots_per_epoch()); @@ -1833,7 +1966,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( // Advance the state into the same epoch as the block. Use the "partial" method since state // roots are not important for proposer/attester shuffling. 
partial_state_advance(&mut state, state_root_opt, target_slot, spec) - .map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?; + .map_err(BeaconChainError::from)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs new file mode 100644 index 0000000000..d236e94f93 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -0,0 +1,529 @@ +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList}; +use crate::block_verification::BlockError; +use crate::data_availability_checker::AvailabilityCheckError; +pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; +use crate::eth1_finalization_cache::Eth1FinalizationData; +use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome}; +use derivative::Derivative; +use ssz_types::VariableList; +use state_processing::ConsensusContext; +use std::sync::Arc; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::{ + BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, +}; + +/// A block that has been received over RPC. It has 2 internal variants: +/// +/// 1. `BlockAndBlobs`: A fully available post deneb block with all the blobs available. This variant +/// is only constructed after making consistency checks between blocks and blobs. +/// Hence, it is fully self contained w.r.t verification. i.e. this block has all the required +/// data to get verified and imported into fork choice. +/// +/// 2. `Block`: This can be a fully available pre-deneb block **or** a post-deneb block that may or may +/// not require blobs to be considered fully available. 
+/// +/// Note: We make a distinction over blocks received over gossip because +/// in a post-deneb world, the blobs corresponding to a given block that are received +/// over rpc do not contain the proposer signature for dos resistance. +#[derive(Debug, Clone, Derivative)] +#[derivative(Hash(bound = "E: EthSpec"))] +pub struct RpcBlock { + block_root: Hash256, + block: RpcBlockInner, +} + +impl RpcBlock { + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + match &self.block { + RpcBlockInner::Block(block) => block, + RpcBlockInner::BlockAndBlobs(block, _) => block, + } + } +} + +/// Note: This variant is intentionally private because we want to safely construct the +/// internal variants after applying consistency checks to ensure that the block and blobs +/// are consistent with respect to each other. +#[derive(Debug, Clone, Derivative)] +#[derivative(Hash(bound = "E: EthSpec"))] +enum RpcBlockInner { + /// Single block lookup response. This should potentially hit the data availability cache. + Block(Arc>), + /// This variant is used with parent lookups and by-range responses. It should have all blobs + /// ordered, all block roots matching, and the correct number of blobs for this block. + BlockAndBlobs(Arc>, BlobSidecarList), +} + +impl RpcBlock { + /// Constructs a `Block` variant. + pub fn new_without_blobs( + block_root: Option, + block: Arc>, + ) -> Self { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + + Self { + block_root, + block: RpcBlockInner::Block(block), + } + } + + /// Constructs a new `BlockAndBlobs` variant after making consistency + /// checks between the provided blocks and blobs. 
+ pub fn new( + block_root: Option, + block: Arc>, + blobs: Option>, + ) -> Result { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + + if let (Some(blobs), Ok(block_commitments)) = ( + blobs.as_ref(), + block.message().body().blob_kzg_commitments(), + ) { + if blobs.len() != block_commitments.len() { + return Err(AvailabilityCheckError::MissingBlobs); + } + for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) { + let blob_block_root = blob.block_root; + if blob_block_root != block_root { + return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { + block_root, + blob_block_root, + }); + } + let blob_commitment = blob.kzg_commitment; + if blob_commitment != block_commitment { + return Err(AvailabilityCheckError::KzgCommitmentMismatch { + block_commitment, + blob_commitment, + }); + } + } + } + let inner = match blobs { + Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs), + None => RpcBlockInner::Block(block), + }; + Ok(Self { + block_root, + block: inner, + }) + } + + pub fn new_from_fixed( + block_root: Hash256, + block: Arc>, + blobs: FixedBlobSidecarList, + ) -> Result { + let filtered = blobs + .into_iter() + .filter_map(|b| b.clone()) + .collect::>(); + let blobs = if filtered.is_empty() { + None + } else { + Some(VariableList::from(filtered)) + }; + Self::new(Some(block_root), block, blobs) + } + + pub fn deconstruct( + self, + ) -> ( + Hash256, + Arc>, + Option>, + ) { + let block_root = self.block_root(); + match self.block { + RpcBlockInner::Block(block) => (block_root, block, None), + RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)), + } + } + pub fn n_blobs(&self) -> usize { + match &self.block { + RpcBlockInner::Block(_) => 0, + RpcBlockInner::BlockAndBlobs(_, blobs) => blobs.len(), + } + } +} + +/// A block that has gone through all pre-deneb block processing checks including block processing +/// and execution by an EL client. 
This block hasn't necessarily completed data availability checks. +/// +/// +/// It contains 2 variants: +/// 1. `Available`: This block has been executed and also contains all data to consider it a +/// fully available block. i.e. for post-deneb, this implies that this contains all the +/// required blobs. +/// 2. `AvailabilityPending`: This block hasn't received all required blobs to consider it a +/// fully available block. +pub enum ExecutedBlock { + Available(AvailableExecutedBlock), + AvailabilityPending(AvailabilityPendingExecutedBlock), +} + +impl ExecutedBlock { + pub fn new( + block: MaybeAvailableBlock, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + match block { + MaybeAvailableBlock::Available(available_block) => { + Self::Available(AvailableExecutedBlock::new( + available_block, + import_data, + payload_verification_outcome, + )) + } + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block: pending_block, + } => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new( + pending_block, + import_data, + payload_verification_outcome, + )), + } + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + match self { + Self::Available(available) => available.block.block(), + Self::AvailabilityPending(pending) => &pending.block, + } + } + + pub fn block_root(&self) -> Hash256 { + match self { + ExecutedBlock::AvailabilityPending(pending) => pending.import_data.block_root, + ExecutedBlock::Available(available) => available.import_data.block_root, + } + } +} + +/// A block that has completed all pre-deneb block processing checks including verification +/// by an EL client **and** has all requisite blob data to be imported into fork choice. 
+#[derive(PartialEq)] +pub struct AvailableExecutedBlock { + pub block: AvailableBlock, + pub import_data: BlockImportData, + pub payload_verification_outcome: PayloadVerificationOutcome, +} + +impl AvailableExecutedBlock { + pub fn new( + block: AvailableBlock, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + Self { + block, + import_data, + payload_verification_outcome, + } + } + + pub fn get_all_blob_ids(&self) -> Vec { + let num_blobs_expected = self + .block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()); + let mut blob_ids = Vec::with_capacity(num_blobs_expected); + for i in 0..num_blobs_expected { + blob_ids.push(BlobIdentifier { + block_root: self.import_data.block_root, + index: i as u64, + }); + } + blob_ids + } +} + +/// A block that has completed all pre-deneb block processing checks, verification +/// by an EL client but does not have all requisite blob data to get imported into +/// fork choice. 
+pub struct AvailabilityPendingExecutedBlock { + pub block: Arc>, + pub import_data: BlockImportData, + pub payload_verification_outcome: PayloadVerificationOutcome, +} + +impl AvailabilityPendingExecutedBlock { + pub fn new( + block: Arc>, + import_data: BlockImportData, + payload_verification_outcome: PayloadVerificationOutcome, + ) -> Self { + Self { + block, + import_data, + payload_verification_outcome, + } + } + + pub fn as_block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn num_blobs_expected(&self) -> usize { + self.block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + +#[derive(Debug, PartialEq)] +pub struct BlockImportData { + pub block_root: Hash256, + pub state: BeaconState, + pub parent_block: SignedBeaconBlock>, + pub parent_eth1_finalization_data: Eth1FinalizationData, + pub confirmed_state_roots: Vec, + pub consensus_context: ConsensusContext, +} + +pub type GossipVerifiedBlockContents = + (GossipVerifiedBlock, Option>); + +#[derive(Debug)] +pub enum BlockContentsError { + BlockError(BlockError), + BlobError(GossipBlobError), +} + +impl From> for BlockContentsError { + fn from(value: BlockError) -> Self { + Self::BlockError(value) + } +} + +impl From> for BlockContentsError { + fn from(value: GossipBlobError) -> Self { + Self::BlobError(value) + } +} + +impl std::fmt::Display for BlockContentsError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockContentsError::BlockError(err) => { + write!(f, "BlockError({})", err) + } + BlockContentsError::BlobError(err) => { + write!(f, "BlobError({})", err) + } + } + } +} + +/// Trait for common block operations. 
+pub trait AsBlock { + fn slot(&self) -> Slot; + fn epoch(&self) -> Epoch; + fn parent_root(&self) -> Hash256; + fn state_root(&self) -> Hash256; + fn signed_block_header(&self) -> SignedBeaconBlockHeader; + fn message(&self) -> BeaconBlockRef; + fn as_block(&self) -> &SignedBeaconBlock; + fn block_cloned(&self) -> Arc>; + fn canonical_root(&self) -> Hash256; + fn into_rpc_block(self) -> RpcBlock; +} + +impl AsBlock for Arc> { + fn slot(&self) -> Slot { + SignedBeaconBlock::slot(self) + } + + fn epoch(&self) -> Epoch { + SignedBeaconBlock::epoch(self) + } + + fn parent_root(&self) -> Hash256 { + SignedBeaconBlock::parent_root(self) + } + + fn state_root(&self) -> Hash256 { + SignedBeaconBlock::state_root(self) + } + + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + SignedBeaconBlock::signed_block_header(self) + } + + fn message(&self) -> BeaconBlockRef { + SignedBeaconBlock::message(self) + } + + fn as_block(&self) -> &SignedBeaconBlock { + self + } + + fn block_cloned(&self) -> Arc> { + Arc::>::clone(self) + } + + fn canonical_root(&self) -> Hash256 { + SignedBeaconBlock::canonical_root(self) + } + + fn into_rpc_block(self) -> RpcBlock { + RpcBlock::new_without_blobs(None, self) + } +} + +impl AsBlock for MaybeAvailableBlock { + fn slot(&self) -> Slot { + self.as_block().slot() + } + fn epoch(&self) -> Epoch { + self.as_block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.as_block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.as_block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.as_block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef { + self.as_block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self { + MaybeAvailableBlock::Available(block) => block.as_block(), + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block, + } => block, + } + } + fn block_cloned(&self) -> Arc> { + match &self { + 
MaybeAvailableBlock::Available(block) => block.block_cloned(), + MaybeAvailableBlock::AvailabilityPending { + block_root: _, + block, + } => block.clone(), + } + } + fn canonical_root(&self) -> Hash256 { + self.as_block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + match self { + MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(), + MaybeAvailableBlock::AvailabilityPending { block_root, block } => { + RpcBlock::new_without_blobs(Some(block_root), block) + } + } + } +} + +impl AsBlock for AvailableBlock { + fn slot(&self) -> Slot { + self.block().slot() + } + + fn epoch(&self) -> Epoch { + self.block().epoch() + } + + fn parent_root(&self) -> Hash256 { + self.block().parent_root() + } + + fn state_root(&self) -> Hash256 { + self.block().state_root() + } + + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.block().signed_block_header() + } + + fn message(&self) -> BeaconBlockRef { + self.block().message() + } + + fn as_block(&self) -> &SignedBeaconBlock { + self.block() + } + + fn block_cloned(&self) -> Arc> { + AvailableBlock::block_cloned(self) + } + + fn canonical_root(&self) -> Hash256 { + self.block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + let (block_root, block, blobs_opt) = self.deconstruct(); + // Circumvent the constructor here, because an Available block will have already had + // consistency checks performed. 
+ let inner = match blobs_opt { + None => RpcBlockInner::Block(block), + Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs), + }; + RpcBlock { + block_root, + block: inner, + } + } +} + +impl AsBlock for RpcBlock { + fn slot(&self) -> Slot { + self.as_block().slot() + } + fn epoch(&self) -> Epoch { + self.as_block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.as_block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.as_block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.as_block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef { + self.as_block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self.block { + RpcBlockInner::Block(block) => block, + RpcBlockInner::BlockAndBlobs(block, _) => block, + } + } + fn block_cloned(&self) -> Arc> { + match &self.block { + RpcBlockInner::Block(block) => block.clone(), + RpcBlockInner::BlockAndBlobs(block, _) => block.clone(), + } + } + fn canonical_root(&self) -> Hash256 { + self.as_block().canonical_root() + } + + fn into_rpc_block(self) -> RpcBlock { + self + } +} diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 044391c415..fd8a3f0460 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,5 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::data_availability_checker::DataAvailabilityChecker; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; @@ -20,12 +21,14 @@ use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; +use kzg::{Kzg, TrustedSetup}; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; use 
proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, error, info, Logger}; +use slog::{crit, debug, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -94,6 +97,7 @@ pub struct BeaconChainBuilder { // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, + trusted_setup: Option, task_executor: Option, } @@ -133,6 +137,7 @@ where slasher: None, validator_monitor: None, pending_io_batch: vec![], + trusted_setup: None, task_executor: None, } } @@ -287,7 +292,7 @@ where let genesis_state = store .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) .map_err(|e| descriptive_db_error("genesis state", &e))? - .ok_or("Genesis block not found in store")?; + .ok_or("Genesis state not found in store")?; self.genesis_time = Some(genesis_state.genesis_time()); @@ -382,6 +387,21 @@ where let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; + // Stage the database's metadata fields for atomic storage when `build` is called. + // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent + // historic states from being retained (unless `--reconstruct-historic-states` is set). 
+ let retain_historic_states = self.chain_config.reconstruct_historic_states; + self.pending_io_batch.push( + store + .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) + .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, + ); + self.pending_io_batch.push( + store + .init_blob_info(genesis.beacon_block.slot()) + .map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?, + ); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = None; @@ -408,30 +428,28 @@ where weak_subj_block: SignedBeaconBlock, genesis_state: BeaconState, ) -> Result { - let store = self.store.clone().ok_or("genesis_state requires a store")?; + let store = self + .store + .clone() + .ok_or("weak_subjectivity_state requires a store")?; + let log = self + .log + .as_ref() + .ok_or("weak_subjectivity_state requires a log")?; - let weak_subj_slot = weak_subj_state.slot(); - let weak_subj_block_root = weak_subj_block.canonical_root(); - let weak_subj_state_root = weak_subj_block.state_root(); - - // Check that the given block lies on an epoch boundary. Due to the database only storing - // full states on epoch boundaries and at restore points it would be difficult to support - // starting from a mid-epoch state. - if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 { - return Err(format!( - "Checkpoint block at slot {} is not aligned to epoch start. \ - Please supply an aligned checkpoint with block.slot % 32 == 0", - weak_subj_block.slot(), - )); - } - - // Check that the block and state have consistent slots and state roots. - if weak_subj_state.slot() != weak_subj_block.slot() { - return Err(format!( - "Slot of snapshot block ({}) does not match snapshot state ({})", - weak_subj_block.slot(), - weak_subj_state.slot(), - )); + // Ensure the state is advanced to an epoch boundary. 
+ let slots_per_epoch = TEthSpec::slots_per_epoch(); + if weak_subj_state.slot() % slots_per_epoch != 0 { + debug!( + log, + "Advancing checkpoint state to boundary"; + "state_slot" => weak_subj_state.slot(), + "block_slot" => weak_subj_block.slot(), + ); + while weak_subj_state.slot() % slots_per_epoch != 0 { + per_slot_processing(&mut weak_subj_state, None, &self.spec) + .map_err(|e| format!("Error advancing state: {e:?}"))?; + } } // Prime all caches before storing the state in the database and computing the tree hash @@ -439,15 +457,19 @@ where weak_subj_state .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; - - let computed_state_root = weak_subj_state + let weak_subj_state_root = weak_subj_state .update_tree_hash_cache() .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; - if weak_subj_state_root != computed_state_root { + let weak_subj_slot = weak_subj_state.slot(); + let weak_subj_block_root = weak_subj_block.canonical_root(); + + // Validate the state's `latest_block_header` against the checkpoint block. + let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root); + if weak_subj_block_root != state_latest_block_root { return Err(format!( - "Snapshot state root does not match block, expected: {:?}, got: {:?}", - weak_subj_state_root, computed_state_root + "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}", + weak_subj_block_root, state_latest_block_root )); } @@ -464,10 +486,25 @@ where // Set the store's split point *before* storing genesis so that genesis is stored // immediately in the freezer DB. 
- store.set_split(weak_subj_slot, weak_subj_state_root); + store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; + // Fill in the linear block roots between the checkpoint block's slot and the aligned + // state's slot. All slots less than the block's slot will be handled by block backfill, + // while states greater or equal to the checkpoint state will be handled by `migrate_db`. + let block_root_batch = store + .store_frozen_block_root_at_skip_slots( + weak_subj_block.slot(), + weak_subj_state.slot(), + weak_subj_block_root, + ) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + store + .cold_db + .do_atomically(block_root_batch) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + // Write the state and block non-atomically, it doesn't matter if they're forgotten // about on a crash restart. store @@ -480,12 +517,18 @@ where // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor // info or split point is written before the `PersistedBeaconChain`. + let retain_historic_states = self.chain_config.reconstruct_historic_states; self.pending_io_batch.push(store.store_split_in_batch()); self.pending_io_batch.push( store - .init_anchor_info(weak_subj_block.message()) + .init_anchor_info(weak_subj_block.message(), retain_historic_states) .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, ); + self.pending_io_batch.push( + store + .init_blob_info(weak_subj_block.slot()) + .map_err(|e| format!("Failed to initialize blob info: {:?}", e))?, + ); // Store pruning checkpoint to prevent attempting to prune before the anchor state. 
self.pending_io_batch @@ -503,13 +546,12 @@ where let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; - let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, - current_slot, + Some(weak_subj_slot), &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -597,6 +639,11 @@ where self } + pub fn trusted_setup(mut self, trusted_setup: TrustedSetup) -> Self { + self.trusted_setup = Some(trusted_setup); + self + } + /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. /// /// An error will be returned at runtime if all required parameters have not been configured. @@ -638,6 +685,15 @@ where slot_clock.now().ok_or("Unable to read slot")? }; + let kzg = if let Some(trusted_setup) = self.trusted_setup { + let kzg = Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| format!("Failed to load trusted setup: {:?}", e))?; + let kzg_arc = Arc::new(kzg); + Some(kzg_arc) + } else { + None + }; + let initial_head_block_root = fork_choice .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; @@ -672,9 +728,8 @@ where Err(e) => return Err(descriptive_db_error("head block", &e)), }; - let head_state_root = head_block.state_root(); - let head_state = store - .get_state(&head_state_root, Some(head_block.slot())) + let (_head_state_root, head_state) = store + .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? 
.ok_or("Head state not found in store")?; @@ -799,14 +854,14 @@ where }; let beacon_chain = BeaconChain { - spec: self.spec, + spec: self.spec.clone(), config: self.chain_config, - store, + store: store.clone(), task_executor: self .task_executor .ok_or("Cannot build without task executor")?, store_migrator, - slot_clock, + slot_clock: slot_clock.clone(), op_pool: self.op_pool.ok_or("Cannot build without op pool")?, // TODO: allow for persisting and loading the pool from disk. naive_aggregation_pool: <_>::default(), @@ -828,6 +883,7 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), + observed_blob_sidecars: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), @@ -869,6 +925,11 @@ where slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, + data_availability_checker: Arc::new( + DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, &log, self.spec) + .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, + ), + kzg, }; let head = beacon_chain.head_snapshot(); @@ -931,6 +992,13 @@ where ); } + // Prune blobs older than the blob data availability boundary in the background. 
+ if let Some(data_availability_boundary) = beacon_chain.data_availability_boundary() { + beacon_chain + .store_migrator + .process_prune_blobs(data_availability_boundary); + } + Ok(beacon_chain) } } @@ -1028,6 +1096,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { #[cfg(test)] mod test { use super::*; + use crate::test_utils::EphemeralHarnessType; use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use ethereum_hashing::hash; use genesis::{ @@ -1042,6 +1111,7 @@ mod test { use types::{EthSpec, MinimalEthSpec, Slot}; type TestEthSpec = MinimalEthSpec; + type Builder = BeaconChainBuilder>; fn get_logger() -> Logger { let builder = NullLoggerBuilder; @@ -1074,7 +1144,7 @@ mod test { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let runtime = TestRuntime::default(); - let chain = BeaconChainBuilder::new(MinimalEthSpec) + let chain = Builder::new(MinimalEthSpec) .logger(log.clone()) .store(Arc::new(store)) .task_executor(runtime.task_executor.clone()) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2b1f714362..35355754bd 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -47,7 +47,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, + ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, + ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -298,10 +299,10 @@ impl CanonicalHead { let beacon_block = store .get_full_block(&beacon_block_root)? 
.ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state = store - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let current_slot = fork_choice.fc_store().get_current_slot(); + let (_, beacon_state) = store + .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { beacon_block_root, @@ -669,10 +670,14 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let (_, beacon_state) = self + .store + .get_advanced_hot_state( + new_view.head_block_root, + current_slot, + beacon_block.state_root(), + )? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; Ok(BeaconSnapshot { beacon_block: Arc::new(beacon_block), @@ -979,6 +984,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_blob_sidecars.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .map(|mut snapshot_cache| { @@ -1046,6 +1058,12 @@ impl BeaconChain { self.head_tracker.clone(), )?; + // Prune blobs in the background. + if let Some(data_availability_boundary) = self.data_availability_boundary() { + self.store_migrator + .process_prune_blobs(data_availability_boundary); + } + // Take a write-lock on the canonical head and signal for it to prune. 
self.canonical_head.fork_choice_write_lock().prune()?; diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index d1bddcf736..bccc3732c3 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,5 +1,5 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::time::Duration; use types::{Checkpoint, Epoch, ProgressiveBalancesMode}; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs new file mode 100644 index 0000000000..a8d077a6d0 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -0,0 +1,608 @@ +use crate::blob_verification::{verify_kzg_for_blob, verify_kzg_for_blob_list, GossipVerifiedBlob}; +use crate::block_verification_types::{ + AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, +}; +pub use crate::data_availability_checker::availability_view::{ + AvailabilityView, GetCommitment, GetCommitments, +}; +pub use crate::data_availability_checker::child_components::ChildComponents; +use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache; +use crate::data_availability_checker::processing_cache::ProcessingCache; +use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use kzg::Kzg; +use parking_lot::RwLock; +pub use processing_cache::ProcessingComponents; +use slasher::test_utils::E; +use slog::{debug, error, Logger}; +use slot_clock::SlotClock; +use std::fmt; +use std::fmt::Debug; +use std::sync::Arc; +use task_executor::TaskExecutor; +use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; +use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; +use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; +use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, 
SignedBeaconBlock, Slot}; + +mod availability_view; +mod child_components; +mod error; +mod overflow_lru_cache; +mod processing_cache; +mod state_lru_cache; + +pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; + +/// The LRU Cache stores `PendingComponents` which can store up to +/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So +/// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this +/// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache +/// will target a size of less than 75% of capacity. +pub const OVERFLOW_LRU_CAPACITY: usize = 1024; +/// Until tree-states is implemented, we can't store very many states in memory :( +pub const STATE_LRU_CAPACITY: usize = 2; + +/// This includes a cache for any blocks or blobs that have been received over gossip or RPC +/// and are awaiting more components before they can be imported. Additionally the +/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as +/// checking whether an "availability check" is required at all. +pub struct DataAvailabilityChecker { + processing_cache: RwLock>, + availability_cache: Arc>, + slot_clock: T::SlotClock, + kzg: Option>, + log: Logger, + spec: ChainSpec, +} + +/// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. +/// +/// Indicates if the block is fully `Available` or if we need blobs or blocks +/// to "complete" the requirements for an `AvailableBlock`.
+#[derive(PartialEq)] +pub enum Availability { + MissingComponents(Hash256), + Available(Box>), +} + +impl Debug for Availability { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::MissingComponents(block_root) => { + write!(f, "MissingComponents({})", block_root) + } + Self::Available(block) => write!(f, "Available({:?})", block.import_data.block_root), + } + } +} + +impl DataAvailabilityChecker { + pub fn new( + slot_clock: T::SlotClock, + kzg: Option>, + store: BeaconStore, + log: &Logger, + spec: ChainSpec, + ) -> Result { + let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; + Ok(Self { + processing_cache: <_>::default(), + availability_cache: Arc::new(overflow_cache), + slot_clock, + log: log.clone(), + kzg, + spec, + }) + } + + /// Checks if the given block root is cached. + pub fn has_block(&self, block_root: &Hash256) -> bool { + self.processing_cache.read().has_block(block_root) + } + + /// Get the processing info for a block. + pub fn get_processing_components( + &self, + block_root: Hash256, + ) -> Option> { + self.processing_cache.read().get(&block_root).cloned() + } + + /// A `None` indicates blobs are not required. + /// + /// If there's no block, all possible ids will be returned that don't exist in the given blobs. + /// If there are no blobs, all possible ids will be returned.
+ pub fn get_missing_blob_ids>( + &self, + block_root: Hash256, + availability_view: &V, + ) -> MissingBlobs { + let Some(current_slot) = self.slot_clock.now_or_genesis() else { + error!( + self.log, + "Failed to read slot clock when checking for missing blob ids" + ); + return MissingBlobs::BlobsNotRequired; + }; + + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + if self.da_check_required_for_epoch(current_epoch) { + match availability_view.get_cached_block() { + Some(cached_block) => { + let block_commitments = cached_block.get_commitments(); + let blob_commitments = availability_view.get_cached_blobs(); + + let num_blobs_expected = block_commitments.len(); + let mut blob_ids = Vec::with_capacity(num_blobs_expected); + + // Zip here will always limit the number of iterations to the size of + // `block_commitment` because `blob_commitments` will always be populated + // with `Option` values up to `MAX_BLOBS_PER_BLOCK`. + for (index, (block_commitment, blob_commitment_opt)) in block_commitments + .into_iter() + .zip(blob_commitments.iter()) + .enumerate() + { + // Always add a missing blob. + let Some(blob_commitment) = blob_commitment_opt else { + blob_ids.push(BlobIdentifier { + block_root, + index: index as u64, + }); + continue; + }; + + let blob_commitment = *blob_commitment.get_commitment(); + + // Check for consistency, but this shouldn't happen, an availability view + // should guarantee consistency.
+ if blob_commitment != block_commitment { + error!(self.log, + "Inconsistent availability view"; + "block_root" => ?block_root, + "block_commitment" => ?block_commitment, + "blob_commitment" => ?blob_commitment, + "index" => index + ); + blob_ids.push(BlobIdentifier { + block_root, + index: index as u64, + }); + } + } + MissingBlobs::KnownMissing(blob_ids) + } + None => { + MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) + } + } + } else { + MissingBlobs::BlobsNotRequired + } + } + + /// Get a blob from the availability cache. + pub fn get_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + self.availability_cache.peek_blob(blob_id) + } + + /// Put a list of blobs received via RPC into the availability cache. This performs KZG + /// verification on the blobs in the list. + pub fn put_rpc_blobs( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result, AvailabilityCheckError> { + let mut verified_blobs = vec![]; + if let Some(kzg) = self.kzg.as_ref() { + for blob in blobs.iter().flatten() { + verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?) + } + } else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + self.availability_cache + .put_kzg_verified_blobs(block_root, verified_blobs) + } + + /// This first validates the KZG commitments included in the blob sidecar. + /// Check if we've cached other blobs for this block. If it completes a set and we also + /// have a block cached, return the `Availability` variant triggering block import. + /// Otherwise cache the blob sidecar. + /// + /// This should only accept gossip verified blobs, so we should not have to worry about dupes. + pub fn put_gossip_blob( + &self, + gossip_blob: GossipVerifiedBlob, + ) -> Result, AvailabilityCheckError> { + // Verify the KZG commitments. + let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { + verify_kzg_for_blob(gossip_blob.to_blob(), kzg)? 
+ } else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + + self.availability_cache + .put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob]) + } + + /// Check if we have all the blobs for a block. Returns `Availability` which has information + /// about whether all components have been received or more are required. + pub fn put_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_pending_executed_block(executed_block) + } + + /// Checks if a block is available, returns a `MaybeAvailableBlock` that may include the fully + /// available block. + pub fn check_rpc_block_availability( + &self, + block: RpcBlock, + ) -> Result, AvailabilityCheckError> { + let (block_root, block, blobs) = block.deconstruct(); + match blobs { + None => { + if self.blobs_required_for_block(&block) { + Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + } else { + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + })) + } + } + Some(blob_list) => { + let verified_blobs = if self.blobs_required_for_block(&block) { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_blob_list(&blob_list, kzg)?; + Some(blob_list) + } else { + None + }; + Ok(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: verified_blobs, + })) + } + } + } + + /// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required. + /// If the block's epoch is from prior to the data availability boundary, no blobs are required. + fn blobs_required_for_block(&self, block: &SignedBeaconBlock) -> bool { + block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch()) + } + + /// Adds block commitments to the processing cache. 
These commitments are unverified but caching + /// them here is useful to avoid duplicate downloads of blocks, as well as understanding + /// our blob download requirements. + pub fn notify_block_commitments( + &self, + slot: Slot, + block_root: Hash256, + commitments: KzgCommitments, + ) { + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_block(commitments); + } + + /// Add a single blob commitment to the processing cache. This commitment is unverified but caching + /// them here is useful to avoid duplicate downloads of blobs, as well as understanding + /// our block and blob download requirements. + pub fn notify_gossip_blob( + &self, + slot: Slot, + block_root: Hash256, + blob: &GossipVerifiedBlob, + ) { + let index = blob.as_blob().index; + let commitment = blob.as_blob().kzg_commitment; + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_single_blob(index as usize, commitment); + } + + /// Adds blob commitments to the processing cache. These commitments are unverified but caching + /// them here is useful to avoid duplicate downloads of blobs, as well as understanding + /// our block and blob download requirements. + pub fn notify_rpc_blobs( + &self, + slot: Slot, + block_root: Hash256, + blobs: &FixedBlobSidecarList, + ) { + let mut commitments = KzgCommitmentOpts::::default(); + for blob in blobs.iter().flatten() { + if let Some(commitment) = commitments.get_mut(blob.index as usize) { + *commitment = Some(blob.kzg_commitment); + } + } + self.processing_cache + .write() + .entry(block_root) + .or_insert_with(|| ProcessingComponents::new(slot)) + .merge_blobs(commitments); + } + + /// Clears the block and all blobs from the processing cache for a given root if they exist.
+ pub fn remove_notified(&self, block_root: &Hash256) { + self.processing_cache.write().remove(block_root) + } + + /// Gather all block roots for which we are not currently processing all components for the + /// given slot. + pub fn incomplete_processing_components(&self, slot: Slot) -> Vec { + self.processing_cache + .read() + .incomplete_processing_components(slot) + } + + /// Determines whether we are at least the `single_lookup_delay` duration into the given slot. + /// If we are not currently in the Deneb fork, this delay is not considered. + /// + /// The `single_lookup_delay` is the duration we wait for blocks or blobs to arrive over + /// gossip before making single block or blob requests. This is to minimize the number of + /// single lookup requests we end up making. + pub fn should_delay_lookup(&self, slot: Slot) -> bool { + if !self.is_deneb() { + return false; + } + + let current_or_future_slot = self + .slot_clock + .now() + .map_or(false, |current_slot| current_slot <= slot); + + let delay_threshold_unmet = self + .slot_clock + .millis_from_current_slot_start() + .map_or(false, |millis_into_slot| { + millis_into_slot < self.slot_clock.single_lookup_delay() + }); + current_or_future_slot && delay_threshold_unmet + } + + /// The epoch at which we require a data availability check in block processing. + /// `None` if the `Deneb` fork is disabled. + pub fn data_availability_boundary(&self) -> Option { + self.spec.deneb_fork_epoch.and_then(|fork_epoch| { + self.slot_clock + .now() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .map(|current_epoch| { + std::cmp::max( + fork_epoch, + current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + ) + }) + }) + } + + /// Returns true if the given epoch lies within the da boundary and false otherwise.
+ pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { + self.data_availability_boundary() + .map_or(false, |da_epoch| block_epoch >= da_epoch) + } + + /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. + pub fn is_deneb(&self) -> bool { + self.slot_clock.now().map_or(false, |slot| { + self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| { + let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + now_epoch >= deneb_epoch + }) + }) + } + + /// Persist all in memory components to disk + pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> { + self.availability_cache.write_all_to_disk() + } +} + +pub fn start_availability_cache_maintenance_service( + executor: TaskExecutor, + chain: Arc>, +) { + // this cache only needs to be maintained if deneb is configured + if chain.spec.deneb_fork_epoch.is_some() { + let overflow_cache = chain.data_availability_checker.availability_cache.clone(); + executor.spawn( + async move { availability_cache_maintenance_service(chain, overflow_cache).await }, + "availability_cache_service", + ); + } else { + debug!( + chain.log, + "Deneb fork not configured, not starting availability cache maintenance service" + ); + } +} + +async fn availability_cache_maintenance_service( + chain: Arc>, + overflow_cache: Arc>, +) { + let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; + loop { + match chain + .slot_clock + .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) + { + Some(duration) => { + // this service should run 3/4 of the way through the epoch + let additional_delay = (epoch_duration * 3) / 4; + tokio::time::sleep(duration + additional_delay).await; + + let Some(deneb_fork_epoch) = chain.spec.deneb_fork_epoch else { + // shutdown service if deneb fork epoch not set + break; + }; + + debug!( + chain.log, + "Availability cache maintenance service firing"; + ); + let Some(current_epoch) = chain + .slot_clock + .now() + 
.map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + else { + continue; + }; + + if current_epoch < deneb_fork_epoch { + // we are not in deneb yet + continue; + } + + let finalized_epoch = chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint() + .epoch; + // any data belonging to an epoch before this should be pruned + let cutoff_epoch = std::cmp::max( + finalized_epoch + 1, + std::cmp::max( + current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + deneb_fork_epoch, + ), + ); + + if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) { + error!(chain.log, "Failed to maintain availability cache"; "error" => ?e); + } + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + tokio::time::sleep(chain.slot_clock.slot_duration()).await; + } + }; + } +} + +/// A fully available block that is ready to be imported into fork choice. +#[derive(Clone, Debug, PartialEq)] +pub struct AvailableBlock { + block_root: Hash256, + block: Arc>, + blobs: Option>, +} + +impl AvailableBlock { + pub fn block(&self) -> &SignedBeaconBlock { + &self.block + } + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } + + pub fn blobs(&self) -> Option<&BlobSidecarList> { + self.blobs.as_ref() + } + + pub fn deconstruct( + self, + ) -> ( + Hash256, + Arc>, + Option>, + ) { + let AvailableBlock { + block_root, + block, + blobs, + } = self; + (block_root, block, blobs) + } +} + +#[derive(Debug, Clone)] +pub enum MaybeAvailableBlock { + /// This variant is fully available. + /// i.e. for pre-deneb blocks, it contains a (`SignedBeaconBlock`, `Blobs::None`) and for + /// post-4844 blocks, it contains a `SignedBeaconBlock` and a Blobs variant other than `Blobs::None`. + Available(AvailableBlock), + /// This variant is not fully available and requires blobs to become fully available. 
+ AvailabilityPending { + block_root: Hash256, + block: Arc>, + }, +} + +#[derive(Debug, Clone)] +pub enum MissingBlobs { + /// We know for certain these blobs are missing. + KnownMissing(Vec), + /// We think these blobs might be missing. + PossibleMissing(Vec), + /// Blobs are not required. + BlobsNotRequired, +} + +impl MissingBlobs { + pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self { + if is_deneb { + MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) + } else { + MissingBlobs::BlobsNotRequired + } + } + pub fn is_empty(&self) -> bool { + match self { + MissingBlobs::KnownMissing(v) => v.is_empty(), + MissingBlobs::PossibleMissing(v) => v.is_empty(), + MissingBlobs::BlobsNotRequired => true, + } + } + pub fn contains(&self, blob_id: &BlobIdentifier) -> bool { + match self { + MissingBlobs::KnownMissing(v) => v.contains(blob_id), + MissingBlobs::PossibleMissing(v) => v.contains(blob_id), + MissingBlobs::BlobsNotRequired => false, + } + } + pub fn remove(&mut self, blob_id: &BlobIdentifier) { + match self { + MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id), + MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id), + MissingBlobs::BlobsNotRequired => {} + } + } + pub fn indices(&self) -> Vec { + match self { + MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(), + MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(), + MissingBlobs::BlobsNotRequired => vec![], + } + } +} + +impl Into> for MissingBlobs { + fn into(self) -> Vec { + match self { + MissingBlobs::KnownMissing(v) => v, + MissingBlobs::PossibleMissing(v) => v, + MissingBlobs::BlobsNotRequired => vec![], + } + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs new file mode 100644 index 0000000000..f013cf649a --- /dev/null +++ 
b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs @@ -0,0 +1,562 @@ +use super::child_components::ChildComponents; +use super::state_lru_cache::DietAvailabilityPendingExecutedBlock; +use crate::blob_verification::KzgVerifiedBlob; +use crate::block_verification_types::AsBlock; +use crate::data_availability_checker::overflow_lru_cache::PendingComponents; +use crate::data_availability_checker::ProcessingComponents; +use kzg::KzgCommitment; +use ssz_types::FixedVector; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; + +/// Defines an interface for managing data availability with two key invariants: +/// +/// 1. If we haven't seen a block yet, we will insert the first blob for a given (block_root, index) +/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different +/// commitment. +/// 2. On block insertion, any non-matching blob commitments are evicted. +/// +/// Types implementing this trait can be used for validating and managing availability +/// of blocks and blobs in a cache-like data structure. +pub trait AvailabilityView { + /// The type representing a block in the implementation. + type BlockType: GetCommitments; + + /// The type representing a blob in the implementation. Must implement `Clone`. + type BlobType: Clone + GetCommitment; + + /// Returns an immutable reference to the cached block. + fn get_cached_block(&self) -> &Option; + + /// Returns an immutable reference to the fixed vector of cached blobs. + fn get_cached_blobs(&self) -> &FixedVector, E::MaxBlobsPerBlock>; + + /// Returns a mutable reference to the cached block. + fn get_cached_block_mut(&mut self) -> &mut Option; + + /// Returns a mutable reference to the fixed vector of cached blobs. + fn get_cached_blobs_mut( + &mut self, + ) -> &mut FixedVector, E::MaxBlobsPerBlock>; + + /// Checks if a block exists in the cache. 
+ /// + /// Returns: + /// - `true` if a block exists. + /// - `false` otherwise. + fn block_exists(&self) -> bool { + self.get_cached_block().is_some() + } + + /// Checks if a blob exists at the given index in the cache. + /// + /// Returns: + /// - `true` if a blob exists at the given index. + /// - `false` otherwise. + fn blob_exists(&self, blob_index: usize) -> bool { + self.get_cached_blobs() + .get(blob_index) + .map(|b| b.is_some()) + .unwrap_or(false) + } + + /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a + /// block. + /// + /// This corresponds to the number of commitments that are present in a block. + fn num_expected_blobs(&self) -> Option { + self.get_cached_block() + .as_ref() + .map(|b| b.get_commitments().len()) + } + + /// Returns the number of blobs that have been received and are stored in the cache. + fn num_received_blobs(&self) -> usize { + self.get_cached_blobs().iter().flatten().count() + } + + /// Inserts a block into the cache. + fn insert_block(&mut self, block: Self::BlockType) { + *self.get_cached_block_mut() = Some(block) + } + + /// Inserts a blob at a specific index in the cache. + /// + /// Existing blob at the index will be replaced. + fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) { + if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) { + *b = Some(blob); + } + } + + /// Merges a given set of blobs into the cache. + /// + /// Blobs are only inserted if: + /// 1. The blob entry at the index is empty and no block exists. + /// 2. The block exists and its commitment matches the blob's commitment. + fn merge_blobs(&mut self, blobs: FixedVector, E::MaxBlobsPerBlock>) { + for (index, blob) in blobs.iter().cloned().enumerate() { + let Some(blob) = blob else { continue }; + self.merge_single_blob(index, blob); + } + } + + /// Merges a single blob into the cache. + /// + /// Blobs are only inserted if: + /// 1. 
The blob entry at the index is empty and no block exists, or + /// 2. The block exists and its commitment matches the blob's commitment. + fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) { + let commitment = *blob.get_commitment(); + if let Some(cached_block) = self.get_cached_block() { + let block_commitment_opt = cached_block.get_commitments().get(index).copied(); + if let Some(block_commitment) = block_commitment_opt { + if block_commitment == commitment { + self.insert_blob_at_index(index, blob) + } + } + } else if !self.blob_exists(index) { + self.insert_blob_at_index(index, blob) + } + } + + /// Inserts a new block and revalidates the existing blobs against it. + /// + /// Blobs that don't match the new block's commitments are evicted. + fn merge_block(&mut self, block: Self::BlockType) { + self.insert_block(block); + let reinsert = std::mem::take(self.get_cached_blobs_mut()); + self.merge_blobs(reinsert); + } + + /// Checks if the block and all of its expected blobs are available in the cache. + /// + /// Returns `true` if both the block exists and the number of received blobs matches the number + /// of expected blobs. + fn is_available(&self) -> bool { + if let Some(num_expected_blobs) = self.num_expected_blobs() { + num_expected_blobs == self.num_received_blobs() + } else { + false + } + } +} + +/// Implements the `AvailabilityView` trait for a given struct. +/// +/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`. +/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait. +/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait. +/// - `$block_field`: The field name in the struct that holds the cached block. +/// - `$blob_field`: The field name in the struct that holds the cached blobs. +#[macro_export] +macro_rules! 
impl_availability_view { + ($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => { + impl AvailabilityView for $struct_name { + type BlockType = $block_type; + type BlobType = $blob_type; + + fn get_cached_block(&self) -> &Option { + &self.$block_field + } + + fn get_cached_blobs( + &self, + ) -> &FixedVector, E::MaxBlobsPerBlock> { + &self.$blob_field + } + + fn get_cached_block_mut(&mut self) -> &mut Option { + &mut self.$block_field + } + + fn get_cached_blobs_mut( + &mut self, + ) -> &mut FixedVector, E::MaxBlobsPerBlock> { + &mut self.$blob_field + } + } + }; +} + +impl_availability_view!( + ProcessingComponents, + KzgCommitments, + KzgCommitment, + block_commitments, + blob_commitments +); + +impl_availability_view!( + PendingComponents, + DietAvailabilityPendingExecutedBlock, + KzgVerifiedBlob, + executed_block, + verified_blobs +); + +impl_availability_view!( + ChildComponents, + Arc>, + Arc>, + downloaded_block, + downloaded_blobs +); + +pub trait GetCommitments { + fn get_commitments(&self) -> KzgCommitments; +} + +pub trait GetCommitment { + fn get_commitment(&self) -> &KzgCommitment; +} + +// These implementations are required to implement `AvailabilityView` for `ProcessingView`. +impl GetCommitments for KzgCommitments { + fn get_commitments(&self) -> KzgCommitments { + self.clone() + } +} +impl GetCommitment for KzgCommitment { + fn get_commitment(&self) -> &KzgCommitment { + self + } +} + +// These implementations are required to implement `AvailabilityView` for `PendingComponents`. 
+impl GetCommitments for DietAvailabilityPendingExecutedBlock { + fn get_commitments(&self) -> KzgCommitments { + self.as_block() + .message() + .body() + .blob_kzg_commitments() + .cloned() + .unwrap_or_default() + } +} + +impl GetCommitment for KzgVerifiedBlob { + fn get_commitment(&self) -> &KzgCommitment { + &self.as_blob().kzg_commitment + } +} + +// These implementations are required to implement `AvailabilityView` for `ChildComponents`. +impl GetCommitments for Arc> { + fn get_commitments(&self) -> KzgCommitments { + self.message() + .body() + .blob_kzg_commitments() + .ok() + .cloned() + .unwrap_or_default() + } +} +impl GetCommitment for Arc> { + fn get_commitment(&self) -> &KzgCommitment { + &self.kzg_commitment + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::block_verification_types::BlockImportData; + use crate::eth1_finalization_cache::Eth1FinalizationData; + use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::AvailabilityPendingExecutedBlock; + use crate::PayloadVerificationOutcome; + use fork_choice::PayloadVerificationStatus; + use rand::rngs::StdRng; + use rand::SeedableRng; + use state_processing::ConsensusContext; + use types::test_utils::TestRandom; + use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot}; + + type E = MainnetEthSpec; + + type Setup = ( + SignedBeaconBlock, + FixedVector>, ::MaxBlobsPerBlock>, + FixedVector>, ::MaxBlobsPerBlock>, + ); + + pub fn pre_setup() -> Setup { + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let (block, blobs_vec) = + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); + let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + + for blob in blobs_vec { + if let Some(b) = blobs.get_mut(blob.index as usize) { + *b = Some(blob); + } + } + + let mut invalid_blobs: FixedVector< + Option>, + ::MaxBlobsPerBlock, + > = FixedVector::default(); + for (index, blob) in blobs.iter().enumerate() { 
+ let mut invalid_blob_opt = blob.clone(); + if let Some(invalid_blob) = invalid_blob_opt.as_mut() { + invalid_blob.kzg_commitment = KzgCommitment::random_for_test(&mut rng); + } + *invalid_blobs.get_mut(index).unwrap() = invalid_blob_opt; + } + + (block, blobs, invalid_blobs) + } + + type ProcessingViewSetup = ( + KzgCommitments, + FixedVector, ::MaxBlobsPerBlock>, + FixedVector, ::MaxBlobsPerBlock>, + ); + + pub fn setup_processing_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + ) -> ProcessingViewSetup { + let commitments = block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + let blobs = FixedVector::from( + valid_blobs + .iter() + .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment)) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .iter() + .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment)) + .collect::>(), + ); + (commitments, blobs, invalid_blobs) + } + + type PendingComponentsSetup = ( + DietAvailabilityPendingExecutedBlock, + FixedVector>, ::MaxBlobsPerBlock>, + FixedVector>, ::MaxBlobsPerBlock>, + ); + + pub fn setup_pending_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + ) -> PendingComponentsSetup { + let blobs = FixedVector::from( + valid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::new(blob.clone())) + }) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .iter() + .map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| KzgVerifiedBlob::new(blob.clone())) + }) + .collect::>(), + ); + let dummy_parent = block.clone_as_blinded(); + let block = AvailabilityPendingExecutedBlock { + block: Arc::new(block), + import_data: BlockImportData { + block_root: Default::default(), + state: BeaconState::new(0, 
Default::default(), &ChainSpec::minimal()), + parent_block: dummy_parent, + parent_eth1_finalization_data: Eth1FinalizationData { + eth1_data: Default::default(), + eth1_deposit_index: 0, + }, + confirmed_state_roots: vec![], + consensus_context: ConsensusContext::new(Slot::new(0)), + }, + payload_verification_outcome: PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }, + }; + (block.into(), blobs, invalid_blobs) + } + + type ChildComponentsSetup = ( + Arc>, + FixedVector>>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, + ); + + pub fn setup_child_components( + block: SignedBeaconBlock, + valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + ) -> ChildComponentsSetup { + let blobs = FixedVector::from( + valid_blobs + .into_iter() + .map(|blob_opt| blob_opt.clone().map(Arc::new)) + .collect::>(), + ); + let invalid_blobs = FixedVector::from( + invalid_blobs + .into_iter() + .map(|blob_opt| blob_opt.clone().map(Arc::new)) + .collect::>(), + ); + (Arc::new(block), blobs, invalid_blobs) + } + + pub fn assert_cache_consistent>(cache: V) { + if let Some(cached_block) = cache.get_cached_block() { + let cached_block_commitments = cached_block.get_commitments(); + for index in 0..E::max_blobs_per_block() { + let block_commitment = cached_block_commitments.get(index).copied(); + let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); + let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); + assert_eq!(block_commitment, blob_commitment); + } + } else { + panic!("No cached block") + } + } + + pub fn assert_empty_blob_cache>(cache: V) { + for blob in cache.get_cached_blobs().iter() { + assert!(blob.is_none()); + } + } + + #[macro_export] + macro_rules! 
generate_tests { + ($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => { + mod $module_name { + use super::*; + use types::Hash256; + + #[test] + fn valid_block_invalid_blobs_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_block_valid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn invalid_blobs_valid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(random_blobs); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + + assert_empty_blob_cache(cache); + } + + #[test] + fn block_valid_blobs_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_block(block_commitments); + cache.merge_blobs(blobs); + cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn 
valid_blobs_block_invalid_blobs() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_block(block_commitments); + cache.merge_blobs(random_blobs); + + assert_cache_consistent(cache); + } + + #[test] + fn valid_blobs_invalid_blobs_block() { + let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs) = + $setup_fn(block_commitments, blobs, random_blobs); + + let block_root = Hash256::zero(); + let mut cache = <$type_name>::empty(block_root); + cache.merge_blobs(blobs); + cache.merge_blobs(random_blobs); + cache.merge_block(block_commitments); + + assert_cache_consistent(cache); + } + } + }; + } + + generate_tests!( + processing_components_tests, + ProcessingComponents::, + kzg_commitments, + processing_blobs, + setup_processing_components + ); + generate_tests!( + pending_components_tests, + PendingComponents, + executed_block, + verified_blobs, + setup_pending_components + ); + generate_tests!( + child_component_tests, + ChildComponents::, + downloaded_block, + downloaded_blobs, + setup_child_components + ); +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs new file mode 100644 index 0000000000..028bf9d67c --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs @@ -0,0 +1,54 @@ +use crate::block_verification_types::RpcBlock; +use crate::data_availability_checker::AvailabilityView; +use bls::Hash256; +use std::sync::Arc; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{EthSpec, SignedBeaconBlock}; + +/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct +/// is used to cache 
components as they are sent to the network service. We can't use the +/// data availability cache currently because any blocks or blobs without parents +/// won't pass validation and therefore won't make it into the cache. +pub struct ChildComponents { + pub block_root: Hash256, + pub downloaded_block: Option>>, + pub downloaded_blobs: FixedBlobSidecarList, +} + +impl From> for ChildComponents { + fn from(value: RpcBlock) -> Self { + let (block_root, block, blobs) = value.deconstruct(); + let fixed_blobs = blobs.map(|blobs| { + FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()) + }); + Self::new(block_root, Some(block), fixed_blobs) + } +} + +impl ChildComponents { + pub fn empty(block_root: Hash256) -> Self { + Self { + block_root, + downloaded_block: None, + downloaded_blobs: <_>::default(), + } + } + pub fn new( + block_root: Hash256, + block: Option>>, + blobs: Option>, + ) -> Self { + let mut cache = Self::empty(block_root); + if let Some(block) = block { + cache.merge_block(block); + } + if let Some(blobs) = blobs { + cache.merge_blobs(blobs); + } + cache + } + + pub fn clear_blobs(&mut self) { + self.downloaded_blobs = FixedBlobSidecarList::default(); + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs new file mode 100644 index 0000000000..b2979f2bf0 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -0,0 +1,78 @@ +use kzg::{Error as KzgError, KzgCommitment}; +use types::{BeaconStateError, Hash256}; + +#[derive(Debug)] +pub enum Error { + Kzg(KzgError), + KzgNotInitialized, + KzgVerificationFailed, + KzgCommitmentMismatch { + blob_commitment: KzgCommitment, + block_commitment: KzgCommitment, + }, + Unexpected, + SszTypes(ssz_types::Error), + MissingBlobs, + BlobIndexInvalid(u64), + StoreError(store::Error), + DecodeError(ssz::DecodeError), + InconsistentBlobBlockRoots { + block_root: Hash256, + 
blob_block_root: Hash256, + }, + ParentStateMissing(Hash256), + BlockReplayError(state_processing::BlockReplayError), + RebuildingStateCaches(BeaconStateError), +} + +pub enum ErrorCategory { + /// Internal Errors (not caused by peers) + Internal, + /// Errors caused by faulty / malicious peers + Malicious, +} + +impl Error { + pub fn category(&self) -> ErrorCategory { + match self { + Error::KzgNotInitialized + | Error::SszTypes(_) + | Error::MissingBlobs + | Error::StoreError(_) + | Error::DecodeError(_) + | Error::Unexpected + | Error::ParentStateMissing(_) + | Error::BlockReplayError(_) + | Error::RebuildingStateCaches(_) => ErrorCategory::Internal, + Error::Kzg(_) + | Error::BlobIndexInvalid(_) + | Error::KzgCommitmentMismatch { .. } + | Error::KzgVerificationFailed + | Error::InconsistentBlobBlockRoots { .. } => ErrorCategory::Malicious, + } + } +} + +impl From for Error { + fn from(value: ssz_types::Error) -> Self { + Self::SszTypes(value) + } +} + +impl From for Error { + fn from(value: store::Error) -> Self { + Self::StoreError(value) + } +} + +impl From for Error { + fn from(value: ssz::DecodeError) -> Self { + Self::DecodeError(value) + } +} + +impl From for Error { + fn from(value: state_processing::BlockReplayError) -> Self { + Self::BlockReplayError(value) + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs new file mode 100644 index 0000000000..e2eef45d25 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -0,0 +1,1696 @@ +//! This module implements a LRU cache for storing partially available blocks and blobs. +//! When the cache overflows, the least recently used items are persisted to the database. +//! This prevents lighthouse from using too much memory storing unfinalized blocks and blobs +//! if the chain were to lose finality. +//! +//! ## Deadlock safety +//! +//! 
The main object in this module is the `OverflowLruCache`. It contains two locks: +//! +//! - `self.critical` is an `RwLock` that protects content stored in memory. +//! - `self.maintenance_lock` is held when moving data between memory and disk. +//! +//! You mostly need to ensure that you don't try to hold the critical lock more than once +//! +//! ## Basic Algorithm +//! +//! As blocks and blobs come in from the network, their components are stored in memory in +//! this cache. When a block becomes fully available, it is removed from the cache and +//! imported into fork-choice. Blocks/blobs that remain unavailable will linger in the +//! cache until they are older than the finalized epoch or older than the data availability +//! cutoff. In the event the chain is not finalizing, the cache will eventually overflow and +//! the least recently used items will be persisted to disk. When this happens, we will still +//! store the hash of the block in memory so we always know we have data for that block +//! without needing to check the database. +//! +//! When the client is shut down, all pending components are persisted in the database. +//! On startup, the keys of these components are stored in memory and will be loaded in +//! the cache when they are accessed. 
+ +use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; +use crate::beacon_chain::BeaconStore; +use crate::blob_verification::KzgVerifiedBlob; +use crate::block_verification_types::{ + AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, +}; +use crate::data_availability_checker::availability_view::AvailabilityView; +use crate::data_availability_checker::{Availability, AvailabilityCheckError}; +use crate::store::{DBColumn, KeyValueStore}; +use crate::BeaconChainTypes; +use lru::LruCache; +use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use std::{collections::HashSet, sync::Arc}; +use types::blob_sidecar::BlobIdentifier; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; + +/// This represents the components of a partially available block +/// +/// The blobs are all gossip and kzg verified. +/// The block has completed all verifications except the availability check. +#[derive(Encode, Decode, Clone)] +pub struct PendingComponents { + pub block_root: Hash256, + pub verified_blobs: FixedVector>, T::MaxBlobsPerBlock>, + pub executed_block: Option>, +} + +impl PendingComponents { + pub fn empty(block_root: Hash256) -> Self { + Self { + block_root, + verified_blobs: FixedVector::default(), + executed_block: None, + } + } + + /// Verifies an `SignedBeaconBlock` against a set of KZG verified blobs. + /// This does not check whether a block *should* have blobs, these checks should have been + /// completed when producing the `AvailabilityPendingBlock`. + /// + /// WARNING: This function can potentially take a lot of time if the state needs to be + /// reconstructed from disk. Ensure you are not holding any write locks while calling this. 
+ pub fn make_available(self, recover: R) -> Result, AvailabilityCheckError> + where + R: FnOnce( + DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError>, + { + let Self { + block_root, + verified_blobs, + executed_block, + } = self; + + let Some(diet_executed_block) = executed_block else { + return Err(AvailabilityCheckError::Unexpected); + }; + let num_blobs_expected = diet_executed_block.num_blobs_expected(); + let Some(verified_blobs) = verified_blobs + .into_iter() + .cloned() + .map(|b| b.map(|b| b.to_blob())) + .take(num_blobs_expected) + .collect::>>() + else { + return Err(AvailabilityCheckError::Unexpected); + }; + let verified_blobs = VariableList::new(verified_blobs)?; + + let executed_block = recover(diet_executed_block)?; + + let AvailabilityPendingExecutedBlock { + block, + import_data, + payload_verification_outcome, + } = executed_block; + + let available_block = AvailableBlock { + block_root, + block, + blobs: Some(verified_blobs), + }; + Ok(Availability::Available(Box::new( + AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), + ))) + } + + pub fn epoch(&self) -> Option { + self.executed_block + .as_ref() + .map(|pending_block| pending_block.as_block().epoch()) + .or_else(|| { + for maybe_blob in self.verified_blobs.iter() { + if maybe_blob.is_some() { + return maybe_blob.as_ref().map(|kzg_verified_blob| { + kzg_verified_blob.as_blob().slot.epoch(T::slots_per_epoch()) + }); + } + } + None + }) + } +} + +/// Blocks and blobs are stored in the database sequentially so that it's +/// fast to iterate over all the data for a particular block. 
+#[derive(Debug, PartialEq)] +enum OverflowKey { + Block(Hash256), + Blob(Hash256, u8), +} + +impl OverflowKey { + pub fn from_block_root(block_root: Hash256) -> Self { + Self::Block(block_root) + } + + pub fn from_blob_id( + blob_id: BlobIdentifier, + ) -> Result { + if blob_id.index > E::max_blobs_per_block() as u64 || blob_id.index > u8::MAX as u64 { + return Err(AvailabilityCheckError::BlobIndexInvalid(blob_id.index)); + } + Ok(Self::Blob(blob_id.block_root, blob_id.index as u8)) + } + + pub fn root(&self) -> &Hash256 { + match self { + Self::Block(root) => root, + Self::Blob(root, _) => root, + } + } +} + +/// A wrapper around BeaconStore that implements various +/// methods used for saving and retrieving blocks / blobs +/// from the store (for organization) +struct OverflowStore(BeaconStore); + +impl OverflowStore { + /// Store pending components in the database + pub fn persist_pending_components( + &self, + block_root: Hash256, + mut pending_components: PendingComponents, + ) -> Result<(), AvailabilityCheckError> { + let col = DBColumn::OverflowLRUCache; + + if let Some(block) = pending_components.executed_block.take() { + let key = OverflowKey::from_block_root(block_root); + self.0 + .hot_db + .put_bytes(col.as_str(), &key.as_ssz_bytes(), &block.as_ssz_bytes())? + } + + for blob in Vec::from(pending_components.verified_blobs) + .into_iter() + .flatten() + { + let key = OverflowKey::from_blob_id::(BlobIdentifier { + block_root, + index: blob.blob_index(), + })?; + + self.0 + .hot_db + .put_bytes(col.as_str(), &key.as_ssz_bytes(), &blob.as_ssz_bytes())? 
+ } + + Ok(()) + } + + /// Load the pending components that we have in the database for a given block root + pub fn load_pending_components( + &self, + block_root: Hash256, + ) -> Result>, AvailabilityCheckError> { + // read everything from disk and reconstruct + let mut maybe_pending_components = None; + for res in self + .0 + .hot_db + .iter_raw_entries(DBColumn::OverflowLRUCache, block_root.as_bytes()) + { + let (key_bytes, value_bytes) = res?; + match OverflowKey::from_ssz_bytes(&key_bytes)? { + OverflowKey::Block(_) => { + maybe_pending_components + .get_or_insert_with(|| PendingComponents::empty(block_root)) + .executed_block = + Some(DietAvailabilityPendingExecutedBlock::from_ssz_bytes( + value_bytes.as_slice(), + )?); + } + OverflowKey::Blob(_, index) => { + *maybe_pending_components + .get_or_insert_with(|| PendingComponents::empty(block_root)) + .verified_blobs + .get_mut(index as usize) + .ok_or(AvailabilityCheckError::BlobIndexInvalid(index as u64))? = + Some(KzgVerifiedBlob::from_ssz_bytes(value_bytes.as_slice())?); + } + } + } + + Ok(maybe_pending_components) + } + + /// Returns the hashes of all the blocks we have any data for on disk + pub fn read_keys_on_disk(&self) -> Result, AvailabilityCheckError> { + let mut disk_keys = HashSet::new(); + for res in self.0.hot_db.iter_raw_keys(DBColumn::OverflowLRUCache, &[]) { + let key_bytes = res?; + disk_keys.insert(*OverflowKey::from_ssz_bytes(&key_bytes)?.root()); + } + Ok(disk_keys) + } + + /// Load a single blob from the database + pub fn load_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + let key = OverflowKey::from_blob_id::(*blob_id)?; + + self.0 + .hot_db + .get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())? 
+ .map(|blob_bytes| Arc::>::from_ssz_bytes(blob_bytes.as_slice())) + .transpose() + .map_err(|e| e.into()) + } + + /// Delete a set of keys from the database + pub fn delete_keys(&self, keys: &Vec) -> Result<(), AvailabilityCheckError> { + for key in keys { + self.0 + .hot_db + .key_delete(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?; + } + Ok(()) + } +} + +/// This data stores the *critical* data that we need to keep in memory +/// protected by the RWLock +struct Critical { + /// This is the LRU cache of pending components + pub in_memory: LruCache>, + /// This holds all the roots of the blocks for which we have + /// `PendingComponents` in the database. + pub store_keys: HashSet, +} + +impl Critical { + pub fn new(capacity: usize) -> Self { + Self { + in_memory: LruCache::new(capacity), + store_keys: HashSet::new(), + } + } + + pub fn reload_store_keys( + &mut self, + overflow_store: &OverflowStore, + ) -> Result<(), AvailabilityCheckError> { + let disk_keys = overflow_store.read_keys_on_disk()?; + self.store_keys = disk_keys; + Ok(()) + } + + /// This only checks for the blobs in memory + pub fn peek_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + if let Some(pending_components) = self.in_memory.peek(&blob_id.block_root) { + Ok(pending_components + .verified_blobs + .get(blob_id.index as usize) + .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_id.index))? + .as_ref() + .map(|blob| blob.clone_blob())) + } else { + Ok(None) + } + } + + /// Puts the pending components in the LRU cache. 
If the cache + /// is at capacity, the LRU entry is written to the store first + pub fn put_pending_components( + &mut self, + block_root: Hash256, + pending_components: PendingComponents, + overflow_store: &OverflowStore, + ) -> Result<(), AvailabilityCheckError> { + if self.in_memory.len() == self.in_memory.cap() { + // cache will overflow, must write lru entry to disk + if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() { + overflow_store.persist_pending_components(lru_key, lru_value)?; + self.store_keys.insert(lru_key); + } + } + self.in_memory.put(block_root, pending_components); + Ok(()) + } + + /// Removes and returns the pending_components corresponding to + /// the `block_root` or `None` if it does not exist + pub fn pop_pending_components( + &mut self, + block_root: Hash256, + store: &OverflowStore, + ) -> Result>, AvailabilityCheckError> { + match self.in_memory.pop_entry(&block_root) { + Some((_, pending_components)) => Ok(Some(pending_components)), + None => { + // not in memory, is it in the store? + if self.store_keys.remove(&block_root) { + // We don't need to remove the data from the store as we have removed it from + // `store_keys` so we won't go looking for it on disk. The maintenance thread + // will remove it from disk the next time it runs. + store.load_pending_components(block_root) + } else { + Ok(None) + } + } + } + } +} + +/// This is the main struct for this module. Outside methods should +/// interact with the cache through this. +pub struct OverflowLRUCache { + /// Contains all the data we keep in memory, protected by an RwLock + critical: RwLock>, + /// This is how we read and write components to the disk + overflow_store: OverflowStore, + /// This cache holds a limited number of states in memory and reconstructs them + /// from disk when necessary. 
This is necessary until we merge tree-states + state_cache: StateLRUCache, + /// Mutex to guard maintenance methods which move data between disk and memory + maintenance_lock: Mutex<()>, + /// The capacity of the LRU cache + capacity: usize, +} + +impl OverflowLRUCache { + pub fn new( + capacity: usize, + beacon_store: BeaconStore, + spec: ChainSpec, + ) -> Result { + let overflow_store = OverflowStore(beacon_store.clone()); + let mut critical = Critical::new(capacity); + critical.reload_store_keys(&overflow_store)?; + Ok(Self { + critical: RwLock::new(critical), + overflow_store, + state_cache: StateLRUCache::new(beacon_store, spec), + maintenance_lock: Mutex::new(()), + capacity, + }) + } + + /// Fetch a blob from the cache without affecting the LRU ordering + pub fn peek_blob( + &self, + blob_id: &BlobIdentifier, + ) -> Result>>, AvailabilityCheckError> { + let read_lock = self.critical.read(); + if let Some(blob) = read_lock.peek_blob(blob_id)? { + Ok(Some(blob)) + } else if read_lock.store_keys.contains(&blob_id.block_root) { + drop(read_lock); + self.overflow_store.load_blob(blob_id) + } else { + Ok(None) + } + } + + pub fn put_kzg_verified_blobs( + &self, + block_root: Hash256, + kzg_verified_blobs: Vec>, + ) -> Result, AvailabilityCheckError> { + let mut fixed_blobs = FixedVector::default(); + + // Initial check to ensure all provided blobs have a consistent block root. + for blob in kzg_verified_blobs { + let blob_block_root = blob.block_root(); + if blob_block_root != block_root { + return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { + block_root, + blob_block_root, + }); + } + if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { + *blob_opt = Some(blob); + } + } + + let mut write_lock = self.critical.write(); + + // Grab existing entry or create a new entry. + let mut pending_components = write_lock + .pop_pending_components(block_root, &self.overflow_store)? 
+ .unwrap_or_else(|| PendingComponents::empty(block_root)); + + // Merge in the blobs. + pending_components.merge_blobs(fixed_blobs); + + if pending_components.is_available() { + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) + } else { + write_lock.put_pending_components( + block_root, + pending_components, + &self.overflow_store, + )?; + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check if we have all the blobs for a block. If we do, return the Availability variant that + /// triggers import of the block. + pub fn put_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let mut write_lock = self.critical.write(); + let block_root = executed_block.import_data.block_root; + + // register the block to get the diet block + let diet_executed_block = self + .state_cache + .register_pending_executed_block(executed_block); + + // Grab existing entry or create a new entry. + let mut pending_components = write_lock + .pop_pending_components(block_root, &self.overflow_store)? + .unwrap_or_else(|| PendingComponents::empty(block_root)); + + // Merge in the block. + pending_components.merge_block(diet_executed_block); + + // Check if we have all components and entire set is consistent. 
+ if pending_components.is_available() { + // No need to hold the write lock anymore + drop(write_lock); + pending_components.make_available(|diet_block| { + self.state_cache.recover_pending_executed_block(diet_block) + }) + } else { + write_lock.put_pending_components( + block_root, + pending_components, + &self.overflow_store, + )?; + Ok(Availability::MissingComponents(block_root)) + } + } + + /// write all in memory objects to disk + pub fn write_all_to_disk(&self) -> Result<(), AvailabilityCheckError> { + let maintenance_lock = self.maintenance_lock.lock(); + let mut critical_lock = self.critical.write(); + + let mut swap_lru = LruCache::new(self.capacity); + std::mem::swap(&mut swap_lru, &mut critical_lock.in_memory); + + for (root, pending_components) in swap_lru.into_iter() { + self.overflow_store + .persist_pending_components(root, pending_components)?; + critical_lock.store_keys.insert(root); + } + + drop(critical_lock); + drop(maintenance_lock); + Ok(()) + } + + /// maintain the cache + pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { + // ensure memory usage is below threshold + let threshold = self.capacity * 3 / 4; + self.maintain_threshold(threshold, cutoff_epoch)?; + // clean up any keys on the disk that shouldn't be there + self.prune_disk(cutoff_epoch)?; + // clean up any lingering states in the state cache + self.state_cache.do_maintenance(cutoff_epoch); + Ok(()) + } + + /// Enforce that the size of the cache is below a given threshold by + /// moving the least recently used items to disk. 
    fn maintain_threshold(
        &self,
        threshold: usize,
        cutoff_epoch: Epoch,
    ) -> Result<(), AvailabilityCheckError> {
        // ensure only one thread at a time can be deleting things from the disk or
        // moving things between memory and storage
        let maintenance_lock = self.maintenance_lock.lock();

        let mut stored = self.critical.read().in_memory.len();
        while stored > threshold {
            // Take an upgradable read so the LRU entry can be inspected (and
            // cloned) without blocking readers, then atomically upgraded to a
            // write lock only if we decide to delete in place.
            let read_lock = self.critical.upgradable_read();
            let lru_entry = read_lock
                .in_memory
                .peek_lru()
                .map(|(key, value)| (*key, value.clone()));

            // Cache is empty: nothing left to evict.
            let Some((lru_root, lru_pending_components)) = lru_entry else {
                break;
            };

            // Entries older than the cutoff epoch (or with no determinable
            // epoch at all) are dropped outright instead of spilled to disk.
            if lru_pending_components
                .epoch()
                .map(|epoch| epoch < cutoff_epoch)
                .unwrap_or(true)
            {
                // this data is no longer needed -> delete it
                let mut write_lock = RwLockUpgradableReadGuard::upgrade(read_lock);
                write_lock.in_memory.pop_entry(&lru_root);
                stored = write_lock.in_memory.len();
                continue;
            } else {
                drop(read_lock);
            }

            // write the lru entry to disk (we aren't holding any critical locks while we do this)
            self.overflow_store
                .persist_pending_components(lru_root, lru_pending_components)?;
            // now that we've written to disk, grab the critical write lock
            let mut write_lock = self.critical.write();
            if let Some((new_lru_root_ref, _)) = write_lock.in_memory.peek_lru() {
                // need to ensure the entry we just wrote to disk wasn't updated
                // while we were writing and is still the LRU entry
                if *new_lru_root_ref == lru_root {
                    // it is still LRU entry -> delete it from memory & record that it's on disk
                    write_lock.in_memory.pop_entry(&lru_root);
                    write_lock.store_keys.insert(lru_root);
                }
            }
            stored = write_lock.in_memory.len();
            drop(write_lock);
        }

        drop(maintenance_lock);
        Ok(())
    }

    /// Delete any data on disk that shouldn't be there. This can happen if
    /// 1. The entry has been moved back to memory (or become fully available)
    /// 2.
The entry belongs to a block beyond the cutoff epoch + fn prune_disk(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { + // ensure only one thread at a time can be deleting things from the disk or + // moving things between memory and storage + let maintenance_lock = self.maintenance_lock.lock(); + + struct BlockData { + keys: Vec, + root: Hash256, + epoch: Epoch, + } + + let delete_if_outdated = |cache: &OverflowLRUCache, + block_data: Option| + -> Result<(), AvailabilityCheckError> { + let Some(block_data) = block_data else { + return Ok(()); + }; + let not_in_store_keys = !cache.critical.read().store_keys.contains(&block_data.root); + if not_in_store_keys { + // these keys aren't supposed to be on disk + cache.overflow_store.delete_keys(&block_data.keys)?; + } else { + // check this data is still relevant + if block_data.epoch < cutoff_epoch { + // this data is no longer needed -> delete it + self.overflow_store.delete_keys(&block_data.keys)?; + } + } + Ok(()) + }; + + let mut current_block_data: Option = None; + for res in self + .overflow_store + .0 + .hot_db + .iter_raw_entries(DBColumn::OverflowLRUCache, &[]) + { + let (key_bytes, value_bytes) = res?; + let overflow_key = OverflowKey::from_ssz_bytes(&key_bytes)?; + let current_root = *overflow_key.root(); + + match &mut current_block_data { + Some(block_data) if block_data.root == current_root => { + // still dealing with the same block + block_data.keys.push(overflow_key); + } + _ => { + // first time encountering data for this block + delete_if_outdated(self, current_block_data)?; + let current_epoch = match &overflow_key { + OverflowKey::Block(_) => { + DietAvailabilityPendingExecutedBlock::::from_ssz_bytes( + value_bytes.as_slice(), + )? + .as_block() + .epoch() + } + OverflowKey::Blob(_, _) => { + KzgVerifiedBlob::::from_ssz_bytes(value_bytes.as_slice())? 
+ .as_blob() + .slot + .epoch(T::EthSpec::slots_per_epoch()) + } + }; + current_block_data = Some(BlockData { + keys: vec![overflow_key], + root: current_root, + epoch: current_epoch, + }); + } + } + } + // can't fall off the end + delete_if_outdated(self, current_block_data)?; + + drop(maintenance_lock); + Ok(()) + } + + #[cfg(test)] + /// get the state cache for inspection (used only for tests) + pub fn state_lru_cache(&self) -> &StateLRUCache { + &self.state_cache + } +} + +impl ssz::Encode for OverflowKey { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_append(&self, buf: &mut Vec) { + match self { + OverflowKey::Block(block_hash) => { + block_hash.ssz_append(buf); + buf.push(0u8) + } + OverflowKey::Blob(block_hash, index) => { + block_hash.ssz_append(buf); + buf.push(*index + 1) + } + } + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + 1 + } + + fn ssz_bytes_len(&self) -> usize { + match self { + Self::Block(root) => root.ssz_bytes_len() + 1, + Self::Blob(root, _) => root.ssz_bytes_len() + 1, + } + } +} + +impl ssz::Decode for OverflowKey { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + 1 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let h256_len = ::ssz_fixed_len(); + let expected = h256_len + 1; + + if len != expected { + Err(ssz::DecodeError::InvalidByteLength { len, expected }) + } else { + let root_bytes = bytes + .get(..h256_len) + .ok_or(ssz::DecodeError::OutOfBoundsByte { i: 0 })?; + let block_root = Hash256::from_ssz_bytes(root_bytes)?; + let id_byte = *bytes + .get(h256_len) + .ok_or(ssz::DecodeError::OutOfBoundsByte { i: h256_len })?; + match id_byte { + 0 => Ok(OverflowKey::Block(block_root)), + n => Ok(OverflowKey::Blob(block_root, n - 1)), + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + blob_verification::{ + validate_blob_sidecar_for_gossip, verify_kzg_for_blob, GossipVerifiedBlob, + }, + 
block_verification::PayloadVerificationOutcome, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::STATE_LRU_CAPACITY, + eth1_finalization_cache::Eth1FinalizationData, + test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, + }; + use fork_choice::PayloadVerificationStatus; + use logging::test_logger; + use slog::{info, Logger}; + use state_processing::ConsensusContext; + use std::collections::{BTreeMap, HashMap, VecDeque}; + use std::ops::AddAssign; + use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; + use tempfile::{tempdir, TempDir}; + use types::{ChainSpec, ExecPayload, MinimalEthSpec}; + + const LOW_VALIDATOR_COUNT: usize = 32; + + fn get_store_with_spec( + db_path: &TempDir, + spec: ChainSpec, + log: Logger, + ) -> Arc, LevelDB>> { + let hot_path = db_path.path().join("hot_db"); + let cold_path = db_path.path().join("cold_db"); + let config = StoreConfig::default(); + + HotColdDB::open( + &hot_path, + &cold_path, + None, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") + } + + // get a beacon chain harness advanced to just before deneb fork + async fn get_deneb_chain( + log: Logger, + db_path: &TempDir, + ) -> BeaconChainHarness> { + let altair_fork_epoch = Epoch::new(1); + let bellatrix_fork_epoch = Epoch::new(2); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec.deneb_fork_epoch = Some(deneb_fork_epoch); + + let chain_store = get_store_with_spec::(db_path, spec.clone(), log.clone()); + let validators_keypairs = + 
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.clone()) + .logger(log.clone()) + .keypairs(validators_keypairs) + .fresh_disk_store(chain_store) + .mock_execution_layer() + .build(); + + // go to bellatrix slot + harness.extend_to_slot(bellatrix_fork_slot).await; + let merge_head = &harness.chain.head_snapshot().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), bellatrix_fork_slot); + assert!( + merge_head + .message() + .body() + .execution_payload() + .unwrap() + .is_default_with_empty_roots(), + "Merge head is default payload" + ); + // Trigger the terminal PoW block. + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + // go right before deneb slot + harness.extend_to_slot(deneb_fork_slot - 1).await; + + harness + } + + #[test] + fn overflow_key_encode_decode_equality() { + type E = types::MainnetEthSpec; + let key_block = OverflowKey::Block(Hash256::random()); + let key_blob_0 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 0, + }) + .expect("should create overflow key 0"); + let key_blob_1 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 1, + }) + .expect("should create overflow key 1"); + let key_blob_2 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 2, + }) + .expect("should create overflow key 2"); + let key_blob_3 = OverflowKey::from_blob_id::(BlobIdentifier { + block_root: Hash256::random(), + index: 3, + }) + .expect("should create overflow key 3"); + + let keys = vec![key_block, key_blob_0, key_blob_1, key_blob_2, key_blob_3]; + for key in keys { + let encoded = key.as_ssz_bytes(); + let decoded = OverflowKey::from_ssz_bytes(&encoded).expect("should decode"); + assert_eq!(key, decoded, "Encoded and decoded keys should be equal"); + } + } + + async fn 
availability_pending_block( + harness: &BeaconChainHarness>, + ) -> ( + AvailabilityPendingExecutedBlock, + Vec>>, + ) + where + E: EthSpec, + Hot: ItemStore, + Cold: ItemStore, + { + let chain = &harness.chain; + let log = chain.log.clone(); + let head = chain.head_snapshot(); + let parent_state = head.beacon_state.clone_with_only_committee_caches(); + + let target_slot = chain.slot().expect("should get slot") + 1; + let parent_root = head.beacon_block_root; + let parent_block = chain + .get_blinded_block(&parent_root) + .expect("should get block") + .expect("should have block"); + + let parent_eth1_finalization_data = Eth1FinalizationData { + eth1_data: parent_block.message().body().eth1_data().clone(), + eth1_deposit_index: 0, + }; + + let (signed_beacon_block_hash, (block, maybe_blobs), state) = harness + .add_block_at_slot(target_slot, parent_state) + .await + .expect("should add block"); + let block_root = signed_beacon_block_hash.into(); + assert_eq!( + block_root, + block.canonical_root(), + "block root should match" + ); + + // log kzg commitments + info!(log, "printing kzg commitments"); + for comm in Vec::from( + block + .message() + .body() + .blob_kzg_commitments() + .expect("should be deneb fork") + .clone(), + ) { + info!(log, "kzg commitment"; "commitment" => ?comm); + } + info!(log, "done printing kzg commitments"); + + let gossip_verified_blobs = if let Some(blobs) = maybe_blobs { + Vec::from(blobs) + .into_iter() + .map(|signed_blob| { + let subnet = signed_blob.message.index; + validate_blob_sidecar_for_gossip(signed_blob, subnet, &harness.chain) + .expect("should validate blob") + }) + .collect() + } else { + vec![] + }; + + let slot = block.slot(); + let consensus_context = ConsensusContext::::new(slot); + let import_data: BlockImportData = BlockImportData { + block_root, + state, + parent_block, + parent_eth1_finalization_data, + confirmed_state_roots: vec![], + consensus_context, + }; + + let payload_verification_outcome = 
PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }; + + let availability_pending_block = AvailabilityPendingExecutedBlock { + block: Arc::new(block), + import_data, + payload_verification_outcome, + }; + + (availability_pending_block, gossip_verified_blobs) + } + + async fn setup_harness_and_cache( + capacity: usize, + ) -> ( + BeaconChainHarness>, + Arc>, + TempDir, + ) + where + E: EthSpec, + T: BeaconChainTypes, ColdStore = LevelDB, EthSpec = E>, + { + let log = test_logger(); + let chain_db_path = tempdir().expect("should get temp dir"); + let harness = get_deneb_chain(log.clone(), &chain_db_path).await; + let spec = harness.spec.clone(); + let test_store = harness.chain.store.clone(); + let cache = Arc::new( + OverflowLRUCache::::new(capacity, test_store, spec.clone()) + .expect("should create cache"), + ); + (harness, cache, chain_db_path) + } + + #[tokio::test] + async fn overflow_cache_test_insert_components() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = 4; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let (pending_block, blobs) = availability_pending_block(&harness).await; + let root = pending_block.import_data.block_root; + + let blobs_expected = pending_block.num_blobs_expected(); + assert_eq!( + blobs.len(), + blobs_expected, + "should have expected number of blobs" + ); + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + if blobs_expected == 0 { + assert!( + matches!(availability, Availability::Available(_)), + "block doesn't have blobs, should be available" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 0, + "cache should be empty because we don't have blobs" + ); + } else { + assert!( + matches!(availability, Availability::MissingComponents(_)), 
+ "should be pending blobs" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 1, + "cache should have one block" + ); + assert!( + cache.critical.read().in_memory.peek(&root).is_some(), + "newly inserted block should exist in memory" + ); + } + + let kzg = harness + .chain + .kzg + .as_ref() + .cloned() + .expect("kzg should exist"); + let mut kzg_verified_blobs = Vec::new(); + for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); + let availability = cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + if blob_index == blobs_expected - 1 { + assert!(matches!(availability, Availability::Available(_))); + } else { + assert!(matches!(availability, Availability::MissingComponents(_))); + assert_eq!(cache.critical.read().in_memory.len(), 1); + } + } + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty now that all components available" + ); + + let (pending_block, blobs) = availability_pending_block(&harness).await; + let blobs_expected = pending_block.num_blobs_expected(); + assert_eq!( + blobs.len(), + blobs_expected, + "should have expected number of blobs" + ); + let root = pending_block.import_data.block_root; + let mut kzg_verified_blobs = vec![]; + for gossip_blob in blobs { + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); + let availability = cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + assert_eq!(cache.critical.read().in_memory.len(), 1); + } + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); 
+ assert!( + matches!(availability, Availability::Available(_)), + "block should be available: {:?}", + availability + ); + assert!( + cache.critical.read().in_memory.is_empty(), + "cache should be empty now that all components available" + ); + } + + #[tokio::test] + async fn overflow_cache_test_overflow() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = 4; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut roots = VecDeque::new(); + while pending_blobs.len() < capacity + 1 { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let root = pending_block.block.canonical_root(); + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + roots.push_back(root); + } + + for i in 0..capacity { + cache + .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) + .expect("should put block"); + assert_eq!(cache.critical.read().in_memory.len(), i + 1); + } + for root in roots.iter().take(capacity) { + assert!(cache.critical.read().in_memory.peek(root).is_some()); + } + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache should be full" + ); + // the first block should be the lru entry + assert_eq!( + *cache + .critical + .read() + .in_memory + .peek_lru() + .expect("should exist") + .0, + roots[0], + "first block should be lru" + ); + + cache + .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) + .expect("should put block"); + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache should be full" + ); + assert!( + cache.critical.read().in_memory.peek(&roots[0]).is_none(), + "first block should be evicted" + ); + assert_eq!( + *cache + .critical + .read() + .in_memory + .peek_lru() + 
.expect("should exist") + .0, + roots[1], + "second block should be lru" + ); + + assert!(cache + .overflow_store + .load_pending_components(roots[0]) + .expect("should exist") + .is_some()); + + let threshold = capacity * 3 / 4; + cache + .maintain_threshold(threshold, Epoch::new(0)) + .expect("should maintain threshold"); + assert_eq!( + cache.critical.read().in_memory.len(), + threshold, + "cache should have been maintained" + ); + + let store_keys = cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys"); + assert_eq!(store_keys.len(), 2); + assert!(store_keys.contains(&roots[0])); + assert!(store_keys.contains(&roots[1])); + assert!(cache.critical.read().store_keys.contains(&roots[0])); + assert!(cache.critical.read().store_keys.contains(&roots[1])); + + let kzg = harness + .chain + .kzg + .as_ref() + .cloned() + .expect("kzg should exist"); + + let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); + let expected_blobs = blobs_0.len(); + let mut kzg_verified_blobs = vec![]; + for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); + let availability = cache + .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone()) + .expect("should put blob"); + if blob_index == expected_blobs - 1 { + assert!(matches!(availability, Availability::Available(_))); + } else { + // the first block should be brought back into memory + assert!( + cache.critical.read().in_memory.peek(&roots[0]).is_some(), + "first block should be in memory" + ); + assert!(matches!(availability, Availability::MissingComponents(_))); + } + } + assert_eq!( + cache.critical.read().in_memory.len(), + threshold, + "cache should no longer have the first block" + ); + cache.prune_disk(Epoch::new(0)).expect("should prune disk"); + assert!( + cache + .overflow_store + 
.load_pending_components(roots[1]) + .expect("no error") + .is_some(), + "second block should still be on disk" + ); + assert!( + cache + .overflow_store + .load_pending_components(roots[0]) + .expect("no error") + .is_none(), + "first block should not be on disk" + ); + } + + #[tokio::test] + async fn overflow_cache_test_maintenance() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = E::slots_per_epoch() as usize; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let n_epochs = 4; + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut epoch_count = BTreeMap::new(); + while pending_blobs.len() < n_epochs * capacity { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let epoch = pending_block + .block + .as_block() + .slot() + .epoch(E::slots_per_epoch()); + epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); + + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + } + + let kzg = harness + .chain + .kzg + .as_ref() + .cloned() + .expect("kzg should exist"); + + for _ in 0..(n_epochs * capacity) { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); + let block_root = pending_block.block.as_block().canonical_root(); + let expected_blobs = pending_block.num_blobs_expected(); + if expected_blobs > 1 { + // might as well add a blob too + let one_blob = pending_block_blobs + .pop() + .expect("should have at least one blob"); + let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + let kzg_verified_blobs = vec![kzg_verified_blob]; + // generate random boolean + let block_first = (rand::random::() % 2) == 0; + if block_first { + let availability = cache + 
.put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "availabilty should be pending blobs: {:?}", + availability + ); + } else { + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + let root = pending_block.block.as_block().canonical_root(); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + } + } else { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + } + } + + // now we should have a full cache spanning multiple epochs + // run the maintenance routine for increasing epochs and ensure that the cache is pruned + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache memory should be full" + ); + let store_keys = cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys"); + assert_eq!( + store_keys.len(), + capacity * (n_epochs - 1), + "cache disk should have the rest" + ); + let mut expected_length = n_epochs * capacity; + for (epoch, count) in epoch_count { + cache + .do_maintenance(epoch + 1) + .expect("should run maintenance"); + let disk_keys = cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(); + let mem_keys = cache.critical.read().in_memory.len(); + expected_length -= count; + info!( + 
harness.chain.log, + "EPOCH: {} DISK KEYS: {} MEM KEYS: {} TOTAL: {} EXPECTED: {}", + epoch, + disk_keys, + mem_keys, + (disk_keys + mem_keys), + std::cmp::max(expected_length, capacity * 3 / 4), + ); + assert_eq!( + (disk_keys + mem_keys), + std::cmp::max(expected_length, capacity * 3 / 4), + "cache should be pruned" + ); + } + } + + #[tokio::test] + async fn overflow_cache_test_persist_recover() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = E::slots_per_epoch() as usize; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let n_epochs = 4; + let mut pending_blocks = VecDeque::new(); + let mut pending_blobs = VecDeque::new(); + let mut epoch_count = BTreeMap::new(); + while pending_blobs.len() < n_epochs * capacity { + let (pending_block, blobs) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let epoch = pending_block + .block + .as_block() + .slot() + .epoch(E::slots_per_epoch()); + epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); + + pending_blocks.push_back(pending_block); + pending_blobs.push_back(blobs); + } + + let kzg = harness + .chain + .kzg + .as_ref() + .cloned() + .expect("kzg should exist"); + + let mut remaining_blobs = HashMap::new(); + for _ in 0..(n_epochs * capacity) { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); + let block_root = pending_block.block.as_block().canonical_root(); + let expected_blobs = pending_block.num_blobs_expected(); + if expected_blobs > 1 { + // might as well add a blob too + let one_blob = pending_block_blobs + .pop() + .expect("should have at least one blob"); + let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + let kzg_verified_blobs = vec![kzg_verified_blob]; + // generate random 
boolean + let block_first = (rand::random::() % 2) == 0; + if block_first { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "availabilty should be pending blobs: {:?}", + availability + ); + } else { + let availability = cache + .put_kzg_verified_blobs(block_root, kzg_verified_blobs) + .expect("should put blob"); + let root = pending_block.block.as_block().canonical_root(); + assert_eq!( + availability, + Availability::MissingComponents(root), + "should be pending block" + ); + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should have pending blobs" + ); + } + } else { + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + } + remaining_blobs.insert(block_root, pending_block_blobs); + } + + // now we should have a full cache spanning multiple epochs + // cache should be at capacity + assert_eq!( + cache.critical.read().in_memory.len(), + capacity, + "cache memory should be full" + ); + // write all components to disk + cache.write_all_to_disk().expect("should write all to disk"); + // everything should be on disk now + assert_eq!( + cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(), + capacity * n_epochs, + "cache disk should have the rest" + ); + assert_eq!( + cache.critical.read().in_memory.len(), + 0, + "cache memory should be empty" + ); + assert_eq!( + cache.critical.read().store_keys.len(), + 
n_epochs * capacity, + "cache store should have the rest" + ); + drop(cache); + + // create a new cache with the same store + let recovered_cache = OverflowLRUCache::::new( + capacity, + harness.chain.store.clone(), + harness.chain.spec.clone(), + ) + .expect("should recover cache"); + // again, everything should be on disk + assert_eq!( + recovered_cache + .overflow_store + .read_keys_on_disk() + .expect("should read keys") + .len(), + capacity * n_epochs, + "cache disk should have the rest" + ); + assert_eq!( + recovered_cache.critical.read().in_memory.len(), + 0, + "cache memory should be empty" + ); + assert_eq!( + recovered_cache.critical.read().store_keys.len(), + n_epochs * capacity, + "cache store should have the rest" + ); + + // now lets insert the remaining blobs until the cache is empty + for (root, blobs) in remaining_blobs { + let additional_blobs = blobs.len(); + let mut kzg_verified_blobs = vec![]; + for (i, gossip_blob) in blobs.into_iter().enumerate() { + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); + let availability = recovered_cache + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) + .expect("should put blob"); + if i == additional_blobs - 1 { + assert!(matches!(availability, Availability::Available(_))) + } else { + assert!(matches!(availability, Availability::MissingComponents(_))); + } + } + } + } + + #[tokio::test] + // ensure the state cache keeps memory usage low and that it can properly recover states + // THIS TEST CAN BE DELETED ONCE TREE STATES IS MERGED AND WE RIP OUT THE STATE CACHE + async fn overflow_cache_test_state_cache() { + type E = MinimalEthSpec; + type T = DiskHarnessType; + let capacity = STATE_LRU_CAPACITY * 2; + let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; + + let mut pending_blocks = VecDeque::new(); + let mut states = Vec::new(); + let mut state_roots = Vec::new(); + 
// Get enough blocks to fill the cache to capacity, ensuring all blocks have blobs + while pending_blocks.len() < capacity { + let (pending_block, _) = availability_pending_block(&harness).await; + if pending_block.num_blobs_expected() == 0 { + // we need blocks with blobs + continue; + } + let state_root = pending_block.import_data.state.canonical_root(); + states.push(pending_block.import_data.state.clone()); + pending_blocks.push_back(pending_block); + state_roots.push(state_root); + } + + let state_cache = cache.state_lru_cache().lru_cache(); + let mut pushed_diet_blocks = VecDeque::new(); + + for i in 0..capacity { + let pending_block = pending_blocks.pop_front().expect("should have block"); + let block_root = pending_block.as_block().canonical_root(); + + assert_eq!( + state_cache.read().len(), + std::cmp::min(i, STATE_LRU_CAPACITY), + "state cache should be empty at start" + ); + + if i >= STATE_LRU_CAPACITY { + let lru_root = state_roots[i - STATE_LRU_CAPACITY]; + assert_eq!( + state_cache.read().peek_lru().map(|(root, _)| root), + Some(&lru_root), + "lru block should be in cache" + ); + } + + // put the block in the cache + let availability = cache + .put_pending_executed_block(pending_block) + .expect("should put block"); + + // grab the diet block from the cache for later testing + let diet_block = cache + .critical + .read() + .in_memory + .peek(&block_root) + .map(|pending_components| { + pending_components + .executed_block + .clone() + .expect("should exist") + }) + .expect("should exist"); + pushed_diet_blocks.push_back(diet_block); + + // should be unavailable since we made sure all blocks had blobs + assert!( + matches!(availability, Availability::MissingComponents(_)), + "should be pending blobs" + ); + + if i >= STATE_LRU_CAPACITY { + let evicted_index = i - STATE_LRU_CAPACITY; + let evicted_root = state_roots[evicted_index]; + assert!( + state_cache.read().peek(&evicted_root).is_none(), + "lru root should be evicted" + ); + // get the diet 
block via direct conversion (testing only) + let diet_block = pushed_diet_blocks.pop_front().expect("should have block"); + // reconstruct the pending block by replaying the block on the parent state + let recovered_pending_block = cache + .state_lru_cache() + .reconstruct_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + + // assert the recovered state is the same as the original + assert_eq!( + recovered_pending_block.import_data.state, states[evicted_index], + "recovered state should be the same as the original" + ); + } + } + + // now check the last block + let last_block = pushed_diet_blocks.pop_back().expect("should exist").clone(); + // the state should still be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_some(), + "last block state should still be in cache" + ); + // get the diet block via direct conversion (testing only) + let diet_block = last_block.clone(); + // recover the pending block from the cache + let recovered_pending_block = cache + .state_lru_cache() + .recover_pending_executed_block(diet_block) + .expect("should reconstruct pending block"); + // assert the recovered state is the same as the original + assert_eq!( + Some(&recovered_pending_block.import_data.state), + states.last(), + "recovered state should be the same as the original" + ); + // the state should no longer be in the cache + assert!( + state_cache + .read() + .peek(&last_block.as_block().state_root()) + .is_none(), + "last block state should no longer be in cache" + ); + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs new file mode 100644 index 0000000000..969034c657 --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs @@ -0,0 +1,74 @@ +use crate::data_availability_checker::AvailabilityView; +use std::collections::hash_map::Entry; 
+use std::collections::HashMap; +use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; +use types::{EthSpec, Hash256, Slot}; + +/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp +/// a view of what we have and what we require. This cache serves a slightly different purpose than +/// gossip caches because it allows us to process duplicate blobs that are valid in gossip. +/// See `AvailabilityView`'s trait definition. +#[derive(Default)] +pub struct ProcessingCache { + processing_cache: HashMap>, +} + +impl ProcessingCache { + pub fn get(&self, block_root: &Hash256) -> Option<&ProcessingComponents> { + self.processing_cache.get(block_root) + } + pub fn entry(&mut self, block_root: Hash256) -> Entry<'_, Hash256, ProcessingComponents> { + self.processing_cache.entry(block_root) + } + pub fn remove(&mut self, block_root: &Hash256) { + self.processing_cache.remove(block_root); + } + pub fn has_block(&self, block_root: &Hash256) -> bool { + self.processing_cache + .get(block_root) + .map_or(false, |b| b.block_exists()) + } + pub fn incomplete_processing_components(&self, slot: Slot) -> Vec { + let mut roots_missing_components = vec![]; + for (&block_root, info) in self.processing_cache.iter() { + if info.slot == slot && !info.is_available() { + roots_missing_components.push(block_root); + } + } + roots_missing_components + } +} + +#[derive(Debug, Clone)] +pub struct ProcessingComponents { + slot: Slot, + /// Blobs required for a block can only be known if we have seen the block. So `Some` here + /// means we've seen it, a `None` means we haven't. The `kzg_commitments` value helps us figure + /// out whether incoming blobs actually match the block. + pub block_commitments: Option>, + /// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See + /// `AvailabilityView`'s trait definition for more details. 
+ pub blob_commitments: KzgCommitmentOpts, +} + +impl ProcessingComponents { + pub fn new(slot: Slot) -> Self { + Self { + slot, + block_commitments: None, + blob_commitments: KzgCommitmentOpts::::default(), + } + } +} + +// Not safe for use outside of tests as this always required a slot. +#[cfg(test)] +impl ProcessingComponents { + pub fn empty(_block_root: Hash256) -> Self { + Self { + slot: Slot::new(0), + block_commitments: None, + blob_commitments: KzgCommitmentOpts::::default(), + } + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs new file mode 100644 index 0000000000..d3348b67fb --- /dev/null +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -0,0 +1,230 @@ +use crate::block_verification_types::AsBlock; +use crate::{ + block_verification_types::BlockImportData, + data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY}, + eth1_finalization_cache::Eth1FinalizationData, + AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, +}; +use lru::LruCache; +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use state_processing::{BlockReplayer, ConsensusContext, StateProcessingStrategy}; +use std::sync::Arc; +use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; +use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; + +/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except +/// that it is much smaller because it contains only a state root instead of +/// a full `BeaconState`. 
+#[derive(Encode, Decode, Clone)] +pub struct DietAvailabilityPendingExecutedBlock { + #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] + block: Arc>, + state_root: Hash256, + #[ssz(with = "ssz_tagged_signed_beacon_block")] + parent_block: SignedBeaconBlock>, + parent_eth1_finalization_data: Eth1FinalizationData, + confirmed_state_roots: Vec, + consensus_context: ConsensusContext, + payload_verification_outcome: PayloadVerificationOutcome, +} + +/// just implementing the same methods as `AvailabilityPendingExecutedBlock` +impl DietAvailabilityPendingExecutedBlock { + pub fn as_block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn num_blobs_expected(&self) -> usize { + self.block + .message() + .body() + .blob_kzg_commitments() + .map_or(0, |commitments| commitments.len()) + } +} + +/// This LRU cache holds BeaconStates used for block import. If the cache overflows, +/// the least recently used state will be dropped. If the dropped state is needed +/// later on, it will be recovered from the parent state and replaying the block. +/// +/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock` +/// has already been imported into ForkChoice. If this is not the case, the cache +/// will fail to recover the state when the cache overflows because it can't load +/// the parent state! +pub struct StateLRUCache { + states: RwLock>>, + store: BeaconStore, + spec: ChainSpec, +} + +impl StateLRUCache { + pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { + Self { + states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)), + store, + spec, + } + } + + /// This will store the state in the LRU cache and return a + /// `DietAvailabilityPendingExecutedBlock` which is much cheaper to + /// keep around in memory. 
+ pub fn register_pending_executed_block( + &self, + executed_block: AvailabilityPendingExecutedBlock, + ) -> DietAvailabilityPendingExecutedBlock { + let state = executed_block.import_data.state; + let state_root = executed_block.block.state_root(); + self.states.write().put(state_root, state); + + DietAvailabilityPendingExecutedBlock { + block: executed_block.block, + state_root, + parent_block: executed_block.import_data.parent_block, + parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, + confirmed_state_roots: executed_block.import_data.confirmed_state_roots, + consensus_context: executed_block.import_data.consensus_context, + payload_verification_outcome: executed_block.payload_verification_outcome, + } + } + + /// Recover the `AvailabilityPendingExecutedBlock` from the diet version. + /// This method will first check the cache and if the state is not found + /// it will reconstruct the state by loading the parent state from disk and + /// replaying the block. 
+ pub fn recover_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let maybe_state = self.states.write().pop(&diet_executed_block.state_root); + if let Some(state) = maybe_state { + let block_root = diet_executed_block.block.canonical_root(); + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block + .parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } else { + self.reconstruct_pending_executed_block(diet_executed_block) + } + } + + /// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent + /// state from disk and replaying the block. This function does NOT check the + /// LRU cache. + pub fn reconstruct_pending_executed_block( + &self, + diet_executed_block: DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let block_root = diet_executed_block.block.canonical_root(); + let state = self.reconstruct_state(&diet_executed_block)?; + Ok(AvailabilityPendingExecutedBlock { + block: diet_executed_block.block, + import_data: BlockImportData { + block_root, + state, + parent_block: diet_executed_block.parent_block, + parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, + confirmed_state_roots: diet_executed_block.confirmed_state_roots, + consensus_context: diet_executed_block.consensus_context, + }, + payload_verification_outcome: diet_executed_block.payload_verification_outcome, + }) + } + + /// Reconstruct the state by loading the parent state from disk and replaying + /// the block. 
+ fn reconstruct_state( + &self, + diet_executed_block: &DietAvailabilityPendingExecutedBlock, + ) -> Result, AvailabilityCheckError> { + let parent_block_root = diet_executed_block.parent_block.canonical_root(); + let parent_block_state_root = diet_executed_block.parent_block.state_root(); + let (parent_state_root, parent_state) = self + .store + .get_advanced_hot_state( + parent_block_root, + diet_executed_block.parent_block.slot(), + parent_block_state_root, + ) + .map_err(AvailabilityCheckError::StoreError)? + .ok_or(AvailabilityCheckError::ParentStateMissing( + parent_block_state_root, + ))?; + + let state_roots = vec![ + Ok((parent_state_root, diet_executed_block.parent_block.slot())), + Ok(( + diet_executed_block.state_root, + diet_executed_block.block.slot(), + )), + ]; + + let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = + BlockReplayer::new(parent_state, &self.spec) + .no_signature_verification() + .state_processing_strategy(StateProcessingStrategy::Accurate) + .state_root_iter(state_roots.into_iter()) + .minimal_block_root_verification(); + + block_replayer + .apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None) + .map(|block_replayer| block_replayer.into_state()) + .and_then(|mut state| { + state + .build_exit_cache(&self.spec) + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + state + .update_tree_hash_cache() + .map_err(AvailabilityCheckError::RebuildingStateCaches)?; + Ok(state) + }) + } + + /// returns the state cache for inspection in tests + #[cfg(test)] + pub fn lru_cache(&self) -> &RwLock>> { + &self.states + } + + /// remove any states from the cache from before the given epoch + pub fn do_maintenance(&self, cutoff_epoch: Epoch) { + let mut write_lock = self.states.write(); + while let Some((_, state)) = write_lock.peek_lru() { + if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch { + write_lock.pop_lru(); + } else { + break; + } + } + } +} + +/// This can only be used 
during testing. The intended way to +/// obtain a `DietAvailabilityPendingExecutedBlock` is to call +/// `register_pending_executed_block` on the `StateLRUCache`. +#[cfg(test)] +impl From> + for DietAvailabilityPendingExecutedBlock +{ + fn from(value: AvailabilityPendingExecutedBlock) -> Self { + Self { + block: value.block, + state_root: value.import_data.state.canonical_root(), + parent_block: value.import_data.parent_block, + parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, + confirmed_state_roots: value.import_data.confirmed_state_roots, + consensus_context: value.import_data.consensus_context, + payload_verification_outcome: value.payload_verification_outcome, + } + } +} diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 1ddbe13241..da3c2c8a1e 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -1,3 +1,4 @@ +use crate::data_availability_checker::AvailableBlock; use crate::{ attester_cache::{CommitteeLengths, Error}, metrics, @@ -5,6 +6,7 @@ use crate::{ use parking_lot::RwLock; use proto_array::Block as ProtoBlock; use std::sync::Arc; +use types::blob_sidecar::BlobSidecarList; use types::*; pub struct CacheItem { @@ -20,6 +22,7 @@ pub struct CacheItem { * Values used to make the block available. 
*/ block: Arc>, + blobs: Option>, proto_block: ProtoBlock, } @@ -49,7 +52,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: Arc>, + block: AvailableBlock, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -67,6 +70,7 @@ impl EarlyAttesterCache { }, }; + let (_, block, blobs) = block.deconstruct(); let item = CacheItem { epoch, committee_lengths, @@ -74,6 +78,7 @@ impl EarlyAttesterCache { source, target, block, + blobs, proto_block, }; @@ -94,9 +99,7 @@ impl EarlyAttesterCache { spec: &ChainSpec, ) -> Result>, Error> { let lock = self.item.read(); - let item = if let Some(item) = lock.as_ref() { - item - } else { + let Some(item) = lock.as_ref() else { return Ok(None); }; @@ -155,6 +158,15 @@ impl EarlyAttesterCache { .map(|item| item.block.clone()) } + /// Returns the blobs, if `block_root` matches the cached item. + pub fn get_blobs(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .and_then(|item| item.blobs.clone()) + } + /// Returns the proto-array block, if `block_root` matches the cached item. 
pub fn get_proto_block(&self, block_root: Hash256) -> Option { self.item diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 8714131150..7c1bb04917 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -2,12 +2,14 @@ use crate::attester_cache::Error as AttesterCacheError; use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; +use crate::data_availability_checker::AvailabilityCheckError; use crate::eth1_chain::Error as Eth1ChainError; use crate::historical_blocks::HistoricalBlockError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; +use crate::observed_blob_sidecars::Error as ObservedBlobSidecarsError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; @@ -102,6 +104,7 @@ pub enum BeaconChainError { ObservedAttestationsError(ObservedAttestationsError), ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), + ObservedBlobSidecarsError(ObservedBlobSidecarsError), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), @@ -146,6 +149,8 @@ pub enum BeaconChainError { BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box), EngineGetCapabilititesFailed(Box), + ExecutionLayerGetBlockByNumberFailed(Box), + ExecutionLayerGetBlockByHashFailed(Box), BlockHashMissingFromExecutionLayer(ExecutionBlockHash), InconsistentPayloadReconstructed { slot: Slot, @@ -215,6 +220,7 @@ pub enum BeaconChainError { InconsistentFork(InconsistentFork), 
ProposerHeadForkChoiceError(fork_choice::Error), UnableToPublish, + AvailabilityCheckError(AvailabilityCheckError), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -231,6 +237,7 @@ easy_from_to!(NaiveAggregationError, BeaconChainError); easy_from_to!(ObservedAttestationsError, BeaconChainError); easy_from_to!(ObservedAttestersError, BeaconChainError); easy_from_to!(ObservedBlockProducersError, BeaconChainError); +easy_from_to!(ObservedBlobSidecarsError, BeaconChainError); easy_from_to!(AttesterCacheError, BeaconChainError); easy_from_to!(BlockSignatureVerifierError, BeaconChainError); easy_from_to!(PruningError, BeaconChainError); @@ -240,6 +247,7 @@ easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); +easy_from_to!(AvailabilityCheckError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -268,11 +276,17 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ShuttingDown, + MissingBlobs, MissingSyncAggregate, MissingExecutionPayload, - TokioJoin(tokio::task::JoinError), + MissingKzgCommitment(String), + TokioJoin(JoinError), BeaconChain(BeaconChainError), InvalidPayloadFork, + TrustedSetupNotInitialized, + InvalidBlockVariant(String), + KzgError(kzg::Error), + FailedToBuildBlobSidecars(String), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 7cf805a126..24b6542eab 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -1,4 +1,5 @@ use slog::{debug, Logger}; +use ssz_derive::{Decode, Encode}; use std::cmp; use std::collections::BTreeMap; use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; @@ -10,7 +11,7 @@ pub const 
DEFAULT_ETH1_CACHE_SIZE: usize = 5; /// These fields are named the same as the corresponding fields in the `BeaconState` /// as this structure stores these values from the `BeaconState` at a `Checkpoint` -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Encode, Decode)] pub struct Eth1FinalizationData { pub eth1_data: Eth1Data, pub eth1_deposit_index: u64, @@ -66,7 +67,7 @@ impl CheckpointMap { pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { self.store .entry(checkpoint.epoch) - .or_insert_with(Vec::new) + .or_default() .push((checkpoint.root, eth1_finalization_data)); // faster to reduce size after the fact than do pre-checking to see diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index b267cc853f..8d3a682794 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -9,6 +9,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16; pub struct ServerSentEventHandler { attestation_tx: Sender>, block_tx: Sender>, + blob_sidecar_tx: Sender>, finalized_tx: Sender>, head_tx: Sender>, exit_tx: Sender>, @@ -31,6 +32,7 @@ impl ServerSentEventHandler { pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { let (attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); + let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); let (head_tx, _) = broadcast::channel(capacity); let (exit_tx, _) = broadcast::channel(capacity); @@ -43,6 +45,7 @@ impl ServerSentEventHandler { Self { attestation_tx, block_tx, + blob_sidecar_tx, finalized_tx, head_tx, exit_tx, @@ -73,6 +76,10 @@ impl ServerSentEventHandler { .block_tx .send(kind) .map(|count| log_count("block", count)), + EventKind::BlobSidecar(_) => self + .blob_sidecar_tx + .send(kind) + .map(|count| log_count("blob sidecar", count)), EventKind::FinalizedCheckpoint(_) => self .finalized_tx 
.send(kind) @@ -119,6 +126,10 @@ impl ServerSentEventHandler { self.block_tx.subscribe() } + pub fn subscribe_blob_sidecar(&self) -> Receiver> { + self.blob_sidecar_tx.subscribe() + } + pub fn subscribe_finalized(&self) -> Receiver> { self.finalized_tx.subscribe() } @@ -159,6 +170,10 @@ impl ServerSentEventHandler { self.block_tx.receiver_count() > 0 } + pub fn has_blob_sidecar_subscribers(&self) -> bool { + self.blob_sidecar_tx.receiver_count() > 0 + } + pub fn has_finalized_subscribers(&self) -> bool { self.finalized_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 1ac7229cc6..33c97efd26 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -12,7 +12,9 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BlockProposalContents, BuilderParams, NewPayloadRequest, PayloadAttributes, PayloadStatus, +}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::{debug, warn}; @@ -68,11 +70,10 @@ impl PayloadNotifier { // the block as optimistically imported. This is particularly relevant in the case // where we do not send the block to the EL at all. 
let block_message = block.message(); - let payload = block_message.execution_payload()?; partially_verify_execution_payload::<_, FullPayload<_>>( state, block.slot(), - payload, + block_message.body(), &chain.spec, ) .map_err(BlockError::PerBlockProcessingError)?; @@ -86,13 +87,11 @@ impl PayloadNotifier { .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - if let Err(e) = - execution_layer.verify_payload_block_hash(payload.execution_payload_ref()) - { + if let Err(e) = execution_layer.verify_payload_block_hash(block_message) { warn!( chain.log, "Falling back to slow block hash verification"; - "block_number" => payload.block_number(), + "block_number" => ?block_message.execution_payload().map(|payload| payload.block_number()), "info" => "you can silence this warning with --disable-optimistic-finalized-sync", "error" => ?e, ); @@ -138,15 +137,15 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - let execution_payload = block.execution_payload()?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; + let new_payload_request: NewPayloadRequest = block.try_into()?; + let execution_block_hash = new_payload_request.block_hash(); let new_payload_response = execution_layer - .notify_new_payload(&execution_payload.into()) + .notify_new_payload(new_payload_request) .await; match new_payload_response { @@ -164,7 +163,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( "Invalid execution payload"; "validation_error" => ?validation_error, "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_payload.block_hash(), + "execution_block_hash" => ?execution_block_hash, "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -210,7 +209,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( chain.log, "Invalid 
execution payload block hash"; "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_payload.block_hash(), + "execution_block_hash" => ?execution_block_hash, "root" => ?block.tree_hash_root(), "graffiti" => block.body().graffiti().as_utf8_lossy(), "proposer_index" => block.proposer_index(), @@ -405,6 +404,7 @@ pub fn get_execution_payload< >( chain: Arc>, state: &BeaconState, + parent_block_root: Hash256, proposer_index: u64, builder_params: BuilderParams, ) -> Result, BlockProductionError> { @@ -419,11 +419,19 @@ pub fn get_execution_payload< let latest_execution_payload_header_block_hash = state.latest_execution_payload_header()?.block_hash(); let withdrawals = match state { - &BeaconState::Capella(_) => Some(get_expected_withdrawals(state, spec)?.into()), + &BeaconState::Capella(_) | &BeaconState::Deneb(_) => { + Some(get_expected_withdrawals(state, spec)?.into()) + } &BeaconState::Merge(_) => None, // These shouldn't happen but they're here to make the pattern irrefutable &BeaconState::Base(_) | &BeaconState::Altair(_) => None, }; + let parent_beacon_block_root = match state { + BeaconState::Deneb(_) => Some(parent_block_root), + BeaconState::Merge(_) | BeaconState::Capella(_) => None, + // These shouldn't happen but they're here to make the pattern irrefutable + BeaconState::Base(_) | BeaconState::Altair(_) => None, + }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The // `join_handle` can be used to await the result of the function. 
@@ -441,6 +449,7 @@ pub fn get_execution_payload< latest_execution_payload_header_block_hash, builder_params, withdrawals, + parent_beacon_block_root, ) .await }, @@ -475,6 +484,7 @@ pub async fn prepare_execution_payload( latest_execution_payload_header_block_hash: ExecutionBlockHash, builder_params: BuilderParams, withdrawals: Option>, + parent_beacon_block_root: Option, ) -> Result, BlockProductionError> where T: BeaconChainTypes, @@ -536,8 +546,13 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, random, suggested_fee_recipient, withdrawals); + let payload_attributes = PayloadAttributes::new( + timestamp, + random, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + ); // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 5f59073500..b5e58e7ffa 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,3 +1,4 @@ +use crate::data_availability_checker::AvailableBlock; use crate::{errors::BeaconChainError as Error, metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; use slog::debug; @@ -7,10 +8,9 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; -use std::sync::Arc; use std::time::Duration; -use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; -use types::{Hash256, SignedBlindedBeaconBlock, Slot}; +use store::{chunked_vector::BlockRoots, AnchorInfo, BlobInfo, ChunkWriter, KeyValueStore}; +use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. /// @@ -59,27 +59,30 @@ impl BeaconChain { /// Return the number of blocks successfully imported. 
pub fn import_historical_block_batch( &self, - blocks: Vec>>, + mut blocks: Vec>, ) -> Result { let anchor_info = self .store .get_anchor_info() .ok_or(HistoricalBlockError::NoAnchorInfo)?; + let blob_info = self.store.get_blob_info(); // Take all blocks with slots less than the oldest block slot. - let num_relevant = - blocks.partition_point(|block| block.slot() < anchor_info.oldest_block_slot); - let blocks_to_import = &blocks - .get(..num_relevant) - .ok_or(HistoricalBlockError::IndexOutOfBounds)?; + let num_relevant = blocks.partition_point(|available_block| { + available_block.block().slot() < anchor_info.oldest_block_slot + }); - if blocks_to_import.len() != blocks.len() { + let total_blocks = blocks.len(); + blocks.truncate(num_relevant); + let blocks_to_import = blocks; + + if blocks_to_import.len() != total_blocks { debug!( self.log, "Ignoring some historic blocks"; "oldest_block_slot" => anchor_info.oldest_block_slot, - "total_blocks" => blocks.len(), - "ignored" => blocks.len().saturating_sub(blocks_to_import.len()), + "total_blocks" => total_blocks, + "ignored" => total_blocks.saturating_sub(blocks_to_import.len()), ); } @@ -87,17 +90,23 @@ impl BeaconChain { return Ok(0); } + let n_blobs_lists_to_import = blocks_to_import + .iter() + .filter(|available_block| available_block.blobs().is_some()) + .count(); + let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut chunk_writer = ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; + let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; - let mut cold_batch = Vec::with_capacity(blocks.len()); - let mut hot_batch = Vec::with_capacity(blocks.len()); + let mut cold_batch = Vec::with_capacity(blocks_to_import.len()); + let mut hot_batch = Vec::with_capacity(blocks_to_import.len() + n_blobs_lists_to_import); + let mut signed_blocks = Vec::with_capacity(blocks_to_import.len()); - for block in 
blocks_to_import.iter().rev() { - // Check chain integrity. - let block_root = block.canonical_root(); + for available_block in blocks_to_import.into_iter().rev() { + let (block_root, block, maybe_blobs) = available_block.deconstruct(); if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { @@ -107,9 +116,16 @@ impl BeaconChain { .into()); } + let blinded_block = block.clone_as_blinded(); // Store block in the hot database without payload. self.store - .blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch); + .blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch); + // Store the blobs too + if let Some(blobs) = maybe_blobs { + new_oldest_blob_slot = Some(block.slot()); + self.store + .blobs_as_kv_store_ops(&block_root, blobs, &mut hot_batch); + } // Store block roots, including at all skip slots in the freezer DB. for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { @@ -119,21 +135,23 @@ impl BeaconChain { prev_block_slot = block.slot(); expected_block_root = block.message().parent_root(); - // If we've reached genesis, add the genesis block root to the batch and set the - // anchor slot to 0 to indicate completion. + // If we've reached genesis, add the genesis block root to the batch for all slots + // between 0 and the first block slot, and set the anchor slot to 0 to indicate + // completion. 
if expected_block_root == self.genesis_block_root { let genesis_slot = self.spec.genesis_slot; - chunk_writer.set( - genesis_slot.as_usize(), - self.genesis_block_root, - &mut cold_batch, - )?; + for slot in genesis_slot.as_usize()..block.slot().as_usize() { + chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?; + } prev_block_slot = genesis_slot; expected_block_root = Hash256::zero(); break; } + signed_blocks.push(block); } chunk_writer.write(&mut cold_batch)?; + // these were pushed in reverse order so we reverse again + signed_blocks.reverse(); // Verify signatures in one batch, holding the pubkey cache lock for the shortest duration // possible. For each block fetch the parent root from its successor. Slicing from index 1 @@ -144,15 +162,16 @@ impl BeaconChain { .validator_pubkey_cache .try_read_for(PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(HistoricalBlockError::ValidatorPubkeyCacheTimeout)?; - let block_roots = blocks_to_import + let block_roots = signed_blocks .get(1..) .ok_or(HistoricalBlockError::IndexOutOfBounds)? .iter() .map(|block| block.parent_root()) .chain(iter::once(anchor_info.oldest_block_parent)); - let signature_set = blocks_to_import + let signature_set = signed_blocks .iter() .zip_eq(block_roots) + .filter(|&(_block, block_root)| (block_root != self.genesis_block_root)) .map(|(block, block_root)| { block_proposal_signature_set_from_parts( block, @@ -183,6 +202,22 @@ impl BeaconChain { self.store.hot_db.do_atomically(hot_batch)?; self.store.cold_db.do_atomically(cold_batch)?; + let mut anchor_and_blob_batch = Vec::with_capacity(2); + + // Update the blob info. + if new_oldest_blob_slot != blob_info.oldest_blob_slot { + if let Some(oldest_blob_slot) = new_oldest_blob_slot { + let new_blob_info = BlobInfo { + oldest_blob_slot: Some(oldest_blob_slot), + ..blob_info.clone() + }; + anchor_and_blob_batch.push( + self.store + .compare_and_set_blob_info(blob_info, new_blob_info)?, + ); + } + } + // Update the anchor. 
let new_anchor = AnchorInfo { oldest_block_slot: prev_block_slot, @@ -190,8 +225,11 @@ impl BeaconChain { ..anchor_info }; let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); - self.store - .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?; + anchor_and_blob_batch.push( + self.store + .compare_and_set_anchor_info(Some(anchor_info), Some(new_anchor))?, + ); + self.store.hot_db.do_atomically(anchor_and_blob_batch)?; // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. @@ -203,6 +241,6 @@ impl BeaconChain { self.store_migrator.process_reconstruction(); } - Ok(blocks_to_import.len()) + Ok(num_relevant) } } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs new file mode 100644 index 0000000000..9f5186f310 --- /dev/null +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -0,0 +1,76 @@ +use kzg::{Blob as KzgBlob, Error as KzgError, Kzg}; +use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; + +/// Converts a blob ssz List object to an array to be used with the kzg +/// crypto library. +fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { + KzgBlob::from_bytes(blob.as_ref()) +} + +/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. +pub fn validate_blob( + kzg: &Kzg, + blob: &Blob, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, +) -> Result { + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) +} + +/// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. 
+pub fn validate_blobs( + kzg: &Kzg, + expected_kzg_commitments: &[KzgCommitment], + blobs: Vec<&Blob>, + kzg_proofs: &[KzgProof], +) -> Result { + let blobs = blobs + .into_iter() + .map(|blob| ssz_blob_to_crypto_blob::(blob)) + .collect::, KzgError>>()?; + + kzg.verify_blob_kzg_proof_batch(&blobs, expected_kzg_commitments, kzg_proofs) +} + +/// Compute the kzg proof given an ssz blob and its kzg commitment. +pub fn compute_blob_kzg_proof( + kzg: &Kzg, + blob: &Blob, + kzg_commitment: KzgCommitment, +) -> Result { + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.compute_blob_kzg_proof(&kzg_blob, kzg_commitment) +} + +/// Compute the kzg commitment for a given blob. +pub fn blob_to_kzg_commitment( + kzg: &Kzg, + blob: &Blob, +) -> Result { + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.blob_to_kzg_commitment(&kzg_blob) +} + +/// Compute the kzg proof for a given blob and an evaluation point z. +pub fn compute_kzg_proof( + kzg: &Kzg, + blob: &Blob, + z: Hash256, +) -> Result<(KzgProof, Hash256), KzgError> { + let z = z.0.into(); + let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; + kzg.compute_kzg_proof(&kzg_blob, &z) + .map(|(proof, z)| (proof, Hash256::from_slice(&z.to_vec()))) +} + +/// Verify a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` +pub fn verify_kzg_proof( + kzg: &Kzg, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, + z: Hash256, + y: Hash256, +) -> Result { + kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4ea1eeee01..4efc776b2c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,13 +7,16 @@ mod beacon_chain; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; +pub mod blob_verification; pub mod block_reward; mod block_times_cache; mod block_verification; +pub mod block_verification_types; pub 
mod builder; pub mod canonical_head; pub mod capella_readiness; pub mod chain_config; +pub mod data_availability_checker; mod early_attester_cache; mod errors; pub mod eth1_chain; @@ -24,6 +27,7 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; pub mod merge_readiness; @@ -32,6 +36,7 @@ pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; mod observed_attesters; +mod observed_blob_sidecars; pub mod observed_block_producers; pub mod observed_operations; pub mod otb_verification_service; @@ -51,9 +56,10 @@ pub mod validator_monitor; pub mod validator_pubkey_cache; pub use self::beacon_chain::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, + BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, + ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; @@ -63,15 +69,19 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, - IntoExecutionPendingBlock, IntoGossipVerifiedBlock, + get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, IntoGossipVerifiedBlockContents, 
PayloadVerificationOutcome, + PayloadVerificationStatus, }; +pub use block_verification_types::AvailabilityPendingExecutedBlock; +pub use block_verification_types::ExecutedBlock; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; +pub use kzg::TrustedSetup; pub use metrics::scrape_for_metrics; pub use migrate::MigratorConfig; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 638d2b4012..86cc6695ed 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -34,7 +34,7 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientFinalityUpdate from state. FailedConstructingUpdate, - /// Beacon chain error occured. + /// Beacon chain error occurred. BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), } diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 2d1a5cf97c..8b7e644527 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -37,7 +37,7 @@ pub enum Error { FailedConstructingUpdate, /// Unknown block with parent root. UnknownBlockParentRoot(Hash256), - /// Beacon chain error occured. + /// Beacon chain error occurred. 
BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), } diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index bfc2b36fdb..52a5ea912e 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -1,8 +1,10 @@ //! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge //! transition. -use crate::{BeaconChain, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainError as Error, BeaconChainTypes}; +use execution_layer::BlockByNumberQuery; use serde::{Deserialize, Serialize, Serializer}; +use slog::debug; use std::fmt; use std::fmt::Write; use types::*; @@ -120,6 +122,25 @@ impl fmt::Display for MergeReadiness { } } +pub enum GenesisExecutionPayloadStatus { + Correct(ExecutionBlockHash), + BlockHashMismatch { + got: ExecutionBlockHash, + expected: ExecutionBlockHash, + }, + TransactionsRootMismatch { + got: Hash256, + expected: Hash256, + }, + WithdrawalsRootMismatch { + got: Hash256, + expected: Hash256, + }, + OtherMismatch, + Irrelevant, + AlreadyHappened, +} + impl BeaconChain { /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will /// occur within `MERGE_READINESS_PREPARATION_SECONDS`. @@ -144,9 +165,9 @@ impl BeaconChain { } /// Attempts to connect to the EL and confirm that it is ready for the merge. - pub async fn check_merge_readiness(&self) -> MergeReadiness { + pub async fn check_merge_readiness(&self, current_slot: Slot) -> MergeReadiness { if let Some(el) = self.execution_layer.as_ref() { - if !el.is_synced_for_notifier().await { + if !el.is_synced_for_notifier(current_slot).await { // The EL is not synced. return MergeReadiness::NotSynced; } @@ -161,6 +182,91 @@ impl BeaconChain { MergeReadiness::NoExecutionEndpoint } } + + /// Check that the execution payload embedded in the genesis state matches the EL's genesis + /// block. 
+ pub async fn check_genesis_execution_payload_is_correct( + &self, + ) -> Result { + let head_snapshot = self.head_snapshot(); + let genesis_state = &head_snapshot.beacon_state; + + if genesis_state.slot() != 0 { + return Ok(GenesisExecutionPayloadStatus::AlreadyHappened); + } + + let Ok(latest_execution_payload_header) = genesis_state.latest_execution_payload_header() + else { + return Ok(GenesisExecutionPayloadStatus::Irrelevant); + }; + let fork = self.spec.fork_name_at_epoch(Epoch::new(0)); + + let execution_layer = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)?; + let exec_block_hash = latest_execution_payload_header.block_hash(); + + // Use getBlockByNumber(0) to check that the block hash matches. + // At present, Geth does not respond to engine_getPayloadBodiesByRange before genesis. + let execution_block = execution_layer + .get_block_by_number(BlockByNumberQuery::Tag("0x0")) + .await + .map_err(|e| Error::ExecutionLayerGetBlockByNumberFailed(Box::new(e)))? + .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + + if execution_block.block_hash != exec_block_hash { + return Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { + got: execution_block.block_hash, + expected: exec_block_hash, + }); + } + + // Double-check the block by reconstructing it. + let execution_payload = execution_layer + .get_payload_by_hash_legacy(exec_block_hash, fork) + .await + .map_err(|e| Error::ExecutionLayerGetBlockByHashFailed(Box::new(e)))? + .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + + // Verify payload integrity. 
+ let header_from_payload = ExecutionPayloadHeader::from(execution_payload.to_ref()); + + let got_transactions_root = header_from_payload.transactions_root(); + let expected_transactions_root = latest_execution_payload_header.transactions_root(); + let got_withdrawals_root = header_from_payload.withdrawals_root().ok(); + let expected_withdrawals_root = latest_execution_payload_header.withdrawals_root().ok(); + + if got_transactions_root != expected_transactions_root { + return Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { + got: got_transactions_root, + expected: expected_transactions_root, + }); + } + + if let Some(&expected) = expected_withdrawals_root { + if let Some(&got) = got_withdrawals_root { + if got != expected { + return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { + got, + expected, + }); + } + } + } + + if header_from_payload.to_ref() != latest_execution_payload_header { + debug!( + self.log, + "Genesis execution payload reconstruction failure"; + "consensus_node_header" => ?latest_execution_payload_header, + "execution_node_header" => ?header_from_payload + ); + return Ok(GenesisExecutionPayloadStatus::OtherMismatch); + } + + Ok(GenesisExecutionPayloadStatus::Correct(exec_block_hash)) + } } /// Utility function to serialize a Uint256 as a decimal string. diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index dff663ded0..a23bcdc0b5 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -40,6 +40,10 @@ lazy_static! { "beacon_block_processing_block_root_seconds", "Time spent calculating the block root when processing a block." ); + pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result = try_create_histogram( + "beacon_block_processing_blob_root_seconds", + "Time spent calculating the blob root when processing a block." 
+ ); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" @@ -282,6 +286,11 @@ lazy_static! { "Count of times the early attester cache returns an attestation" ); +} + +// Second lazy-static block is used to account for macro recursion limit. +lazy_static! { + /* * Attestation Production */ @@ -301,10 +310,7 @@ lazy_static! { "attestation_production_cache_prime_seconds", "Time spent loading a new state from the disk due to a cache miss" ); -} -// Second lazy-static block is used to account for macro recursion limit. -lazy_static! { /* * Fork Choice */ @@ -380,6 +386,8 @@ lazy_static! { try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches"); pub static ref PERSIST_FORK_CHOICE: Result = try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct"); + pub static ref PERSIST_DATA_AVAILABILITY_CHECKER: Result = + try_create_histogram("beacon_persist_data_availability_checker", "Time taken to persist the data availability checker"); /* * Eth1 @@ -980,6 +988,22 @@ lazy_static! 
{ "beacon_pre_finalization_block_lookup_count", "Number of block roots subject to single block lookups" ); + + /* + * Blob sidecar Verification + */ + pub static ref BLOBS_SIDECAR_PROCESSING_REQUESTS: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_requests_total", + "Count of all blob sidecars submitted for processing" + ); + pub static ref BLOBS_SIDECAR_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "beacon_blobs_sidecar_processing_successes_total", + "Number of blob sidecars verified for gossip" + ); + pub static ref BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_blobs_sidecar_gossip_verification_seconds", + "Full runtime of blob sidecars gossip verification" + ); } // Fifth lazy-static block is used to account for macro recursion limit. @@ -1009,6 +1033,28 @@ lazy_static! { "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); + + /* + * Kzg related metrics + */ + pub static ref KZG_VERIFICATION_SINGLE_TIMES: Result = + try_create_histogram("kzg_verification_single_seconds", "Runtime of single kzg verification"); + pub static ref KZG_VERIFICATION_BATCH_TIMES: Result = + try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification"); + + pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result = try_create_histogram( + "beacon_block_production_blobs_verification_seconds", + "Time taken to verify blobs against commitments and creating BlobSidecar objects in block production" + ); + /* + * Availability related metrics + */ + pub static ref BLOCK_AVAILABILITY_DELAY: Result = try_create_histogram_with_buckets( + "block_availability_delay", + "Duration between start of the slot and the time at which all components of the block are available.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 
0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 8306b66d7b..ad597bf92a 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -117,6 +117,7 @@ pub enum PruningError { pub enum Notification { Finalization(FinalizationNotification), Reconstruction, + PruneBlobs(Epoch), } pub struct FinalizationNotification { @@ -191,6 +192,14 @@ impl, Cold: ItemStore> BackgroundMigrator>, log: &Logger) { if let Err(e) = db.reconstruct_historic_states() { error!( @@ -201,6 +210,20 @@ impl, Cold: ItemStore> BackgroundMigrator>, + data_availability_boundary: Epoch, + log: &Logger, + ) { + if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) { + error!( + log, + "Blob pruning failed"; + "error" => ?e, + ); + } + } + /// If configured to run in the background, send `notif` to the background thread. /// /// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise. 
@@ -266,6 +289,7 @@ impl, Cold: ItemStore> BackgroundMigrator state, @@ -319,7 +343,12 @@ impl, Cold: ItemStore> BackgroundMigrator {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( @@ -361,29 +390,44 @@ impl, Cold: ItemStore> BackgroundMigrator Notification::Reconstruction, - ( - Notification::Finalization(fin1), - Notification::Finalization(fin2), - ) => { - if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch - { - other - } else { - best - } - } - }); - + let mut reconstruction_notif = None; + let mut finalization_notif = None; + let mut prune_blobs_notif = None; match notif { - Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log), - Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log), + Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::Finalization(fin) => finalization_notif = Some(fin), + Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + } + // Read the rest of the messages in the channel, taking the best of each type. + for notif in rx.try_iter() { + match notif { + Notification::Reconstruction => reconstruction_notif = Some(notif), + Notification::Finalization(fin) => { + if let Some(current) = finalization_notif.as_mut() { + if fin.finalized_checkpoint.epoch + > current.finalized_checkpoint.epoch + { + *current = fin; + } + } else { + finalization_notif = Some(fin); + } + } + Notification::PruneBlobs(dab) => { + prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab)); + } + } + } + // If reconstruction is on-going, ignore finalization migration and blob pruning. 
+ if reconstruction_notif.is_some() { + Self::run_reconstruction(db.clone(), &log); + } else { + if let Some(fin) = finalization_notif { + Self::run_migration(db.clone(), fin, &log); + } + if let Some(dab) = prune_blobs_notif { + Self::run_prune_blobs(db.clone(), dab, &log); + } } } }); @@ -624,13 +668,14 @@ impl, Cold: ItemStore> BackgroundMigrator> = abandoned_blocks + let mut batch: Vec> = abandoned_blocks .into_iter() .map(Into::into) .flat_map(|block_root: Hash256| { [ StoreOp::DeleteBlock(block_root), StoreOp::DeleteExecutionPayload(block_root), + StoreOp::DeleteBlobs(block_root), ] }) .chain( @@ -640,8 +685,6 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator { + finalized_slot: Slot, + /// Stores all received blob indices for a given `(Root, Slot)` tuple. + items: HashMap<(Hash256, Slot), HashSet>, + _phantom: PhantomData, +} + +impl Default for ObservedBlobSidecars { + /// Instantiates `Self` with `finalized_slot == 0`. + fn default() -> Self { + Self { + finalized_slot: Slot::new(0), + items: HashMap::new(), + _phantom: PhantomData, + } + } +} + +impl ObservedBlobSidecars { + /// Observe the `blob_sidecar` at (`blob_sidecar.block_root, blob_sidecar.slot`). + /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known. + /// + /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. + pub fn observe_sidecar(&mut self, blob_sidecar: &Arc>) -> Result { + self.sanitize_blob_sidecar(blob_sidecar)?; + + let did_not_exist = self + .items + .entry((blob_sidecar.block_root, blob_sidecar.slot)) + .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())) + .insert(blob_sidecar.index); + + Ok(!did_not_exist) + } + + /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. 
+ pub fn is_known(&self, blob_sidecar: &Arc>) -> Result { + self.sanitize_blob_sidecar(blob_sidecar)?; + let is_known = self + .items + .get(&(blob_sidecar.block_root, blob_sidecar.slot)) + .map_or(false, |set| set.contains(&blob_sidecar.index)); + Ok(is_known) + } + + fn sanitize_blob_sidecar(&self, blob_sidecar: &Arc>) -> Result<(), Error> { + if blob_sidecar.index >= T::max_blobs_per_block() as u64 { + return Err(Error::InvalidBlobIndex(blob_sidecar.index)); + } + let finalized_slot = self.finalized_slot; + if finalized_slot > 0 && blob_sidecar.slot <= finalized_slot { + return Err(Error::FinalizedBlob { + slot: blob_sidecar.slot, + finalized_slot, + }); + } + + Ok(()) + } + + /// Prune all values earlier than the given slot. + pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|k, _| k.1 > finalized_slot); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{BlobSidecar, Hash256, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn get_blob_sidecar(slot: u64, block_root: Hash256, index: u64) -> Arc> { + let mut blob_sidecar = BlobSidecar::empty(); + blob_sidecar.block_root = block_root; + blob_sidecar.slot = slot.into(); + blob_sidecar.index = index; + Arc::new(blob_sidecar) + } + + #[test] + fn pruning() { + let mut cache = ObservedBlobSidecars::default(); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 0, "no slots should be present"); + + // Slot 0, index 0 + let block_root_a = Hash256::random(); + let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(false), + "can observe proposer, indicates proposer unobserved" + ); + + /* + * Preconditions. 
+ */ + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!( + cache.items.len(), + 1, + "only one (slot, root) tuple should be present" + ); + assert_eq!( + cache + .items + .get(&(block_root_a, Slot::new(0))) + .expect("slot zero should be present") + .len(), + 1, + "only one item should be present" + ); + + /* + * Check that a prune at the genesis slot does nothing. + */ + + cache.prune(Slot::new(0)); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&(block_root_a, Slot::new(0))) + .expect("slot zero should be present") + .len(), + 1, + "only one item should be present" + ); + + /* + * Check that a prune empties the cache + */ + + cache.prune(E::slots_per_epoch().into()); + assert_eq!( + cache.finalized_slot, + Slot::from(E::slots_per_epoch()), + "finalized slot is updated" + ); + assert_eq!(cache.items.len(), 0, "no items left"); + + /* + * Check that we can't insert a finalized sidecar + */ + + // First slot of finalized epoch + let block_b = get_blob_sidecar(E::slots_per_epoch(), Hash256::random(), 0); + + assert_eq!( + cache.observe_sidecar(&block_b), + Err(Error::FinalizedBlob { + slot: E::slots_per_epoch().into(), + finalized_slot: E::slots_per_epoch().into(), + }), + "cant insert finalized sidecar" + ); + + assert_eq!(cache.items.len(), 0, "sidecar was not added"); + + /* + * Check that we _can_ insert a non-finalized block + */ + + let three_epochs = E::slots_per_epoch() * 3; + + // First slot of finalized epoch + let block_root_b = Hash256::random(); + let block_b = get_blob_sidecar(three_epochs, block_root_b, 0); + + assert_eq!( + cache.observe_sidecar(&block_b), + Ok(false), + "can insert non-finalized block" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&(block_root_b, Slot::new(three_epochs))) + .expect("the three epochs slot 
should be present") + .len(), + 1, + "only one proposer should be present" + ); + + /* + * Check that a prune doesnt wipe later blocks + */ + + let two_epochs = E::slots_per_epoch() * 2; + cache.prune(two_epochs.into()); + + assert_eq!( + cache.finalized_slot, + Slot::from(two_epochs), + "finalized slot is updated" + ); + + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&(block_root_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present") + .len(), + 1, + "only one proposer should be present" + ); + } + + #[test] + fn simple_observations() { + let mut cache = ObservedBlobSidecars::default(); + + // Slot 0, index 0 + let block_root_a = Hash256::random(); + let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + + assert_eq!( + cache.is_known(&sidecar_a), + Ok(false), + "no observation in empty cache" + ); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(false), + "can observe proposer, indicates proposer unobserved" + ); + + assert_eq!( + cache.is_known(&sidecar_a), + Ok(true), + "observed block is indicated as true" + ); + + assert_eq!( + cache.observe_sidecar(&sidecar_a), + Ok(true), + "observing again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 1, "only one slot should be present"); + assert_eq!( + cache + .items + .get(&(block_root_a, Slot::new(0))) + .expect("slot zero should be present") + .len(), + 1, + "only one proposer should be present" + ); + + // Slot 1, proposer 0 + + let block_root_b = Hash256::random(); + let sidecar_b = get_blob_sidecar(1, block_root_b, 0); + + assert_eq!( + cache.is_known(&sidecar_b), + Ok(false), + "no observation for new slot" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_b), + Ok(false), + "can observe proposer for new slot, indicates proposer unobserved" + ); + assert_eq!( + cache.is_known(&sidecar_b), + Ok(true), + "observed block in slot 1 is indicated 
as true" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_b), + Ok(true), + "observing slot 1 again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!( + cache + .items + .get(&(block_root_a, Slot::new(0))) + .expect("slot zero should be present") + .len(), + 1, + "only one proposer should be present in slot 0" + ); + assert_eq!( + cache + .items + .get(&(block_root_b, Slot::new(1))) + .expect("slot zero should be present") + .len(), + 1, + "only one proposer should be present in slot 1" + ); + + // Slot 0, index 1 + let sidecar_c = get_blob_sidecar(0, block_root_a, 1); + + assert_eq!( + cache.is_known(&sidecar_c), + Ok(false), + "no observation for new index" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_c), + Ok(false), + "can observe new index, indicates sidecar unobserved for new index" + ); + assert_eq!( + cache.is_known(&sidecar_c), + Ok(true), + "observed new sidecar is indicated as true" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_c), + Ok(true), + "observing new sidecar again indicates true" + ); + + assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); + assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!( + cache + .items + .get(&(block_root_a, Slot::new(0))) + .expect("slot zero should be present") + .len(), + 2, + "two blob indices should be present in slot 0" + ); + + // Try adding an out of bounds index + let invalid_index = E::max_blobs_per_block() as u64; + let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index); + assert_eq!( + cache.observe_sidecar(&sidecar_d), + Err(Error::InvalidBlobIndex(invalid_index)), + "cannot add an index > MaxBlobsPerBlock" + ); + } +} diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs index 805b61dd9c..b934c553e6 100644 --- 
a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -119,10 +119,13 @@ pub fn start_otb_verification_service( pub fn load_optimistic_transition_blocks( chain: &BeaconChain, ) -> Result, StoreError> { - process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - })? + process_results( + chain.store.hot_db.iter_column::(OTBColumn), + |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + }, + )? } #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 7b398db2f5..88b5682505 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -5,6 +5,7 @@ mod migration_schema_v14; mod migration_schema_v15; mod migration_schema_v16; mod migration_schema_v17; +mod migration_schema_v18; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -150,6 +151,14 @@ pub fn migrate_schema( let ops = migration_schema_v17::downgrade_from_v17::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(17), SchemaVersion(18)) => { + let ops = migration_schema_v18::upgrade_to_v18::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(18), SchemaVersion(17)) => { + let ops = migration_schema_v18::downgrade_from_v18::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index c9aa2097f8..ab267796e1 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -16,15 +16,14 @@ pub fn upgrade_to_v12( let spec = db.get_chain_spec(); // Load a V5 op pool and transform it to V12. - let PersistedOperationPoolV5 { + let Some(PersistedOperationPoolV5 { attestations_v5, sync_contributions, attester_slashings_v5, proposer_slashings_v5, voluntary_exits_v5, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? + else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; @@ -168,15 +167,14 @@ pub fn downgrade_from_v12( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V5. - let PersistedOperationPoolV12:: { + let Some(PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool_v12 - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? + else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs index be913d8cc5..52a990dc6e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -18,19 +18,14 @@ fn get_slot_clock( log: &Logger, ) -> Result, Error> { let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? 
{ - block - } else { + let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else { error!(log, "Missing genesis block"); return Ok(None); }; - let genesis_state = - if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { - state - } else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; + let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; Ok(Some(T::SlotClock::new( spec.genesis_slot, Duration::from_secs(genesis_state.genesis_time()), @@ -43,15 +38,14 @@ pub fn upgrade_to_v14( log: Logger, ) -> Result, Error> { // Load a V12 op pool and transform it to V14. - let PersistedOperationPoolV12:: { + let Some(PersistedOperationPoolV12:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool_v12 - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? + else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; @@ -94,16 +88,15 @@ pub fn downgrade_from_v14( } // Load a V14 op pool and transform it to V12. - let PersistedOperationPoolV14:: { + let Some(PersistedOperationPoolV14:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? 
+ else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs index 07c86bd931..0eb2c5fa3f 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -11,16 +11,15 @@ pub fn upgrade_to_v15( log: Logger, ) -> Result, Error> { // Load a V14 op pool and transform it to V15. - let PersistedOperationPoolV14:: { + let Some(PersistedOperationPoolV14:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, - } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool_v14 - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? + else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; @@ -43,7 +42,7 @@ pub fn downgrade_from_v15( log: Logger, ) -> Result, Error> { // Load a V15 op pool and transform it to V14. - let PersistedOperationPoolV15:: { + let Some(PersistedOperationPoolV15:: { attestations, sync_contributions, attester_slashings, @@ -51,9 +50,8 @@ pub fn downgrade_from_v15( voluntary_exits, bls_to_execution_changes, capella_bls_change_broadcast_indices, - } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { - op_pool - } else { + }) = db.get_item(&OP_POOL_DB_KEY)? 
+ else { debug!(log, "Nothing to do, no operation pool stored"); return Ok(vec![]); }; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs new file mode 100644 index 0000000000..e7b68eb418 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs @@ -0,0 +1,118 @@ +use crate::beacon_chain::BeaconChainTypes; +use slog::{error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{ + get_key_for_col, metadata::BLOB_INFO_KEY, DBColumn, Error, HotColdDB, KeyValueStoreOp, +}; +use types::{Epoch, EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} + +fn get_current_epoch( + db: &Arc>, + log: &Logger, +) -> Result { + get_slot_clock::(db, log)? 
+ .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration) +} + +pub fn upgrade_to_v18( + db: Arc>, + log: Logger, +) -> Result, Error> { + db.heal_freezer_block_roots()?; + info!(log, "Healed freezer block roots"); + + // No-op, even if Deneb has already occurred. The database is probably borked in this case, but + // *maybe* the fork recovery will revert the minority fork and succeed. + if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { + let current_epoch = get_current_epoch::(&db, &log)?; + if current_epoch >= deneb_fork_epoch { + warn!( + log, + "Attempting upgrade to v18 schema"; + "info" => "this may not work as Deneb has already been activated" + ); + } else { + info!( + log, + "Upgrading to v18 schema"; + "info" => "ready for Deneb", + "epochs_until_deneb" => deneb_fork_epoch - current_epoch + ); + } + } else { + info!( + log, + "Upgrading to v18 schema"; + "info" => "ready for Deneb once it is scheduled" + ); + } + Ok(vec![]) +} + +pub fn downgrade_from_v18( + db: Arc>, + log: Logger, +) -> Result, Error> { + // We cannot downgrade from V18 once the Deneb fork has been activated, because there will + // be blobs and blob metadata in the database that aren't understood by the V17 schema. 
+ if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { + let current_epoch = get_current_epoch::(&db, &log)?; + if current_epoch >= deneb_fork_epoch { + error!( + log, + "Deneb already active: v18+ is mandatory"; + "current_epoch" => current_epoch, + "deneb_fork_epoch" => deneb_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } else { + info!( + log, + "Downgrading to v17 schema"; + "info" => "you will need to upgrade before Deneb", + "epochs_until_deneb" => deneb_fork_epoch - current_epoch + ); + } + } else { + info!( + log, + "Downgrading to v17 schema"; + "info" => "you need to upgrade before Deneb", + ); + } + + let ops = vec![KeyValueStoreOp::DeleteKey(get_key_for_col( + DBColumn::BeaconMeta.into(), + BLOB_INFO_KEY.as_bytes(), + ))]; + + Ok(ops) +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f73223fa54..f3e97168a5 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -113,14 +113,11 @@ async fn state_advance_timer( let slot_duration = slot_clock.slot_duration(); loop { - let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => duration, - None => { - error!(log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. - sleep(slot_duration).await; - continue; - } + let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + continue; }; // Run the state advance 3/4 of the way through the slot (9s on mainnet). 
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 5fc05c5551..7ef9842544 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ @@ -14,10 +15,13 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use eth2::types::SignedBlockContentsTuple; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET, + ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, @@ -25,16 +29,19 @@ use execution_layer::{ use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; +use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; -use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; +use parking_lot::{Mutex, RwLock}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; -use slog::Logger; +use slog::{o, Drain, Logger}; +use slog_async::Async; +use slog_term::{FullFormat, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ @@ -44,20 +51,24 @@ use state_processing::{ use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::marker::PhantomData; use std::str::FromStr; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use 
store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; use types::sync_selection_proof::SyncSelectionProof; pub use types::test_utils::generate_deterministic_keypairs; +use types::test_utils::TestRandom; use types::{typenum::U4294967296, *}; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. -const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. // @@ -167,7 +178,6 @@ pub struct Builder { store_mutator: Option>, execution_layer: Option>, mock_execution_layer: Option>, - mock_builder: Option>, testing_slot_clock: Option, runtime: TestRuntime, log: Logger, @@ -190,11 +200,12 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { + let header = generate_genesis_header::(builder.get_spec(), false); let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, builder.get_spec(), ) .expect("should generate interop state"); @@ -251,11 +262,12 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { + let header = generate_genesis_header::(builder.get_spec(), false); let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, builder.get_spec(), ) .expect("should generate interop state"); @@ -301,7 +313,6 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, - mock_builder: None, testing_slot_clock: None, runtime, log, @@ -319,6 +330,11 @@ where self } + pub fn withdrawal_keypairs(mut 
self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to /// the "determistic" values, regardless of wether or not the validator has /// a BLS or execution address in the genesis deposits. @@ -334,11 +350,6 @@ where ) } - pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { - self.withdrawal_keypairs = withdrawal_keypairs; - self - } - pub fn default_spec(self) -> Self { self.spec_or_default(None) } @@ -387,7 +398,7 @@ where self } - pub fn execution_layer(mut self, urls: &[&str]) -> Self { + pub fn execution_layer_from_urls(mut self, urls: &[&str]) -> Self { assert!( self.execution_layer.is_none(), "execution layer already defined" @@ -416,6 +427,11 @@ where self } + pub fn execution_layer(mut self, el: Option>) -> Self { + self.execution_layer = el; + self + } + pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self { let mock = self .mock_execution_layer @@ -429,68 +445,28 @@ where spec.capella_fork_epoch.map(|epoch| { genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + mock.server.execution_block_generator().cancun_time = spec.deneb_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); self } - pub fn mock_execution_layer(mut self) -> Self { - let spec = self.spec.clone().expect("cannot build without spec"); - let shanghai_time = spec.capella_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() - }); - let mock = MockExecutionLayer::new( + pub fn mock_execution_layer(self) -> Self { + self.mock_execution_layer_with_config(None) + } + + pub fn mock_execution_layer_with_config(mut self, builder_threshold: Option) -> Self { + let mock = mock_execution_layer_from_parts::( + self.spec.as_ref().expect("cannot build without spec"), 
self.runtime.task_executor.clone(), - DEFAULT_TERMINAL_BLOCK, - shanghai_time, - None, - Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec, - None, + builder_threshold, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); self } - pub fn mock_execution_layer_with_builder( - mut self, - beacon_url: SensitiveUrl, - builder_threshold: Option, - ) -> Self { - // Get a random unused port - let port = unused_port::unused_tcp4_port().unwrap(); - let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); - - let spec = self.spec.clone().expect("cannot build without spec"); - let shanghai_time = spec.capella_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() - }); - let mock_el = MockExecutionLayer::new( - self.runtime.task_executor.clone(), - DEFAULT_TERMINAL_BLOCK, - shanghai_time, - builder_threshold, - Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec.clone(), - Some(builder_url.clone()), - ) - .move_to_terminal_block(); - - let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); - - self.mock_builder = Some(TestingBuilder::new( - mock_el_url, - builder_url, - beacon_url, - spec, - self.runtime.task_executor.clone(), - )); - self.execution_layer = Some(mock_el.el.clone()); - self.mock_execution_layer = Some(mock_el); - - self - } - /// Instruct the mock execution engine to always return a "valid" response to any payload it is /// asked to execute. 
pub fn mock_execution_layer_all_payloads_valid(self) -> Self { @@ -517,6 +493,9 @@ where .validator_keypairs .expect("cannot build without validator keypairs"); let chain_config = self.chain_config.unwrap_or_default(); + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .unwrap(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) .logger(log.clone()) @@ -537,7 +516,8 @@ where log.clone(), 5, ))) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log); + .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) + .trusted_setup(trusted_setup); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) @@ -572,12 +552,42 @@ where shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, - mock_builder: self.mock_builder.map(Arc::new), + mock_builder: None, + blob_signature_cache: <_>::default(), rng: make_rng(), } } } +pub fn mock_execution_layer_from_parts( + spec: &ChainSpec, + task_executor: TaskExecutor, + builder_threshold: Option, +) -> MockExecutionLayer { + let shanghai_time = spec.capella_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64() + }); + let cancun_time = spec.deneb_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * T::slots_per_epoch() * epoch.as_u64() + }); + + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + let kzg = Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"); + + MockExecutionLayer::new( + task_executor, + DEFAULT_TERMINAL_BLOCK, + shanghai_time, + cancun_time, + builder_threshold, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + spec.clone(), 
+ Some(kzg), + ) +} + /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. /// @@ -597,11 +607,31 @@ pub struct BeaconChainHarness { pub runtime: TestRuntime, pub mock_execution_layer: Option>, - pub mock_builder: Option>>, + pub mock_builder: Option>>, + + /// Cache for blob signatures because we don't need them for import, but we do need them + /// to test gossip validation. We always make them during block production but drop them + /// before storing them in the db. + pub blob_signature_cache: Arc>>, pub rng: Mutex, } +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct BlobSignatureKey { + block_root: Hash256, + blob_index: u64, +} + +impl BlobSignatureKey { + pub fn new(block_root: Hash256, blob_index: u64) -> Self { + Self { + block_root, + blob_index, + } + } +} + pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>; @@ -633,6 +663,65 @@ where .execution_block_generator() } + pub fn set_mock_builder( + &mut self, + beacon_url: SensitiveUrl, + ) -> impl futures::Future { + let mock_el = self + .mock_execution_layer + .as_ref() + .expect("harness was not built with mock execution layer"); + + let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); + + // Create the builder, listening on a free port. + let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing( + mock_el_url, + beacon_url, + self.spec.clone(), + self.runtime.task_executor.clone(), + ); + + // Set the builder URL in the execution layer now that its port is known. + let port = addr.port(); + mock_el + .el + .set_builder_url( + SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), + None, + ) + .unwrap(); + + self.mock_builder = Some(Arc::new(mock_builder)); + + // Sanity check. 
+ let el_builder = self + .chain + .execution_layer + .as_ref() + .unwrap() + .builder() + .unwrap(); + let mock_el_builder = mock_el.el.builder().unwrap(); + assert!(Arc::ptr_eq(&el_builder, &mock_el_builder)); + + mock_builder_server + } + + pub fn get_head_block(&self) -> RpcBlock { + let block = self.chain.head_beacon_block(); + let block_root = block.canonical_root(); + let blobs = self.chain.get_blobs(&block_root).unwrap(); + RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap() + } + + pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { + let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); + let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); + let blobs = self.chain.get_blobs(block_root).unwrap(); + RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap() + } + pub fn get_all_validators(&self) -> Vec { (0..self.validator_keypairs.len()).collect() } @@ -742,9 +831,28 @@ where &self, state: BeaconState, slot: Slot, - ) -> (SignedBlindedBeaconBlock, BeaconState) { + ) -> ( + SignedBlockContentsTuple>, + BeaconState, + ) { let (unblinded, new_state) = self.make_block(state, slot).await; - (unblinded.into(), new_state) + let maybe_blinded_blob_sidecars = unblinded.1.map(|blob_sidecar_list| { + VariableList::new( + blob_sidecar_list + .into_iter() + .map(|blob_sidecar| { + let blinded_sidecar: BlindedBlobSidecar = blob_sidecar.message.into(); + SignedSidecar { + message: Arc::new(blinded_sidecar), + signature: blob_sidecar.signature, + _phantom: PhantomData, + } + }) + .collect(), + ) + .unwrap() + }); + ((unblinded.0.into(), maybe_blinded_blob_sidecars), new_state) } /// Returns a newly created block, signed by the proposer for the given slot. 
@@ -752,7 +860,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple>, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -770,7 +878,7 @@ where let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); - let (block, state) = self + let (block, state, maybe_blob_sidecars) = self .chain .produce_block_on_state( state, @@ -790,7 +898,18 @@ where &self.spec, ); - (signed_block, state) + let block_contents: SignedBlockContentsTuple> = match &signed_block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => (signed_block, None), + SignedBeaconBlock::Deneb(_) => ( + signed_block, + maybe_blob_sidecars.map(|blobs| self.sign_blobs(blobs, &state, proposer_index)), + ), + }; + + (block_contents, state) } /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after @@ -799,7 +918,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple>, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -819,7 +938,7 @@ where let pre_state = state.clone(); - let (block, state) = self + let (block, state, maybe_blob_sidecars) = self .chain .produce_block_on_state( state, @@ -839,7 +958,40 @@ where &self.spec, ); - (signed_block, pre_state) + let block_contents: SignedBlockContentsTuple> = match &signed_block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => (signed_block, None), + SignedBeaconBlock::Deneb(_) => { + if let Some(blobs) = maybe_blob_sidecars { + let signed_blobs: SignedSidecarList> = Vec::from(blobs) + .into_iter() + .map(|blob| { + blob.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + 
state.genesis_validators_root(), + &self.spec, + ) + }) + .collect::>() + .into(); + let mut guard = self.blob_signature_cache.write(); + for blob in &signed_blobs { + guard.insert( + BlobSignatureKey::new(blob.message.block_root, blob.message.index), + blob.signature.clone(), + ); + } + (signed_block, Some(signed_blobs)) + } else { + (signed_block, None) + } + } + }; + + (block_contents, pre_state) } /// Create a randao reveal for a block at `slot`. @@ -876,6 +1028,35 @@ where ) } + /// Sign blobs, and cache their signatures. + pub fn sign_blobs( + &self, + blobs: BlobSidecarList, + state: &BeaconState, + proposer_index: usize, + ) -> SignedSidecarList> { + let signed_blobs: SignedSidecarList> = Vec::from(blobs) + .into_iter() + .map(|blob| { + blob.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ) + }) + .collect::>() + .into(); + let mut guard = self.blob_signature_cache.write(); + for blob in &signed_blobs { + guard.insert( + BlobSignatureKey::new(blob.message.block_root, blob.message.index), + blob.signature.clone(), + ); + } + signed_blobs + } + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. 
@@ -975,9 +1156,9 @@ where ) -> (Vec>, Vec) { let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); - let attesters = Mutex::new(vec![]); + let num_attesters = AtomicUsize::new(0); - let attestations = state + let (attestations, split_attesters) = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() @@ -990,13 +1171,14 @@ where return None; } - let mut attesters = attesters.lock(); if let Some(limit) = limit { - if attesters.len() >= limit { + // This atomics stuff is necessary because we're under a par_iter, + // and Rayon will deadlock if we use a mutex. + if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit { + num_attesters.fetch_sub(1, Ordering::Relaxed); return None; } } - attesters.push(*validator_index); let mut attestation = self .produce_unaggregated_attestation_for_block( @@ -1036,14 +1218,17 @@ where ) .unwrap(); - Some((attestation, subnet_id)) + Some(((attestation, subnet_id), validator_index)) }) - .collect::>() + .unzip::<_, _, Vec<_>, Vec<_>>() }) - .collect::>(); + .unzip::<_, _, Vec<_>, Vec<_>>(); + + // Flatten attesters. 
+ let attesters = split_attesters.into_iter().flatten().collect::>(); - let attesters = attesters.into_inner(); if let Some(limit) = limit { + assert_eq!(limit, num_attesters.load(Ordering::Relaxed)); assert_eq!( limit, attesters.len(), @@ -1514,14 +1699,13 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { epoch, validator_index, } - .sign(sk, &fork, genesis_validators_root, &self.chain.spec) + .sign(sk, genesis_validators_root, &self.chain.spec) } pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> { @@ -1630,11 +1814,12 @@ where state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), - ) -> (SignedBeaconBlock, BeaconState) { + ) -> (SignedBlockContentsTuple>, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, state) = self.make_block_return_pre_state(state, slot).await; + let ((block, blobs), state) = self.make_block_return_pre_state(state, slot).await; + let (mut block, _) = block.deconstruct(); block_modifier(&mut block); @@ -1647,7 +1832,7 @@ where state.genesis_validators_root(), &self.spec, ); - (signed_block, state) + ((signed_block, blobs), state) } pub fn make_deposits<'a>( @@ -1727,37 +1912,60 @@ where &self, slot: Slot, block_root: Hash256, - block: SignedBeaconBlock, + block_contents: SignedBlockContentsTuple>, ) -> Result> { self.set_current_slot(slot); + let (block, blobs) = block_contents; + // Note: we are just dropping signatures here and skipping signature verification. 
+ let blobs_without_signatures = blobs.map(|blobs| { + VariableList::from( + blobs + .into_iter() + .map(|blob| blob.message) + .collect::>(), + ) + }); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - Arc::new(block), + RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) .await? - .into(); + .try_into() + .unwrap(); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } pub async fn process_block_result( &self, - block: SignedBeaconBlock, + block_contents: SignedBlockContentsTuple>, ) -> Result> { + let (block, blobs) = block_contents; + // Note: we are just dropping signatures here and skipping signature verification. + let blobs_without_signatures = blobs.map(|blobs| { + VariableList::from( + blobs + .into_iter() + .map(|blob| blob.message) + .collect::>(), + ) + }); + let block_root = block.canonical_root(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( - block.canonical_root(), - Arc::new(block), + block_root, + RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) .await? 
- .into(); + .try_into() + .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1817,13 +2025,25 @@ where &self, slot: Slot, state: BeaconState, - ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { + ) -> Result< + ( + SignedBeaconBlockHash, + SignedBlockContentsTuple>, + BeaconState, + ), + BlockError, + > { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot).await; + let (block_contents, new_state) = self.make_block(state, slot).await; + let block_hash = self - .process_block(slot, block.canonical_root(), block.clone()) + .process_block( + slot, + block_contents.0.canonical_root(), + block_contents.clone(), + ) .await?; - Ok((block_hash, block, new_state)) + Ok((block_hash, block_contents, new_state)) } pub fn attest_block( @@ -1877,7 +2097,7 @@ where sync_committee_strategy: SyncCommitteeStrategy, ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; - self.attest_block(&state, state_root, block_hash, &block, validators); + self.attest_block(&state, state_root, block_hash, &block.0, validators); if sync_committee_strategy == SyncCommitteeStrategy::AllValidators && state.current_sync_committee().is_ok() @@ -2075,8 +2295,9 @@ where chain_dump .iter() .cloned() - .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) - .filter(|block_hash| *block_hash != Hash256::zero().into()) + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root) + .filter(|block_hash| *block_hash != Hash256::zero()) + .map(|hash| hash.into()) + .collect() } @@ -2128,6 +2349,29 @@ where .await } + /// Uses `Self::extend_chain` to add `num_slots` blocks. 
+ /// + /// Utilizes: + /// + /// - BlockStrategy::OnCanonicalHead, + /// - AttestationStrategy::SomeValidators(validators), + pub async fn extend_slots_some_validators( + &self, + num_slots: usize, + validators: Vec, + ) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { + self.advance_slot(); + } + + self.extend_chain( + num_slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(validators), + ) + .await + } + /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). /// @@ -2288,3 +2532,74 @@ pub struct MakeAttestationOptions { /// Fork to use for signing attestations. pub fork: Fork, } + +pub fn build_log(level: slog::Level, enabled: bool) -> Logger { + let decorator = TermDecorator::new().build(); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = Async::new(drain).build().fuse(); + + if enabled { + Logger::root(drain.filter_level(level).fuse(), o!()) + } else { + Logger::root(drain.filter(|_| false).fuse(), o!()) + } +} + +pub enum NumBlobs { + Random, + None, +} + +pub fn generate_rand_block_and_blobs( + fork_name: ForkName, + num_blobs: NumBlobs, + rng: &mut impl Rng, +) -> (SignedBeaconBlock>, Vec>) { + let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); + let mut blob_sidecars = vec![]; + if let Ok(message) = block.message_deneb_mut() { + // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
+ let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; + let num_blobs = match num_blobs { + NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::None => 0, + }; + let (bundle, transactions) = + execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + message.body.blob_kzg_commitments = bundle.commitments.clone(); + + let eth2::types::BlobsBundle { + commitments, + proofs, + blobs, + } = bundle; + + let block_root = block.canonical_root(); + + for (index, ((blob, kzg_commitment), kzg_proof)) in blobs + .into_iter() + .zip(commitments.into_iter()) + .zip(proofs.into_iter()) + .enumerate() + { + blob_sidecars.push(BlobSidecar { + block_root, + index: index as u64, + slot: block.slot(), + block_parent_root: block.parent_root(), + proposer_index: block.message().proposer_index(), + blob: blob.clone(), + kzg_commitment, + kzg_proof, + }); + } + } + + (block, blob_sidecars) +} diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 79910df292..00140dd6ec 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -38,7 +38,7 @@ impl ValidatorPubkeyCache { }; let store_ops = cache.import_new_pubkeys(state)?; - store.do_atomically(store_ops)?; + store.do_atomically_with_block_and_blobs_cache(store_ops)?; Ok(cache) } @@ -299,7 +299,7 @@ mod test { let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); - store.do_atomically(ops).unwrap(); + store.do_atomically_with_block_and_blobs_cache(ops).unwrap(); check_cache_get(&cache, &keypairs[..]); drop(cache); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 
85e4f1f093..a8ad75304b 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; @@ -67,6 +68,7 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); + let blobs = chain.get_blobs(&block_root).unwrap(); let epoch_boundary_slot = state .current_epoch() @@ -131,6 +133,19 @@ async fn produces_attestations() { assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); + let rpc_block = + RpcBlock::::new(None, Arc::new(block.clone()), Some(blobs.clone())) + .unwrap(); + let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( + available_block, + ) = chain + .data_availability_checker + .check_rpc_block_availability(rpc_block) + .unwrap() + else { + panic!("block should be available") + }; + let early_attestation = { let proto_block = chain .canonical_head @@ -141,7 +156,7 @@ async fn produces_attestations() { .early_attester_cache .add_head_block( block_root, - Arc::new(block.clone()), + available_block, proto_block, &state, &chain.spec, @@ -192,12 +207,29 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); + let head_blobs = harness + .chain + .get_blobs(&head.beacon_block_root) + .expect("should get blobs"); + + let rpc_block = + RpcBlock::::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap(); + let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = + harness + .chain + .data_availability_checker + .check_rpc_block_availability(rpc_block) + .unwrap() + else { + panic!("block should be 
available") + }; + harness .chain .early_attester_cache .add_head_block( head.beacon_block_root, - head.beacon_block.clone(), + available_block, head_proto_block, &head.beacon_state, &harness.chain.spec, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 5cea51090b..2501768c78 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -9,7 +9,7 @@ use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }, - BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, + BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, }; use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Slot { + pub fn earliest_valid_attestation_slot(&self) -> Slot { + let offset = match self.harness.spec.fork_name_at_epoch(self.epoch()) { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + // Subtract an additional slot since the harness will be exactly on the start of the + // slot and the propagation tolerance will allow an extra slot. 
+ E::slots_per_epoch() + 1 + } + // EIP-7045 + ForkName::Deneb => { + let epoch_slot_offset = (self.slot() % E::slots_per_epoch()).as_u64(); + if epoch_slot_offset != 0 { + E::slots_per_epoch() + epoch_slot_offset + } else { + // Here the propagation tolerance will cause the cutoff to be an entire epoch earlier + 2 * E::slots_per_epoch() + } + } + }; + self.slot() .as_u64() - .checked_sub(E::slots_per_epoch() + 2) + .checked_sub(offset) .expect("chain is not sufficiently deep for test") .into() } @@ -476,18 +502,21 @@ async fn aggregated_gossip_verification() { ) .inspect_aggregate_err( "aggregate from past slot", - |tester, a| a.message.aggregate.data.slot = tester.two_epochs_ago(), + |tester, a| { + let too_early_slot = tester.earliest_valid_attestation_slot() - 1; + a.message.aggregate.data.slot = too_early_slot; + a.message.aggregate.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch()); + }, |tester, err| { + let valid_early_slot = tester.earliest_valid_attestation_slot(); assert!(matches!( err, AttnError::PastSlot { attestation_slot, - // Subtract an additional slot since the harness will be exactly on the start of the - // slot and the propagation tolerance will allow an extra slot. 
earliest_permissible_slot } - if attestation_slot == tester.two_epochs_ago() - && earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1 + if attestation_slot == valid_early_slot - 1 + && earliest_permissible_slot == valid_early_slot )) }, ) @@ -792,22 +821,20 @@ async fn unaggregated_gossip_verification() { .inspect_unaggregate_err( "attestation from past slot", |tester, a, _| { - let early_slot = tester.two_epochs_ago(); - a.data.slot = early_slot; - a.data.target.epoch = early_slot.epoch(E::slots_per_epoch()); + let too_early_slot = tester.earliest_valid_attestation_slot() - 1; + a.data.slot = too_early_slot; + a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch()); }, |tester, err| { - dbg!(&err); + let valid_early_slot = tester.earliest_valid_attestation_slot(); assert!(matches!( err, AttnError::PastSlot { attestation_slot, - // Subtract an additional slot since the harness will be exactly on the start of the - // slot and the propagation tolerance will allow an extra slot. 
earliest_permissible_slot, } - if attestation_slot == tester.two_epochs_ago() - && earliest_permissible_slot == tester.slot() - E::slots_per_epoch() - 1 + if attestation_slot == valid_early_slot - 1 + && earliest_permissible_slot == valid_early_slot )) }, ) diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 75b00b2b44..3ac3980714 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,10 +1,14 @@ #![cfg(not(debug_assertions))] -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::test_utils::BlobSignatureKey; +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, }; use beacon_chain::{ - BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer, + BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, + NotifyExecutionLayer, }; use lazy_static::lazy_static; use logging::test_logger; @@ -32,7 +36,7 @@ lazy_static! 
{ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } -async fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -44,6 +48,7 @@ async fn get_chain_segment() -> Vec> { .await; let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); for snapshot in harness .chain .chain_dump() @@ -62,13 +67,82 @@ async fn get_chain_segment() -> Vec> { beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, }); + segment_blobs.push(Some( + harness + .chain + .get_blobs(&snapshot.beacon_block_root) + .unwrap(), + )) } - segment + (segment, segment_blobs) +} + +async fn get_chain_segment_with_signed_blobs() -> ( + Vec>, + Vec, ::MaxBlobsPerBlock>>>, +) { + let harness = get_harness(VALIDATOR_COUNT); + + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); + for snapshot in harness + .chain + .chain_dump() + .expect("should dump chain") + .into_iter() + .skip(1) + { + let full_block = harness + .chain + .get_block(&snapshot.beacon_block_root) + .await + .unwrap() + .unwrap(); + segment.push(BeaconSnapshot { + beacon_block_root: snapshot.beacon_block_root, + beacon_block: Arc::new(full_block), + beacon_state: snapshot.beacon_state, + }); + let signed_blobs = harness + .chain + .get_blobs(&snapshot.beacon_block_root) + .unwrap() + .into_iter() + .map(|blob| { + let block_root = blob.block_root; + let blob_index = blob.index; + SignedBlobSidecar { + message: blob, + signature: harness + .blob_signature_cache + .read() + .get(&BlobSignatureKey::new(block_root, blob_index)) + .unwrap() + .clone(), + _phantom: PhantomData, + } + }) + .collect::>(); + 
segment_blobs.push(Some(VariableList::from(signed_blobs))) + } + (segment, segment_blobs) } fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -79,10 +153,16 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness]) -> Vec>> { +fn chain_segment_blocks( + chain_segment: &[BeaconSnapshot], + blobs: &[Option>], +) -> Vec> { chain_segment .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(blobs.into_iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect() } @@ -124,22 +204,57 @@ fn update_proposal_signatures( } } -fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { +fn update_parent_roots( + snapshots: &mut [BeaconSnapshot], + blobs: &mut [Option>], +) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); - if let Some(child) = snapshots.get_mut(i + 1) { + if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) { let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) + let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); + let new_child_root = new_child.canonical_root(); + child.beacon_block = new_child; + if let Some(blobs) = child_blobs { + update_blob_roots(new_child_root, blobs); + } } } } +fn update_blob_roots(block_root: Hash256, blobs: &mut BlobSidecarList) { + for old_blob_sidecar in blobs.iter_mut() { + let index = old_blob_sidecar.index; + let slot = old_blob_sidecar.slot; + let block_parent_root = old_blob_sidecar.block_parent_root; + let proposer_index 
= old_blob_sidecar.proposer_index; + let blob = old_blob_sidecar.blob.clone(); + let kzg_commitment = old_blob_sidecar.kzg_commitment; + let kzg_proof = old_blob_sidecar.kzg_proof; + + let new_blob = Arc::new(BlobSidecar:: { + block_root, + index, + slot, + block_parent_root, + proposer_index, + blob, + kzg_commitment, + kzg_proof, + }); + *old_blob_sidecar = new_blob; + } +} + #[tokio::test] async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; - let blocks = chain_segment_blocks(&chain_segment); + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); harness .chain @@ -174,8 +289,11 @@ async fn chain_segment_full_segment() { async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; - let blocks = chain_segment_blocks(&chain_segment); + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); harness .chain @@ -204,7 +322,7 @@ async fn chain_segment_varying_chunk_size() { #[tokio::test] async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain @@ -214,7 +332,10 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. 
*/ - let mut blocks = chain_segment_blocks(&chain_segment); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); blocks.remove(2); assert!( @@ -232,10 +353,17 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. */ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); + + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -253,7 +381,7 @@ async fn chain_segment_non_linear_parent_roots() { #[tokio::test] async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain .slot_clock @@ -263,10 +391,16 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. 
*/ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -284,10 +418,16 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. */ - let mut blocks = chain_segment_blocks(&chain_segment); - let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .map(|block| block.into()) + .collect(); + let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); + blocks[3] = RpcBlock::new_without_blobs( + None, + Arc::new(SignedBeaconBlock::from_block(block, signature)), + ); assert!( matches!( @@ -304,14 +444,18 @@ async fn chain_segment_non_linear_slots() { async fn assert_invalid_signature( chain_segment: &[BeaconSnapshot], + chain_segment_blobs: &[Option>], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], item: &str, ) { - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); // Ensure the block will be rejected if imported in a chain segment. 
@@ -335,7 +479,10 @@ async fn assert_invalid_signature( let ancestor_blocks = chain_segment .iter() .take(block_index) - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. @@ -349,7 +496,12 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - snapshots[block_index].beacon_block.clone(), + RpcBlock::new( + None, + snapshots[block_index].beacon_block.clone(), + chain_segment_blobs[block_index].clone(), + ) + .unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -381,7 +533,7 @@ async fn get_invalid_sigs_harness( } #[tokio::test] async fn invalid_signature_gossip_block() { - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). 
let harness = get_invalid_sigs_harness(&chain_segment).await; @@ -399,7 +551,10 @@ async fn invalid_signature_gossip_block() { let ancestor_blocks = chain_segment .iter() .take(block_index) - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); harness .chain @@ -428,7 +583,7 @@ async fn invalid_signature_gossip_block() { #[tokio::test] async fn invalid_signature_block_proposal() { - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -441,9 +596,12 @@ async fn invalid_signature_block_proposal() { block.clone(), junk_signature(), )); - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. 
assert!( @@ -462,7 +620,7 @@ async fn invalid_signature_block_proposal() { #[tokio::test] async fn invalid_signature_randao_reveal() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -474,15 +632,23 @@ async fn invalid_signature_randao_reveal() { *block.body_mut().randao_reveal_mut() = junk_signature(); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; + assert_invalid_signature( + &chain_segment, + &chain_segment_blobs, + &harness, + block_index, + &snapshots, + "randao", + ) + .await; } } #[tokio::test] async fn invalid_signature_proposer_slashing() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -508,10 +674,11 @@ async fn invalid_signature_proposer_slashing() { .expect("should update proposer slashing"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -523,7 +690,7 @@ async fn invalid_signature_proposer_slashing() { #[tokio::test] async fn invalid_signature_attester_slashing() { - let chain_segment = get_chain_segment().await; 
+ let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -560,10 +727,11 @@ async fn invalid_signature_attester_slashing() { .expect("should update attester slashing"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -575,7 +743,7 @@ async fn invalid_signature_attester_slashing() { #[tokio::test] async fn invalid_signature_attestation() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; let mut checked_attestation = false; for &block_index in BLOCK_INDICES { @@ -590,10 +758,11 @@ async fn invalid_signature_attestation() { attestation.signature = junk_aggregate_signature(); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -612,7 +781,7 @@ async fn invalid_signature_attestation() { #[tokio::test] async fn invalid_signature_deposit() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! 
let harness = get_invalid_sigs_harness(&chain_segment).await; @@ -638,11 +807,14 @@ async fn invalid_signature_deposit() { .expect("should update deposit"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - let blocks = snapshots + let blocks: Vec> = snapshots .iter() - .map(|snapshot| snapshot.beacon_block.clone()) + .zip(chain_segment_blobs.iter()) + .map(|(snapshot, blobs)| { + RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + }) .collect(); assert!( !matches!( @@ -660,7 +832,7 @@ async fn invalid_signature_deposit() { #[tokio::test] async fn invalid_signature_exit() { - let chain_segment = get_chain_segment().await; + let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await; for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); @@ -683,10 +855,11 @@ async fn invalid_signature_exit() { .expect("should update deposit"); snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); - update_parent_roots(&mut snapshots); + update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature( &chain_segment, + &chain_segment_blobs, &harness, block_index, &snapshots, @@ -706,7 +879,7 @@ fn unwrap_err(result: Result) -> E { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let chain_segment = get_chain_segment().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -716,7 +889,10 @@ async fn block_gossip_verification() { .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // 
Import the ancestors prior to the block we're testing. - for snapshot in &chain_segment[0..block_index] { + for (snapshot, blobs_opt) in chain_segment[0..block_index] + .iter() + .zip(chain_segment_blobs.iter()) + { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) @@ -733,6 +909,21 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); + if let Some(blobs) = blobs_opt { + for blob in blobs { + let blob_index = blob.message.index; + let gossip_verified = harness + .chain + .verify_blob_sidecar_for_gossip(blob.clone(), blob_index) + .expect("should obtain gossip verified blob"); + + harness + .chain + .process_gossip_blob(gossip_verified) + .await + .expect("should import valid gossip verified blob"); + } + } } // Recompute the head to ensure we cache the latest view of fork choice. @@ -757,7 +948,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), BlockError::FutureSlot { present_slot, block_slot, @@ -791,7 +982,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -821,10 +1012,9 @@ async fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( - block, - junk_signature() - ))) + .verify_block_for_gossip( + Arc::new(SignedBeaconBlock::from_block(block, 
junk_signature())).into() + ) .await ), BlockError::ProposalSignatureInvalid @@ -849,7 +1039,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -875,7 +1065,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -913,7 +1103,7 @@ async fn block_gossip_verification() { ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -925,7 +1115,7 @@ async fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. 
assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" @@ -951,7 +1141,7 @@ async fn block_gossip_verification() { matches!( harness .chain - .verify_block_for_gossip(block.clone()) + .verify_block_for_gossip(block.clone().into()) .await .err() .expect("should error when processing known block"), @@ -979,14 +1169,29 @@ async fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let (block2, _) = harness.make_block(state, Slot::new(1)).await; + let ((block1, blobs1), _) = harness.make_block(state.clone(), Slot::new(1)).await; + let ((block2, _blobs2), _) = harness.make_block(state, Slot::new(1)).await; let verified_block = harness .chain .verify_block_for_gossip(Arc::new(block1)) .await .unwrap(); + + if let Some(blobs) = blobs1 { + for blob in blobs { + let blob_index = blob.message.index; + let verified_blob = harness + .chain + .verify_blob_sidecar_for_gossip(blob, blob_index) + .unwrap(); + harness + .chain + .process_gossip_blob(verified_blob) + .await + .unwrap(); + } + } harness .chain .process_block( @@ -1019,7 +1224,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await; let verified_block = harness .chain @@ -1106,7 +1311,7 @@ async fn add_base_block_to_altair_chain() { // Produce an Altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; + let ((altair_signed_block, _), _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1163,7 +1368,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(base_block.clone())) + .verify_block_for_gossip(Arc::new(base_block.clone()).into()) .await .err() .expect("should error when processing base block"), @@ -1196,7 +1401,10 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)], NotifyExecutionLayer::Yes,) + .process_chain_segment( + vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))], + NotifyExecutionLayer::Yes, + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1240,7 +1448,7 @@ async fn add_altair_block_to_base_chain() { // Produce an altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; + let ((base_signed_block, _), _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1298,7 +1506,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(altair_block.clone())) + .verify_block_for_gossip(Arc::new(altair_block.clone()).into()) .await .err() .expect("should error when processing altair block"), @@ -1331,7 +1539,10 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)], NotifyExecutionLayer::Yes) + .process_chain_segment( + vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))], + NotifyExecutionLayer::Yes + ) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1381,7 +1592,8 @@ async fn import_duplicate_block_unrealized_justification() { // Produce a block to justify epoch 2. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (block, _) = harness.make_block(state.clone(), slot).await; + let (block_contents, _) = harness.make_block(state.clone(), slot).await; + let (block, _) = block_contents; let block = Arc::new(block); let block_root = block.canonical_root(); @@ -1397,9 +1609,7 @@ async fn import_duplicate_block_unrealized_justification() { .unwrap(); // Import the first block, simulating a block processed via a finalized chain segment. - chain - .clone() - .import_execution_pending_block(verified_block1) + import_execution_pending_block(chain.clone(), verified_block1) .await .unwrap(); @@ -1418,9 +1628,7 @@ async fn import_duplicate_block_unrealized_justification() { drop(fc); // Import the second verified block, simulating a block processed via RPC. 
- chain - .clone() - .import_execution_pending_block(verified_block2) + import_execution_pending_block(chain.clone(), verified_block2) .await .unwrap(); @@ -1439,3 +1647,23 @@ async fn import_duplicate_block_unrealized_justification() { Some(unrealized_justification) ); } + +async fn import_execution_pending_block( + chain: Arc>, + execution_pending_block: ExecutionPendingBlock, +) -> Result { + match chain + .clone() + .into_executed_block(execution_pending_block) + .await + .unwrap() + { + ExecutedBlock::Available(block) => chain + .import_available_block(Box::from(block)) + .await + .map_err(|e| format!("{e:?}")), + ExecutedBlock::AvailabilityPending(_) => { + Err("AvailabilityPending not expected in this test. Block not imported.".to_string()) + } + } +} diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs new file mode 100644 index 0000000000..c48cf310a2 --- /dev/null +++ b/beacon_node/beacon_chain/tests/events.rs @@ -0,0 +1,99 @@ +use beacon_chain::blob_verification::GossipVerifiedBlob; +use beacon_chain::test_utils::BeaconChainHarness; +use bls::Signature; +use eth2::types::{EventKind, SseBlobSidecar}; +use rand::rngs::StdRng; +use rand::SeedableRng; +use std::marker::PhantomData; +use std::sync::Arc; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar}; + +type E = MinimalEthSpec; + +/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. 
+#[tokio::test] +async fn blob_sidecar_event_on_process_gossip_blob() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process a gossip verified blob + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let signed_sidecar = SignedBlobSidecar { + message: BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(), + signature: Signature::empty(), + _phantom: PhantomData, + }; + let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar); + let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob()); + + let _ = harness + .chain + .process_gossip_blob(gossip_verified_blob) + .await + .unwrap(); + + let sidecar_event = blob_event_receiver.try_recv().unwrap(); + assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); +} + +/// Verifies that a blob event is emitted when blobs are received via RPC. 
+#[tokio::test] +async fn blob_sidecar_event_on_process_rpc_blobs() { + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // subscribe to blob sidecar events + let event_handler = harness.chain.event_handler.as_ref().unwrap(); + let mut blob_event_receiver = event_handler.subscribe_blob_sidecar(); + + // build and process multiple rpc blobs + let kzg = harness.chain.kzg.as_ref().unwrap(); + let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + + let blob_1 = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let blob_2 = Arc::new(BlobSidecar { + index: 1, + ..BlobSidecar::random_valid(&mut rng, kzg).unwrap() + }); + let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let expected_sse_blobs = vec![ + SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), + SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), + ]; + + let _ = harness + .chain + .process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs) + .await + .unwrap(); + + let mut sse_blobs: Vec = vec![]; + while let Ok(sidecar_event) = blob_event_receiver.try_recv() { + if let EventKind::BlobSidecar(sse_blob_sidecar) = sidecar_event { + sse_blobs.push(sse_blob_sidecar); + } else { + panic!("`BlobSidecar` event kind expected."); + } + } + assert_eq!(sse_blobs, expected_sse_blobs); +} diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index c81a547406..332f6a4829 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -2,6 +2,7 @@ mod attestation_production; mod attestation_verification; mod block_verification; mod capella; +mod events; mod merge; mod op_verification; mod payload_invalidation; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs 
b/beacon_node/beacon_chain/tests/op_verification.rs index 535fe080a7..f4af490710 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -31,8 +31,16 @@ fn get_store(db_path: &TempDir) -> Arc { let cold_path = db_path.path().join("cold_db"); let config = StoreConfig::default(); let log = NullLoggerBuilder.build().expect("logger should build"); - HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log) - .expect("disk store should initialize") + HotColdDB::open( + &hot_path, + &cold_path, + None, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") } fn get_harness(store: Arc, validator_count: usize) -> TestHarness { diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 9a8c324d09..5076a6c1a9 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, + BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, @@ -59,6 +59,10 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() @@ -167,7 +171,7 @@ impl InvalidPayloadRig { async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { let mut roots = 
Vec::with_capacity(num_blocks as usize); for _ in 0..num_blocks { - roots.push(self.import_block(is_valid.clone()).await); + roots.push(self.import_block(is_valid).await); } roots } @@ -221,7 +225,7 @@ impl InvalidPayloadRig { let head = self.harness.chain.head_snapshot(); let state = head.beacon_state.clone_with_only_committee_caches(); let slot = slot_override.unwrap_or(state.slot() + 1); - let (block, post_state) = self.harness.make_block(state, slot).await; + let ((block, blobs), post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); let set_new_payload = |payload: Payload| match payload { @@ -285,7 +289,7 @@ impl InvalidPayloadRig { } let root = self .harness - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); @@ -326,7 +330,7 @@ impl InvalidPayloadRig { match self .harness - .process_block(slot, block.canonical_root(), block) + .process_block(slot, block.canonical_root(), (block, blobs)) .await { Err(error) if evaluate_error(&error) => (), @@ -689,7 +693,8 @@ async fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let ((fork_block, _), _fork_post_state) = + rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_block_root = rig .harness .chain @@ -700,6 +705,8 @@ async fn invalidates_all_descendants() { || Ok(()), ) .await + .unwrap() + .try_into() .unwrap(); rig.recompute_head().await; @@ -785,7 +792,8 @@ async fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let ((fork_block, 
_), _fork_post_state) = + rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); let fork_block_root = rig .harness @@ -797,6 +805,8 @@ async fn switches_heads() { || Ok(()), ) .await + .unwrap() + .try_into() .unwrap(); rig.recompute_head().await; @@ -811,13 +821,16 @@ async fn switches_heads() { }) .await; - // The fork block should become the head. - assert_eq!(rig.harness.head_block_root(), fork_block_root); + // NOTE: The `import_block` method above will cause the `ExecutionStatus` of the + // `fork_block_root`'s payload to switch from `Optimistic` to `Invalid`. This means it *won't* + // be set as head, it's parent block will instead. This is an issue with the mock EL and/or + // the payload invalidation rig. + assert_eq!(rig.harness.head_block_root(), fork_parent_root); // The fork block has not yet been validated. assert!(rig .execution_status(fork_block_root) - .is_strictly_optimistic()); + .is_optimistic_or_invalid()); for root in blocks { let slot = rig @@ -1008,6 +1021,7 @@ async fn payload_preparation() { .unwrap(), fee_recipient, None, + None, ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } @@ -1030,8 +1044,8 @@ async fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, state) = rig.harness.make_block(parent_state, slot).await; - let block = Arc::new(block); + let (block_tuple, state) = rig.harness.make_block(parent_state, slot).await; + let block = Arc::new(block_tuple.0); let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); @@ -1041,7 +1055,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. 
assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1424,13 +1438,13 @@ async fn build_optimistic_chain( .server .all_get_block_by_hash_requests_return_natural_value(); - return rig; + rig } #[tokio::test] async fn optimistic_transition_block_valid_unfinalized() { let ttd = 42; - let num_blocks = 16 as usize; + let num_blocks = 16_usize; let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; let post_transition_block_root = rig @@ -1484,7 +1498,7 @@ async fn optimistic_transition_block_valid_unfinalized() { #[tokio::test] async fn optimistic_transition_block_valid_finalized() { let ttd = 42; - let num_blocks = 130 as usize; + let num_blocks = 130_usize; let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; let post_transition_block_root = rig @@ -1539,7 +1553,7 @@ async fn optimistic_transition_block_valid_finalized() { async fn optimistic_transition_block_invalid_unfinalized() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 22 as usize; + let num_blocks = 22_usize; let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1615,7 +1629,7 @@ async fn optimistic_transition_block_invalid_unfinalized() { async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 22 as usize; + let num_blocks = 22_usize; let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1728,7 +1742,7 @@ async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { async fn optimistic_transition_block_invalid_finalized() { let block_ttd = 42; let rig_ttd = 1337; - let num_blocks = 130 as usize; + let num_blocks = 130_usize; let rig = 
build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; let post_transition_block_root = rig @@ -1850,8 +1864,8 @@ impl InvalidHeadSetup { .chain .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) .unwrap(); - let (fork_block, _) = rig.harness.make_block(parent_state, slot).await; - opt_fork_block = Some(Arc::new(fork_block)); + let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await; + opt_fork_block = Some(Arc::new(fork_block_tuple.0)); } else { // Skipped slot. }; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index be271804b9..7c8f01cf55 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -14,7 +14,7 @@ use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; use lazy_static::lazy_static; use types::beacon_state::Error as BeaconStateError; -use types::{BeaconState, ChainSpec}; +use types::{BeaconState, ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; @@ -219,6 +219,100 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_verify_attestation_rewards_altair_inactivity_leak() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; + + // advance until beginning of epoch N + 1 and get balances + harness + .extend_slots_some_validators( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + half_validators.clone(), + ) + .await; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = 
HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness + .extend_slots_some_validators(1, half_validators.clone()) + .await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert inactivity penalty for both ideal rewards and individual validators + assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); + assert!(total_rewards[..half] + .iter() + .all(|reward| reward.inactivity == 0)); + assert!(total_rewards[half..] 
+ .iter() + .all(|reward| reward.inactivity < 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + + assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_verify_attestation_rewards_base_subset_only() { let harness = get_harness(E::default_spec()); @@ -297,3 +391,32 @@ fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec, + expected_balances: Vec, +) -> Vec { + let calculated_balances = expected_balances + .iter() + .enumerate() + .map(|(i, balance)| balance + proposal_rewards_map.get(&(i as u64)).unwrap_or(&0u64)) + .collect(); + + calculated_balances +} + +fn apply_sync_committee_rewards( + sync_committee_rewards_map: &HashMap, + expected_balances: Vec, +) -> Vec { + let calculated_balances = expected_balances + .iter() + .enumerate() + .map(|(i, balance)| { + (*balance as i64 + sync_committee_rewards_map.get(&(i as u64)).unwrap_or(&0i64)) + .unsigned_abs() + }) + .collect(); + + calculated_balances +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2902774825..e8bb76778b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,22 +1,27 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ - test_spec, 
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, + BlockStrategy, DiskHarnessType, }; use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ - historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, - ServerSentEventHandler, WhenSlotSkipped, + data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, + migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, + BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::TrustedSetup; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; use rand::Rng; -use state_processing::BlockReplayer; +use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::{state_advance::complete_state_advance, BlockReplayer}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -24,8 +29,10 @@ use std::sync::Arc; use std::time::Duration; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{ + chunked_vector::{chunk_key, Field}, + get_key_for_col, iter::{BlockRootsIterator, StateRootsIterator}, - HotColdDB, LevelDB, StoreConfig, + DBColumn, HotColdDB, KeyValueStore, KeyValueStoreOp, LevelDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -46,37 +53,234 @@ type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { - get_store_with_spec(db_path, test_spec::()) + get_store_generic(db_path, StoreConfig::default(), test_spec::()) } -fn get_store_with_spec( +fn get_store_generic( db_path: &TempDir, + config: StoreConfig, spec: ChainSpec, ) -> Arc, LevelDB>> { 
let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); - let config = StoreConfig::default(); let log = test_logger(); - HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log) - .expect("disk store should initialize") + HotColdDB::open( + &hot_path, + &cold_path, + None, + |_, _, _| Ok(()), + config, + spec, + log, + ) + .expect("disk store should initialize") } fn get_harness( store: Arc, LevelDB>>, validator_count: usize, ) -> TestHarness { - let harness = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() + // Most tests expect to retain historic states, so we use this as the default. + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + get_harness_generic(store, validator_count, chain_config) +} + +fn get_harness_generic( + store: Arc, LevelDB>>, + validator_count: usize, + chain_config: ChainConfig, +) -> TestHarness { + let harness = TestHarness::builder(MinimalEthSpec) + .spec(store.get_chain_spec().clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() + .chain_config(chain_config) .build(); harness.advance_slot(); harness } +/// Tests that `store.heal_freezer_block_roots` inserts block roots between last restore point +/// slot and the split slot. 
+#[tokio::test] +async fn heal_freezer_block_roots() { + // chunk_size is hard-coded to 128 + let num_blocks_produced = E::slots_per_epoch() * 20; + let db_path = tempdir().unwrap(); + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let split_slot = store.get_split_slot(); + assert_eq!(split_slot, 18 * E::slots_per_epoch()); + + // Do a heal before deleting to make sure that it doesn't break. + let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); + store.heal_freezer_block_roots().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Delete block roots between `last_restore_point_slot` and `split_slot`. + let chunk_index = >::chunk_index( + last_restore_point_slot.as_usize(), + ); + let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); + store + .cold_db + .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) + .unwrap(); + + let block_root_err = store + .forwards_block_roots_iterator_until( + last_restore_point_slot, + last_restore_point_slot + 1, + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .next() + .unwrap() + .unwrap_err(); + + assert!(matches!(block_root_err, store::Error::NoContinuationData)); + + // Re-insert block roots + store.heal_freezer_block_roots().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Run for another two epochs to check that the invariant is maintained. 
+ let additional_blocks_produced = 2 * E::slots_per_epoch(); + harness + .extend_slots(additional_blocks_produced as usize) + .await; + + check_finalization(&harness, num_blocks_produced + additional_blocks_produced); + check_split_slot(&harness, store); + check_chain_dump( + &harness, + num_blocks_produced + additional_blocks_produced + 1, + ); + check_iterators(&harness); +} + +/// Tests that `store.heal_freezer_block_roots` inserts block roots between last restore point +/// slot and the split slot. +#[tokio::test] +async fn heal_freezer_block_roots_with_skip_slots() { + // chunk_size is hard-coded to 128 + let num_blocks_produced = E::slots_per_epoch() * 20; + let db_path = tempdir().unwrap(); + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let current_state = harness.get_current_state(); + let state_root = harness.get_current_state().tree_hash_root(); + let all_validators = &harness.get_all_validators(); + harness + .add_attested_blocks_at_slots( + current_state, + state_root, + &(1..=num_blocks_produced) + .filter(|i| i % 12 != 0) + .map(Slot::new) + .collect::>(), + all_validators, + ) + .await; + + // split slot should be 18 here + let split_slot = store.get_split_slot(); + assert_eq!(split_slot, 18 * E::slots_per_epoch()); + + let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); + let chunk_index = >::chunk_index( + last_restore_point_slot.as_usize(), + ); + let key_chunk = get_key_for_col(DBColumn::BeaconBlockRoots.as_str(), &chunk_key(chunk_index)); + store + .cold_db + .do_atomically(vec![KeyValueStoreOp::DeleteKey(key_chunk)]) + .unwrap(); + + let block_root_err = store + .forwards_block_roots_iterator_until( + last_restore_point_slot, + last_restore_point_slot + 1, + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .next() + .unwrap() + 
.unwrap_err(); + + assert!(matches!(block_root_err, store::Error::NoContinuationData)); + + // heal function + store.heal_freezer_block_roots().unwrap(); + check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); + + // Run for another two epochs to check that the invariant is maintained. + let additional_blocks_produced = 2 * E::slots_per_epoch(); + harness + .extend_slots(additional_blocks_produced as usize) + .await; + + check_finalization(&harness, num_blocks_produced + additional_blocks_produced); + check_split_slot(&harness, store); + check_iterators(&harness); +} + +fn check_freezer_block_roots( + harness: &TestHarness, + last_restore_point_slot: Slot, + split_slot: Slot, +) { + for slot in (last_restore_point_slot.as_u64()..split_slot.as_u64()).map(Slot::new) { + let (block_root, result_slot) = harness + .chain + .store + .forwards_block_roots_iterator_until(slot, slot, || unreachable!(), &harness.chain.spec) + .unwrap() + .next() + .unwrap() + .unwrap(); + assert_eq!(slot, result_slot); + let expected_block_root = harness + .chain + .block_root_at_slot(slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + assert_eq!(expected_block_root, block_root); + } +} + #[tokio::test] async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; @@ -406,7 +610,7 @@ async fn forwards_iter_block_and_state_roots_until() { // The last restore point slot is the point at which the hybrid forwards iterator behaviour // changes. - let last_restore_point_slot = store.get_latest_restore_point_slot(); + let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); assert!(last_restore_point_slot > 0); let chain = &harness.chain; @@ -460,13 +664,15 @@ async fn block_replay_with_inaccurate_state_roots() { .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. 
- let (mut head_state, head_root) = harness.get_current_state_and_root(); + let (mut head_state, head_state_root) = harness.get_current_state_and_root(); + let head_block_root = harness.head_block_root(); assert_ne!(head_state.slot() % 32, 0); - let mut fast_head_state = store + let (_, mut fast_head_state) = store .get_inconsistent_state_for_attestation_verification_only( - &head_root, - Some(head_state.slot()), + &head_block_root, + head_state.slot(), + head_state_root, ) .unwrap() .unwrap(); @@ -565,14 +771,7 @@ async fn block_replayer_hooks() { async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let validators_keypairs = - types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_disk_store(store.clone()) - .mock_execution_layer() - .build(); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -697,7 +896,7 @@ async fn multi_epoch_fork_valid_blocks_test( let store = get_store(&db_path); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = BeaconChainHarness::builder(MinimalEthSpec) + let harness = TestHarness::builder(MinimalEthSpec) .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store) @@ -1015,18 +1214,14 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = 
types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1073,7 +1268,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { ); } - assert_eq!(rig.get_finalized_checkpoints(), hashset! {},); + assert_eq!(rig.get_finalized_checkpoints(), hashset! {}); assert!(rig.chain.knows_head(&stray_head)); @@ -1100,8 +1295,11 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { for &block_hash in stray_blocks.values() { assert!( !rig.block_exists(block_hash), - "abandoned block {} should have been pruned", - block_hash + "abandoned block {block_hash:?} should have been pruned", + ); + assert!( + !rig.chain.store.blobs_exist(&block_hash.into()).unwrap(), + "blobs for abandoned block {block_hash:?} should have been pruned" ); } @@ -1125,18 +1323,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let 
adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1260,15 +1454,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 32; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1352,18 +1542,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = 
(0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); // Fill up 0th epoch with canonical chain blocks @@ -1497,18 +1683,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1626,18 +1808,14 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. 
#[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1810,6 +1988,10 @@ fn check_no_blocks_exist<'a>( "did not expect block {:?} to be in the DB", block_hash ); + assert!( + !harness.chain.store.blobs_exist(&block_hash.into()).unwrap(), + "blobs for abandoned block {block_hash:?} should have been pruned" + ); } } @@ -2011,7 +2193,7 @@ async fn garbage_collect_temp_states_from_failed_block() { let genesis_state = harness.get_current_state(); let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; let (mut block, _) = signed_block.deconstruct(); @@ -2027,7 +2209,10 @@ async fn garbage_collect_temp_states_from_failed_block() { // The block should be rejected, but should store a bunch of temporary states. 
harness.set_current_slot(block_slot); - harness.process_block_result(block).await.unwrap_err(); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); assert_eq!( store.iter_temporary_state_roots().count(), @@ -2053,39 +2238,94 @@ async fn garbage_collect_temp_states_from_failed_block() { } #[tokio::test] -async fn weak_subjectivity_sync() { +async fn weak_subjectivity_sync_easy() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots).map(Slot::new).collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots leading up to the checkpoint slot. + slot <= checkpoint_slot - 3 || slot > checkpoint_slot + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots after the checkpoint slot. + slot <= checkpoint_slot || slot > checkpoint_slot + 3 + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +// Regression test for https://github.com/sigp/lighthouse/issues/4817 +// Skip 3 slots immediately after genesis, creating a gap between the genesis block and the first +// real block. 
+#[tokio::test] +async fn weak_subjectivity_sync_skips_at_genesis() { + let start_slot = 4; + let end_slot = E::slots_per_epoch() * 4; + let slots = (start_slot..end_slot).map(Slot::new).collect(); + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Build an initial chain on one harness, representing a synced node with full history. - let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; let temp1 = tempdir().unwrap(); let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); harness - .extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, ) .await; - let genesis_state = full_store - .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_checkpoint = harness.finalized_checkpoint(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness .chain .store - .get_full_block(&wss_checkpoint.root) + .get_full_block(&wss_block_root) .unwrap() .unwrap(); let wss_state = full_store - .get_state(&wss_block.state_root(), None) + .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() .unwrap(); - let wss_slot = wss_block.slot(); // Add more blocks that advance finalization further. 
harness.advance_slot(); @@ -2103,45 +2343,61 @@ async fn weak_subjectivity_sync() { let store = get_store(&temp2); let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| println!("Unable to read trusted setup file: {}", e)) + .unwrap(); - // Initialise a new beacon chain from the finalized checkpoint - let beacon_chain = Arc::new( - BeaconChainBuilder::new(MinimalEthSpec) - .store(store.clone()) - .custom_spec(test_spec::()) - .task_executor(harness.chain.task_executor.clone()) - .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) - .unwrap() - .logger(log.clone()) - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) - .expect("should configure testing slot clock") - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) - .build() - .expect("should build"), + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone(), None); + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. 
+ let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) + .trusted_setup(trusted_setup) + .build() + .expect("should build"); + + let beacon_chain = Arc::new(beacon_chain); // Apply blocks forward to reach head. 
let chain_dump = harness.chain.chain_dump().unwrap(); - let new_blocks = &chain_dump[wss_slot.as_usize() + 1..]; - - assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1); + let new_blocks = chain_dump + .iter() + .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); for snapshot in new_blocks { + let block_root = snapshot.beacon_block_root; let full_block = harness .chain .get_block(&snapshot.beacon_block_root) .await .unwrap() .unwrap(); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2149,7 +2405,7 @@ async fn weak_subjectivity_sync() { beacon_chain .process_block( full_block.canonical_root(), - Arc::new(full_block), + RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2194,14 +2450,38 @@ async fn weak_subjectivity_sync() { .filter(|s| s.beacon_block.slot() != 0) .map(|s| s.beacon_block.clone()) .collect::>(); + + let mut available_blocks = vec![]; + for blinded in historical_blocks { + let block_root = blinded.canonical_root(); + let full_block = harness + .chain + .get_block(&block_root) + .await + .expect("should get block") + .expect("should get block"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + + if let MaybeAvailableBlock::Available(block) = harness + .chain + .data_availability_checker + .check_rpc_block_availability( + RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + ) + .expect("should check availability") + { + available_blocks.push(block); + } + } + beacon_chain - .import_historical_block_batch(historical_blocks.clone()) + .import_historical_block_batch(available_blocks.clone()) .unwrap(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); // Resupplying the blocks should not fail, they can be safely ignored. 
beacon_chain - .import_historical_block_batch(historical_blocks) + .import_historical_block_batch(available_blocks) .unwrap(); // The forwards iterator should now match the original chain @@ -2219,13 +2499,17 @@ async fn weak_subjectivity_sync() { assert_eq!(forwards, expected); // All blocks can be loaded. + let mut prev_block_root = Hash256::zero(); for (block_root, slot) in beacon_chain .forwards_iter_block_roots(Slot::new(0)) .unwrap() .map(Result::unwrap) { let block = store.get_blinded_block(&block_root).unwrap().unwrap(); - assert_eq!(block.slot(), slot); + if block_root != prev_block_root { + assert_eq!(block.slot(), slot); + } + prev_block_root = block_root; } // All states from the oldest state slot can be loaded. @@ -2240,14 +2524,141 @@ async fn weak_subjectivity_sync() { assert_eq!(state.canonical_root(), state_root); } - // Anchor slot is still set to the starting slot. - assert_eq!(store.get_anchor_slot(), Some(wss_slot)); + // Anchor slot is still set to the slot of the checkpoint block. + assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); // Reconstruct states. store.clone().reconstruct_historic_states().unwrap(); assert_eq!(store.get_anchor_slot(), None); } +/// Test that blocks and attestations that refer to states around an unaligned split state are +/// processed correctly. +#[tokio::test] +async fn process_blocks_and_attestations_for_unaligned_checkpoint() { + let temp = tempdir().unwrap(); + let store = get_store(&temp); + let chain_config = ChainConfig { + reconstruct_historic_states: false, + ..ChainConfig::default() + }; + let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); + + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let split_slot = Slot::new(E::slots_per_epoch() * 4); + let pre_skips = 1; + let post_skips = 1; + + // Build the chain up to the intended split slot, with 3 skips before the split. 
+ let slots = (1..=split_slot.as_u64() - pre_skips) + .map(Slot::new) + .collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Before the split slot becomes finalized, create two forking blocks that build on the split + // block: + // + // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) + // - one that is valid because its slot is not finalized (slot > finalized_slot) + let (unadvanced_split_state, unadvanced_split_state_root) = + harness.get_current_state_and_root(); + + let ((invalid_fork_block, _), _) = harness + .make_block(unadvanced_split_state.clone(), split_slot) + .await; + let ((valid_fork_block, _), _) = harness + .make_block(unadvanced_split_state.clone(), split_slot + 1) + .await; + + // Advance the chain so that the intended split slot is finalized. + // Do not attest in the epoch boundary slot, to make attestation production later easier (no + // equivocations). + let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); + for _ in 0..pre_skips + post_skips { + harness.advance_slot(); + } + harness.extend_to_slot(finalizing_slot - 1).await; + harness + .add_block_at_slot(finalizing_slot, harness.get_current_state()) + .await + .unwrap(); + + // Check that the split slot is as intended. + let split = store.get_split_info(); + assert_eq!(split.slot, split_slot); + assert_eq!(split.block_root, valid_fork_block.parent_root()); + assert_ne!(split.state_root, unadvanced_split_state_root); + + // Applying the invalid block should fail. + let err = harness + .chain + .process_block( + invalid_fork_block.canonical_root(), + Arc::new(invalid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap_err(); + assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. 
})); + + // Applying the valid block should succeed, but it should not become head. + harness + .chain + .process_block( + valid_fork_block.canonical_root(), + Arc::new(valid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap(); + harness.chain.recompute_head_at_current_slot().await; + assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root()); + + // Attestations to the split block in the next 2 epochs should be processed successfully. + let attestation_start_slot = harness.get_current_slot(); + let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch(); + let (split_state_root, mut advanced_split_state) = harness + .chain + .store + .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .unwrap() + .unwrap(); + complete_state_advance( + &mut advanced_split_state, + Some(split_state_root), + attestation_start_slot, + &harness.chain.spec, + ) + .unwrap(); + advanced_split_state + .build_caches(&harness.chain.spec) + .unwrap(); + let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap(); + for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) { + let attestations = harness.make_attestations( + &all_validators, + &advanced_split_state, + advanced_split_state_root, + split.block_root.into(), + slot, + ); + harness.advance_slot(); + harness.process_attestations(attestations); + } +} + #[tokio::test] async fn finalizes_after_resuming_from_db() { let validator_count = 16; @@ -2302,11 +2713,12 @@ async fn finalizes_after_resuming_from_db() { let original_chain = harness.chain; - let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec) + let resumed_harness = BeaconChainHarness::>::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) - .mock_execution_layer() + .testing_slot_clock(original_chain.slot_clock.clone()) + 
.execution_layer(original_chain.execution_layer.clone()) .build(); assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); @@ -2371,7 +2783,7 @@ async fn revert_minority_fork_on_resume() { // Chain with no fork epoch configured. let db_path1 = tempdir().unwrap(); - let store1 = get_store_with_spec(&db_path1, spec1.clone()); + let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); let harness1 = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec1) .keypairs(KEYPAIRS[0..validator_count].to_vec()) @@ -2381,7 +2793,7 @@ async fn revert_minority_fork_on_resume() { // Chain with fork epoch configured. let db_path2 = tempdir().unwrap(); - let store2 = get_store_with_spec(&db_path2, spec2.clone()); + let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); let harness2 = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec2.clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) @@ -2407,14 +2819,14 @@ async fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot).await; + let ((block, blobs), new_state) = harness1.make_block(state, slot).await; harness1 - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); harness2 - .process_block(slot, block.canonical_root(), block.clone()) + .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) .await .unwrap(); @@ -2448,17 +2860,17 @@ async fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). 
- let (block1, new_state1) = harness1.make_block(state1, slot).await; + let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await; harness1 - .process_block(slot, block1.canonical_root(), block1) + .process_block(slot, block1.canonical_root(), (block1, blobs1)) .await .unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot).await; + let ((block2, blobs2), new_state2) = harness2.make_block(state2, slot).await; harness2 - .process_block(slot, block2.canonical_root(), block2.clone()) + .process_block(slot, block2.canonical_root(), (block2.clone(), blobs2)) .await .unwrap(); @@ -2476,9 +2888,9 @@ async fn revert_minority_fork_on_resume() { // We have to do some hackery with the `slot_clock` so that the correct slot is set when // the beacon chain builder loads the head block. drop(harness1); - let resume_store = get_store_with_spec(&db_path1, spec2.clone()); + let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); - let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec) + let resumed_harness = TestHarness::builder(MinimalEthSpec) .spec(spec2) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(resume_store) @@ -2511,7 +2923,7 @@ async fn revert_minority_fork_on_resume() { let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { resumed_harness - .process_block_result(block.clone()) + .process_block_result((block.clone(), None)) .await .unwrap(); @@ -2551,14 +2963,19 @@ async fn schema_downgrade_to_min_version() { ) .await; - let min_version = if harness.spec.capella_fork_epoch.is_some() { - // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that - // at all if Capella is enabled. 
+ let min_version = if harness.spec.deneb_fork_epoch.is_some() { + // Can't downgrade beyond V18 once Deneb is reached, for simplicity don't test that + // at all if Deneb is enabled. + SchemaVersion(18) + } else if harness.spec.capella_fork_epoch.is_some() { SchemaVersion(14) } else { SchemaVersion(11) }; + // Save the slot clock so that the new harness doesn't revert in time. + let slot_clock = harness.chain.slot_clock.clone(); + // Close the database to ensure everything is written to disk. drop(store); drop(harness); @@ -2589,11 +3006,12 @@ async fn schema_downgrade_to_min_version() { ) .expect("schema upgrade from minimum version should work"); - // Rescreate the harness. + // Recreate the harness. let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) .logger(store.logger().clone()) + .testing_slot_clock(slot_clock) .resumed_disk_store(store.clone()) .mock_execution_layer() .build(); @@ -2616,6 +3034,278 @@ async fn schema_downgrade_to_min_version() { .expect_err("should not downgrade below minimum version"); } +/// Check that blob pruning prunes blobs older than the data availability boundary. +#[tokio::test] +async fn deneb_prune_blobs_happy_case() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. + return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let num_blocks_produced = E::slots_per_epoch() * 8; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Prior to manual pruning with an artifically low data availability boundary all blobs should + // be stored. 
+ assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true); + + // Trigger blob pruning of blobs older than epoch 2. + let data_availability_boundary = Epoch::new(2); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is updated accordingly and prior blobs have been deleted. + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!( + oldest_blob_slot, + data_availability_boundary.start_slot(E::slots_per_epoch()) + ); + check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false); + check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true); +} + +/// Check that blob pruning does not prune without finalization. +#[tokio::test] +async fn deneb_prune_blobs_no_finalization() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. + return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let initial_num_blocks = E::slots_per_epoch() * 5; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Finalize to epoch 3. + harness + .extend_chain( + initial_num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Extend the chain for another few epochs without attestations. + let unfinalized_num_blocks = E::slots_per_epoch() * 3; + harness.advance_slot(); + harness + .extend_chain( + unfinalized_num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Finalization should be at epoch 3. 
+ let finalized_slot = Slot::new(E::slots_per_epoch() * 3); + assert_eq!(harness.get_current_state().finalized_checkpoint().epoch, 3); + assert_eq!(store.get_split_slot(), finalized_slot); + + // All blobs should still be available. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Attempt blob pruning of blobs older than epoch 4, which is newer than finalization. + let data_availability_boundary = Epoch::new(4); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is only updated to finalization, and NOT to the DAB. + let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!(oldest_blob_slot, finalized_slot); + check_blob_existence(&harness, Slot::new(0), finalized_slot - 1, false); + check_blob_existence(&harness, finalized_slot, harness.head_slot(), true); +} + +/// Check that blob pruning does not fail trying to prune across the fork boundary. +#[tokio::test] +async fn deneb_prune_blobs_fork_boundary() { + let deneb_fork_epoch = Epoch::new(4); + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + spec.deneb_fork_epoch = Some(deneb_fork_epoch); + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, StoreConfig::default(), spec); + + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = E::slots_per_epoch() * 7; + + // Finalize to epoch 5. + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Finalization should be at epoch 5. 
+ let finalized_epoch = Epoch::new(5); + let finalized_slot = finalized_epoch.start_slot(E::slots_per_epoch()); + assert_eq!( + harness.get_current_state().finalized_checkpoint().epoch, + finalized_epoch + ); + assert_eq!(store.get_split_slot(), finalized_slot); + + // All blobs should still be available. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Attempt pruning with data availability epochs that precede the fork epoch. + // No pruning should occur. + assert!(deneb_fork_epoch < finalized_epoch); + for data_availability_boundary in [Epoch::new(0), Epoch::new(3), deneb_fork_epoch] { + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is not updated. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + } + // All blobs should still be available. + check_blob_existence(&harness, Slot::new(0), harness.head_slot(), true); + + // Prune one epoch past the fork. + let pruned_slot = (deneb_fork_epoch + 1).start_slot(E::slots_per_epoch()); + store.try_prune_blobs(true, deneb_fork_epoch + 1).unwrap(); + assert_eq!(store.get_blob_info().oldest_blob_slot, Some(pruned_slot)); + check_blob_existence(&harness, Slot::new(0), pruned_slot - 1, false); + check_blob_existence(&harness, pruned_slot, harness.head_slot(), true); +} + +/// Check that blob pruning prunes blobs older than the data availability boundary with margin +/// applied. 
+#[tokio::test] +async fn deneb_prune_blobs_margin1() { + deneb_prune_blobs_margin_test(1).await; +} + +#[tokio::test] +async fn deneb_prune_blobs_margin3() { + deneb_prune_blobs_margin_test(3).await; +} + +#[tokio::test] +async fn deneb_prune_blobs_margin4() { + deneb_prune_blobs_margin_test(4).await; +} + +async fn deneb_prune_blobs_margin_test(margin: u64) { + let config = StoreConfig { + blob_prune_margin_epochs: margin, + ..StoreConfig::default() + }; + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, config, test_spec::()); + + let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { + // No-op prior to Deneb. + return; + }; + let deneb_fork_slot = deneb_fork_epoch.start_slot(E::slots_per_epoch()); + + let num_blocks_produced = E::slots_per_epoch() * 8; + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Prior to manual pruning with an artifically low data availability boundary all blobs should + // be stored. + assert_eq!( + store.get_blob_info().oldest_blob_slot, + Some(deneb_fork_slot) + ); + check_blob_existence(&harness, Slot::new(1), harness.head_slot(), true); + + // Trigger blob pruning of blobs older than epoch 6 - margin (6 is the minimum, due to + // finalization). + let data_availability_boundary = Epoch::new(6); + let effective_data_availability_boundary = + data_availability_boundary - store.get_config().blob_prune_margin_epochs; + assert!( + effective_data_availability_boundary > 0, + "must be > 0 because epoch 0 won't get pruned alone" + ); + store + .try_prune_blobs(true, data_availability_boundary) + .unwrap(); + + // Check oldest blob slot is updated accordingly and prior blobs have been deleted. 
+ let oldest_blob_slot = store.get_blob_info().oldest_blob_slot.unwrap(); + assert_eq!( + oldest_blob_slot, + effective_data_availability_boundary.start_slot(E::slots_per_epoch()) + ); + check_blob_existence(&harness, Slot::new(0), oldest_blob_slot - 1, false); + check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true); +} + +/// Check that there are blob sidecars (or not) at every slot in the range. +fn check_blob_existence( + harness: &TestHarness, + start_slot: Slot, + end_slot: Slot, + should_exist: bool, +) { + let mut blobs_seen = 0; + for (block_root, slot) in harness + .chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .unwrap() + .map(Result::unwrap) + { + if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() { + assert!(should_exist, "blobs at slot {slot} exist but should not"); + blobs_seen += blobs.len(); + } else { + // We don't actually store empty blobs, so unfortunately we can't assert anything + // meaningful here (like asserting that the blob should not exist). + } + } + if should_exist { + assert_ne!(blobs_seen, 0, "expected non-zero number of blobs"); + } +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c5b2892cbd..4334f90836 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; @@ -28,6 +28,10 @@ lazy_static! 
{ fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() @@ -680,19 +684,20 @@ async fn run_skip_slot_test(skip_slots: u64) { Slot::new(0) ); - assert_eq!( - harness_b - .chain - .process_block( - harness_a.chain.head_snapshot().beacon_block_root, - harness_a.chain.head_snapshot().beacon_block.clone(), - NotifyExecutionLayer::Yes, - || Ok(()) - ) - .await - .unwrap(), - harness_a.chain.head_snapshot().beacon_block_root - ); + let status = harness_b + .chain + .process_block( + harness_a.chain.head_snapshot().beacon_block_root, + harness_a.get_head_block(), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap(); + + let root: Hash256 = status.try_into().unwrap(); + + assert_eq!(root, harness_a.chain.head_snapshot().beacon_block_root); harness_b.chain.recompute_head_at_current_slot().await; diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index c626441bb7..723b09b581 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -1,26 +1,26 @@ [package] name = "beacon_processor" version = "0.1.0" -edition = "2021" +edition = { workspace = true } [dependencies] -slog = { version = "2.5.2", features = ["max_level_trace"] } -itertools = "0.10.0" -logging = { path = "../../common/logging" } -tokio = { version = "1.14.0", features = ["full"] } -tokio-util = { version = "0.6.3", features = ["time"] } -futures = "0.3.7" -fnv = "1.0.7" -strum = "0.24.0" -task_executor = { path = "../../common/task_executor" } -slot_clock = { path = "../../common/slot_clock" } -lighthouse_network = { path = "../lighthouse_network" } -hex = "0.4.2" -derivative = "2.2.0" -types = { path = "../../consensus/types" } 
-ethereum_ssz = "0.5.0" -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -parking_lot = "0.12.0" -num_cpus = "1.13.0" -serde = { version = "1.0.116", features = ["derive"] } \ No newline at end of file +slog = { workspace = true } +itertools = { workspace = true } +logging = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } +futures = { workspace = true } +fnv = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +slot_clock = { workspace = true } +lighthouse_network = { workspace = true } +hex = { workspace = true } +derivative = { workspace = true } +types = { workspace = true } +ethereum_ssz = { workspace = true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +parking_lot = { workspace = true } +num_cpus = { workspace = true } +serde = { workspace = true } \ No newline at end of file diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index bf5d8bced4..31d4e4aac7 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -39,13 +39,11 @@ //! task. 
use crate::work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedBackfillBatch, QueuedGossipBlock, - QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, ReprocessQueueMessage, + QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage, }; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; -use lighthouse_network::NetworkGlobals; -use lighthouse_network::{MessageId, PeerId}; +use lighthouse_network::{MessageId, NetworkGlobals, PeerId}; use logging::TimeLatch; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; @@ -62,8 +60,14 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, Slot, SubnetId}; +use types::consts::deneb::MAX_BLOBS_PER_BLOCK; +use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{EthSpec, Slot}; use work_reprocessing_queue::IgnoredRpcBlock; +use work_reprocessing_queue::{ + spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, + QueuedUnaggregate, ReadyWork, +}; mod metrics; pub mod work_reprocessing_queue; @@ -102,6 +106,10 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that +/// will be stored before we start dropping them. +const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; @@ -142,6 +150,10 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; /// will be stored before we start dropping them. 
const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobSidecar` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `Vec` objects received during syncing that will /// be stored before we start dropping them. const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64; @@ -154,10 +166,19 @@ const MAX_STATUS_QUEUE_LEN: usize = 1_024; /// will be stored before we start dropping them. const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = + MAX_BLOCKS_BY_RANGE_QUEUE_LEN * MAX_BLOBS_PER_BLOCK as usize; + /// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that +/// will be stored before we start dropping them. +const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024; + /// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. 
/// /// This value is set high to accommodate the large spike that is expected immediately after Capella @@ -204,6 +225,7 @@ pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; pub const GOSSIP_BLOCK: &str = "gossip_block"; +pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; @@ -214,11 +236,14 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; +pub const RPC_BLOBS: &str = "rpc_blob"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; +pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; +pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; @@ -566,6 +591,7 @@ pub enum Work { process_batch: Box>) + Send + Sync>, }, GossipBlock(AsyncFn), + GossipSignedBlobSidecar(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -581,6 +607,9 @@ pub enum Work { RpcBlock { process_fn: AsyncFn, }, + RpcBlobs { + process_fn: AsyncFn, + }, IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -589,6 
+618,8 @@ pub enum Work { Status(BlockingFn), BlocksByRangeRequest(BlockingFnWithManualSendOnIdle), BlocksByRootsRequest(BlockingFnWithManualSendOnIdle), + BlobsByRangeRequest(BlockingFn), + BlobsByRootsRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), @@ -610,6 +641,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock(_) => GOSSIP_BLOCK, + Work::GossipSignedBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, @@ -619,12 +651,15 @@ impl Work { Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::RpcBlobs { .. } => RPC_BLOBS, Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, Work::Status(_) => STATUS_PROCESSING, Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, + Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, + Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, @@ -708,7 +743,6 @@ impl Stream for InboundEvents { pub struct BeaconProcessor { pub network_globals: Arc>, pub executor: TaskExecutor, - pub max_workers: usize, pub current_workers: usize, pub config: BeaconProcessorConfig, pub log: Logger, @@ -721,7 +755,7 @@ impl BeaconProcessor { /// - Performed immediately, if a worker is available. 
/// - Queued for later processing, if no worker is currently available. /// - /// Only `self.max_workers` will ever be spawned at one time. Each worker is a `tokio` task + /// Only `self.config.max_workers` will ever be spawned at one time. Each worker is a `tokio` task /// started with `spawn_blocking`. /// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work @@ -772,14 +806,18 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); + let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); + let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); + let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN); + let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); let mut gossip_bls_to_execution_change_queue = FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); @@ -896,7 +934,7 @@ impl BeaconProcessor { let _ = work_journal_tx.try_send(id); } - let can_spawn = self.current_workers < self.max_workers; + let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() .map_or(false, |event| event.drop_during_sync); @@ -916,6 +954,8 @@ impl BeaconProcessor { // requested these blocks. 
} else if let Some(item) = rpc_block_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_blob_queue.pop() { + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -924,7 +964,9 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, idle_tx); - // Check the priority 0 API requests after blocks, but before attestations. + } else if let Some(item) = gossip_blob_queue.pop() { + self.spawn_worker(item, idle_tx); + // Check the priority 0 API requests after blocks and blobs, but before attestations. } else if let Some(item) = api_request_p0_queue.pop() { self.spawn_worker(item, idle_tx); // Check the aggregates, *then* the unaggregates since we assume that @@ -1069,6 +1111,10 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = bbroots_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = blbrange_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = blbroots_queue.pop() { + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1159,6 +1205,9 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } + Work::GossipSignedBlobSidecar { .. } => { + gossip_blob_queue.push(work, work_id, &self.log) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) } @@ -1184,6 +1233,7 @@ impl BeaconProcessor { Work::RpcBlock { .. } | Work::IgnoredRpcBlock { .. } => { rpc_block_queue.push(work, work_id, &self.log) } + Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), Work::ChainSegment { .. 
} => { chain_segment_queue.push(work, work_id, &self.log) } @@ -1197,6 +1247,9 @@ Work::BlocksByRootsRequest { .. } => { bbroots_queue.push(work, work_id, &self.log) } + Work::BlobsByRangeRequest { .. } => { + blbrange_queue.push(work, work_id, &self.log) + } Work::LightClientBootstrapRequest { .. } => { lcbootstrap_queue.push(work, work_id, &self.log) } @@ -1209,6 +1262,9 @@ Work::GossipBlsToExecutionChange { .. } => { gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) } + Work::BlobsByRootsRequest { .. } => { + blbroots_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. } => { unknown_light_client_update_queue.push(work, work_id, &self.log) } @@ -1246,10 +1302,18 @@ &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, + gossip_blob_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, + rpc_blob_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, @@ -1389,11 +1453,17 @@ beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } => task_spawner.spawn_async(process_fn), + Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => { + task_spawner.spawn_async(process_fn) + } Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::GossipBlock(work) => task_spawner.spawn_async(async move { - work.await; - }), + Work::GossipBlock(work) | Work::GossipSignedBlobSidecar(work) => task_spawner + .spawn_async(async move { + work.await; + }), + Work::BlobsByRangeRequest(process_fn) | 
Work::BlobsByRootsRequest(process_fn) => { + task_spawner.spawn_blocking(process_fn) + } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { task_spawner.spawn_blocking_with_manual_send_idle(work) } diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index e14c39e9a8..9082a7d474 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -46,6 +46,11 @@ lazy_static::lazy_static! { "beacon_processor_gossip_block_queue_total", "Count of blocks from gossip waiting to be verified." ); + // Gossip blobs. + pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_gossip_blob_queue_total", + "Count of blobs from gossip waiting to be verified." + ); // Gossip Exits. pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_exit_queue_total", @@ -71,6 +76,11 @@ lazy_static::lazy_static! { "beacon_processor_rpc_block_queue_total", "Count of blocks from the rpc waiting to be verified." ); + // Rpc blobs. + pub static ref BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_rpc_blob_queue_total", + "Count of blobs from the rpc waiting to be verified." + ); // Chain segments. 
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_chain_segment_queue_total", diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index b79fc5e407..21b9b84133 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "builder_client" version = "0.1.0" -edition = "2021" +edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] -reqwest = { version = "0.11.0", features = ["json","stream"] } -sensitive_url = { path = "../../common/sensitive_url" } -eth2 = { path = "../../common/eth2" } -serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.58" -lighthouse_version = { path = "../../common/lighthouse_version" } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +eth2 = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index c78f686d02..28cd1fe486 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,8 +1,8 @@ use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::FullPayloadContents; use eth2::types::{ - AbstractExecPayload, BlindedPayload, EthSpec, ExecutionBlockHash, ExecutionPayload, - ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, - Slot, + BlindedPayload, EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, + SignedBlockContents, SignedValidatorRegistrationData, Slot, }; pub use eth2::Error; use eth2::{ok_or_error, StatusCode}; @@ -140,8 +140,8 @@ impl BuilderHttpClient { /// `POST /eth/v1/builder/blinded_blocks` pub async fn post_builder_blinded_blocks( &self, - blinded_block: &SignedBeaconBlock>, - ) -> Result>, Error> { + blinded_block: &SignedBlockContents>, 
+ ) -> Result>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -163,12 +163,12 @@ impl BuilderHttpClient { } /// `GET /eth/v1/builder/header` - pub async fn get_builder_header>( + pub async fn get_builder_header( &self, slot: Slot, parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 72b6a6c7d4..26c53154e3 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -2,46 +2,45 @@ name = "client" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -serde_yaml = "0.8.13" -state_processing = { path = "../../consensus/state_processing" } -operation_pool = { path = "../operation_pool" } -tokio = "1.14.0" +serde_yaml = { workspace = true } +state_processing = { workspace = true } +operation_pool = { workspace = true } +tokio = { workspace = true } [dependencies] -beacon_chain = { path = "../beacon_chain" } -store = { path = "../store" } -network = { path = "../network" } +beacon_chain = { workspace = true } +store = { workspace = true } +network = { workspace = true } timer = { path = "../timer" } -lighthouse_network = { path = "../lighthouse_network" } -logging = { path = "../../common/logging" } -parking_lot = "0.12.0" -types = { path = "../../consensus/types" } -eth2_config = { path = "../../common/eth2_config" } -slot_clock = { path = "../../common/slot_clock" } -serde = "1.0.116" -serde_derive = "1.0.116" -error-chain = "0.12.4" -slog = { version = "2.5.2", features = ["max_level_trace"] } -tokio = "1.14.0" -dirs = "3.0.1" -eth1 = { path = "../eth1" } -eth2 = { path = "../../common/eth2" } -sensitive_url = { path = "../../common/sensitive_url" } -genesis = { path = "../genesis" } -task_executor = { path = "../../common/task_executor" } -environment = { 
path = "../../lighthouse/environment" } -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lighthouse_network = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +types = { workspace = true } +eth2_config = { workspace = true } +slot_clock = { workspace = true } +serde = { workspace = true } +error-chain = { workspace = true } +slog = { workspace = true } +tokio = { workspace = true } +dirs = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } +sensitive_url = { workspace = true } +genesis = { workspace = true } +task_executor = { workspace = true } +environment = { workspace = true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } time = "0.3.5" -directory = {path = "../../common/directory"} -http_api = { path = "../http_api" } +directory = { workspace = true } +http_api = { workspace = true } http_metrics = { path = "../http_metrics" } -slasher = { path = "../../slasher" } +slasher = { workspace = true } slasher_service = { path = "../../slasher/service" } -monitoring_api = {path = "../../common/monitoring_api"} -execution_layer = { path = "../execution_layer" } -beacon_processor = { path = "../beacon_processor" } -num_cpus = "1.13.0" +monitoring_api = { workspace = true } +execution_layer = { workspace = true } +beacon_processor = { workspace = true } +num_cpus = { workspace = true } diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs index 272ee908fb..69614159fe 100644 --- a/beacon_node/client/src/address_change_broadcast.rs +++ b/beacon_node/client/src/address_change_broadcast.rs @@ -99,7 +99,7 @@ pub async fn broadcast_address_changes( messages: vec![pubsub_message], }; // It seems highly unlikely that this unbounded send will fail, but - // we handle the result nontheless. + // we handle the result nonetheless. 
if let Err(e) = network_send.send(message) { debug!( log, diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index bd2946bb7a..d1184cf75d 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -2,6 +2,7 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; @@ -29,7 +30,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use std::cmp; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -155,6 +155,7 @@ where let runtime_context = runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; let context = runtime_context.service_context("beacon".into()); + let log = context.log(); let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; let event_handler = if self.http_api_config.enabled { Some(ServerSentEventHandler::new( @@ -165,7 +166,7 @@ where None }; - let execution_layer = if let Some(config) = config.execution_layer { + let execution_layer = if let Some(config) = config.execution_layer.clone() { let context = runtime_context.service_context("exec".into()); let execution_layer = ExecutionLayer::from_config( config, @@ -250,23 +251,19 @@ where )?; builder.genesis_state(genesis_state).map(|v| (v, None))? 
} - ClientGenesis::SszBytes { - genesis_state_bytes, - } => { + ClientGenesis::GenesisState => { info!( context.log(), "Starting from known genesis state"; ); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log).await?; builder.genesis_state(genesis_state).map(|v| (v, None))? } ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, - genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); if config.chain.genesis_backfill { @@ -280,17 +277,13 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log).await?; builder .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) .map(|v| (v, None))? } - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } => { + ClientGenesis::CheckpointSyncUrl { url } => { info!( context.log(), "Starting checkpoint sync"; @@ -309,7 +302,6 @@ where config.chain.checkpoint_sync_url_timeout, )), ); - let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { // We want to fetch deposit snapshot before fetching the finalized beacon state to @@ -356,10 +348,23 @@ where None }; - debug!(context.log(), "Downloading finalized block"); - // Find a suitable finalized block on an epoch boundary. 
- let mut block = remote - .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) + debug!( + context.log(), + "Downloading finalized state"; + ); + let state = remote + .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) + .await + .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? + .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; + + debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); + + let finalized_block_slot = state.latest_block_header().slot; + + debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); + let block = remote + .get_beacon_blocks_ssz::(BlockId::Slot(finalized_block_slot), &spec) .await .map_err(|e| match e { ApiError::InvalidSsz(e) => format!( @@ -373,65 +378,14 @@ where debug!(context.log(), "Downloaded finalized block"); - let mut block_slot = block.slot(); - - while block.slot() % slots_per_epoch != 0 { - block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot - ); - - if let Some(found_block) = remote - .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) - .await - .map_err(|e| { - format!("Error fetching block at slot {}: {:?}", block_slot, e) - })? - { - block = found_block; - } - } - - debug!( - context.log(), - "Downloaded aligned finalized block"; - "block_root" => ?block.canonical_root(), - "block_slot" => block.slot(), - ); - - let state_root = block.state_root(); - debug!( - context.log(), - "Downloading finalized state"; - "state_root" => ?state_root - ); - let state = remote - .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) - .await - .map_err(|e| { - format!( - "Error loading checkpoint state from remote {:?}: {:?}", - state_root, e - ) - })? 
- .ok_or_else(|| { - format!("Checkpoint state missing from remote: {:?}", state_root) - })?; - - debug!(context.log(), "Downloaded finalized state"); - - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log).await?; info!( context.log(), "Loaded checkpoint block and state"; - "slot" => block.slot(), + "block_slot" => block.slot(), + "state_slot" => state.slot(), "block_root" => ?block.canonical_root(), - "state_root" => ?state_root, ); let service = @@ -555,6 +509,12 @@ where ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, }; + let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { + beacon_chain_builder.trusted_setup(trusted_setup) + } else { + beacon_chain_builder + }; + if config.sync_eth1_chain { self.eth1_service = eth1_service_option; } @@ -795,7 +755,6 @@ where BeaconProcessor { network_globals: network_globals.clone(), executor: beacon_processor_context.executor.clone(), - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: beacon_processor_context.log().clone(), @@ -886,6 +845,10 @@ where start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); + start_availability_cache_maintenance_service( + runtime_context.executor.clone(), + beacon_chain.clone(), + ); } Ok(Client { @@ -946,6 +909,7 @@ where mut self, hot_path: &Path, cold_path: &Path, + blobs_path: Option, config: StoreConfig, log: Logger, ) -> Result { @@ -983,6 +947,7 @@ where let store = HotColdDB::open( hot_path, cold_path, + blobs_path, schema_upgrade, config, spec, @@ -1129,3 +1094,23 @@ where Ok(self) } } + +/// Obtain the genesis state from the `eth2_network_config` in `context`. 
+async fn genesis_state( + context: &RuntimeContext, + config: &ClientConfig, + log: &Logger, +) -> Result, String> { + let eth2_network_config = context + .eth2_network_config + .as_ref() + .ok_or("An eth2_network_config is required to obtain the genesis state")?; + eth2_network_config + .genesis_state::( + config.genesis_state_url.as_deref(), + config.genesis_state_url_timeout, + log, + ) + .await? + .ok_or_else(|| "Genesis state is unknown".to_string()) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 283efe9c3f..8b47d0fc62 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,12 +1,14 @@ use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; +use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; use network::NetworkConfig; use sensitive_url::SensitiveUrl; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use std::time::Duration; use types::{Graffiti, PublicKeyBytes}; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -25,18 +27,13 @@ pub enum ClientGenesis { /// contract. #[default] DepositContract, - /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. - /// - /// We include the bytes instead of the `BeaconState` because the `EthSpec` type - /// parameter would be very annoying. - SszBytes { genesis_state_bytes: Vec }, + /// Loads the genesis state from the genesis state in the `Eth2NetworkConfig`. + GenesisState, WeakSubjSszBytes { - genesis_state_bytes: Vec, anchor_state_bytes: Vec, anchor_block_bytes: Vec, }, CheckpointSyncUrl { - genesis_state_bytes: Vec, url: SensitiveUrl, }, } @@ -49,6 +46,8 @@ pub struct Config { pub db_name: String, /// Path where the freezer database will be located. 
pub freezer_db_path: Option, + /// Path where the blobs database will be located if blobs should be in a separate database. + pub blobs_db_path: Option, pub log_file: PathBuf, /// If true, the node will use co-ordinated junk for eth1 values. /// @@ -75,12 +74,15 @@ pub struct Config { pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, pub execution_layer: Option, + pub trusted_setup: Option, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, pub monitoring_api: Option, pub slasher: Option, pub logger_config: LoggerConfig, pub beacon_processor: BeaconProcessorConfig, + pub genesis_state_url: Option, + pub genesis_state_url_timeout: Duration, } impl Default for Config { @@ -89,6 +91,7 @@ impl Default for Config { data_dir: PathBuf::from(DEFAULT_ROOT_DIR), db_name: "chain_db".to_string(), freezer_db_path: None, + blobs_db_path: None, log_file: PathBuf::from(""), genesis: <_>::default(), store: <_>::default(), @@ -98,6 +101,7 @@ impl Default for Config { sync_eth1_chain: false, eth1: <_>::default(), execution_layer: None, + trusted_setup: None, graffiti: Graffiti::default(), http_api: <_>::default(), http_metrics: <_>::default(), @@ -108,6 +112,9 @@ impl Default for Config { validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), beacon_processor: <_>::default(), + genesis_state_url: <_>::default(), + // This default value should always be overwritten by the CLI default value. + genesis_state_url_timeout: Duration::from_secs(60), } } } @@ -149,11 +156,27 @@ impl Config { .unwrap_or_else(|| self.default_freezer_db_path()) } + /// Returns the path to which the client may initialize the on-disk blobs database. + /// + /// Will attempt to use the user-supplied path from e.g. the CLI, or will default + /// to None. + pub fn get_blobs_db_path(&self) -> Option { + self.blobs_db_path.clone() + } + /// Get the freezer DB path, creating it if necessary. 
pub fn create_freezer_db_path(&self) -> Result { ensure_dir_exists(self.get_freezer_db_path()) } + /// Get the blobs DB path, creating it if necessary. + pub fn create_blobs_db_path(&self) -> Result, String> { + match self.get_blobs_db_path() { + Some(blobs_db_path) => Ok(Some(ensure_dir_exists(blobs_db_path)?)), + None => Ok(None), + } + } + /// Returns the "modern" path to the data_dir. /// /// See `Self::get_data_dir` documentation for more info. diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 584a0d736d..399aa06511 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -46,20 +46,6 @@ impl Client { self.http_metrics_listen_addr } - /// Returns the ipv4 port of the client's libp2p stack, if it was started. - pub fn libp2p_listen_ipv4_port(&self) -> Option { - self.network_globals - .as_ref() - .and_then(|n| n.listen_port_tcp4()) - } - - /// Returns the ipv6 port of the client's libp2p stack, if it was started. - pub fn libp2p_listen_ipv6_port(&self) -> Option { - self.network_globals - .as_ref() - .and_then(|n| n.listen_port_tcp6()) - } - /// Returns the list of libp2p addresses the client is listening to. 
pub fn libp2p_listen_addresses(&self) -> Option> { self.network_globals.as_ref().map(|n| n.listen_multiaddrs()) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 7d81594ee6..2c7738e8fa 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,7 +1,7 @@ use crate::metrics; use beacon_chain::{ capella_readiness::CapellaReadiness, - merge_readiness::{MergeConfig, MergeReadiness}, + merge_readiness::{GenesisExecutionPayloadStatus, MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; use lighthouse_network::{types::SyncState, NetworkGlobals}; @@ -62,6 +62,9 @@ pub fn spawn_notifier( "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), ); eth1_logging(&beacon_chain, &log); + merge_readiness_logging(Slot::new(0), &beacon_chain, &log).await; + capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await; + genesis_execution_payload_logging(&beacon_chain, &log).await; sleep(slot_duration).await; } _ => break, @@ -365,7 +368,7 @@ async fn merge_readiness_logging( return; } - match beacon_chain.check_merge_readiness().await { + match beacon_chain.check_merge_readiness(current_slot).await { MergeReadiness::Ready { config, current_difficulty, @@ -476,6 +479,79 @@ async fn capella_readiness_logging( } } +async fn genesis_execution_payload_logging( + beacon_chain: &BeaconChain, + log: &Logger, +) { + match beacon_chain + .check_genesis_execution_payload_is_correct() + .await + { + Ok(GenesisExecutionPayloadStatus::Correct(block_hash)) => { + info!( + log, + "Execution enabled from genesis"; + "genesis_payload_block_hash" => ?block_hash, + ); + } + Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected }) => { + error!( + log, + "Genesis payload block hash mismatch"; + "info" => "genesis is misconfigured and likely to fail", + "consensus_node_block_hash" => ?expected, + "execution_node_block_hash" => ?got, + ); + } + 
Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { got, expected }) => { + error!( + log, + "Genesis payload transactions root mismatch"; + "info" => "genesis is misconfigured and likely to fail", + "consensus_node_transactions_root" => ?expected, + "execution_node_transactions_root" => ?got, + ); + } + Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, expected }) => { + error!( + log, + "Genesis payload withdrawals root mismatch"; + "info" => "genesis is misconfigured and likely to fail", + "consensus_node_withdrawals_root" => ?expected, + "execution_node_withdrawals_root" => ?got, + ); + } + Ok(GenesisExecutionPayloadStatus::OtherMismatch) => { + error!( + log, + "Genesis payload header mismatch"; + "info" => "genesis is misconfigured and likely to fail", + "detail" => "see debug logs for payload headers" + ); + } + Ok(GenesisExecutionPayloadStatus::Irrelevant) => { + info!( + log, + "Execution is not enabled from genesis"; + ); + } + Ok(GenesisExecutionPayloadStatus::AlreadyHappened) => { + warn!( + log, + "Unable to check genesis which has already occurred"; + "info" => "this is probably a race condition or a bug" + ); + } + Err(e) => { + error!( + log, + "Unable to check genesis execution payload"; + "error" => ?e + ); + } + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index cc982aee08..2f716cd19b 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -2,33 +2,33 @@ name = "eth1" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -eth1_test_rig = { path = "../../testing/eth1_test_rig" } -serde_yaml = "0.8.13" -sloggers = { version = "2.1.1", features = ["json"] } -environment = { path = "../../lighthouse/environment" } +eth1_test_rig = { workspace = true } +serde_yaml = { workspace = true } +sloggers = { 
workspace = true } +environment = { workspace = true } [dependencies] -reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } -execution_layer = { path = "../execution_layer" } -futures = "0.3.7" -serde_json = "1.0.58" -serde = { version = "1.0.116", features = ["derive"] } -hex = "0.4.2" -types = { path = "../../consensus/types"} -merkle_proof = { path = "../../consensus/merkle_proof"} -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -tree_hash = "0.5.0" -parking_lot = "0.12.0" -slog = "2.5.2" -superstruct = "0.5.0" -tokio = { version = "1.14.0", features = ["full"] } -state_processing = { path = "../../consensus/state_processing" } -lighthouse_metrics = { path = "../../common/lighthouse_metrics"} -lazy_static = "1.4.0" -task_executor = { path = "../../common/task_executor" } -eth2 = { path = "../../common/eth2" } -sensitive_url = { path = "../../common/sensitive_url" } +reqwest = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +hex = { workspace = true } +types = { workspace = true } +merkle_proof = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +parking_lot = { workspace = true } +slog = { workspace = true } +superstruct = { workspace = true } +tokio = { workspace = true } +state_processing = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +task_executor = { workspace = true } +eth2 = { workspace = true } +sensitive_url = { workspace = true } diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 26e160115e..e676d17ab9 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -13,7 +13,7 @@ pub enum Error { /// Some `Eth1Block` was provided with the same block number but different data. 
The source /// of eth1 data is inconsistent. Conflicting(u64), - /// The given block was not one block number higher than the higest known block number. + /// The given block was not one block number higher than the highest known block number. NonConsecutive { given: u64, expected: u64 }, /// Some invariant was violated, there is a likely bug in the code. Internal(String), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 8492a5159e..7f65268980 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -1,53 +1,53 @@ [package] name = "execution_layer" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { path = "../../consensus/types"} -tokio = { version = "1.10.0", features = ["full"] } +types = { workspace = true } +tokio = { workspace = true } async-trait = "0.1.51" -slog = "2.5.2" -futures = "0.3.7" -sensitive_url = { path = "../../common/sensitive_url" } -reqwest = { version = "0.11.0", features = ["json","stream"] } -ethereum_serde_utils = "0.5.0" -serde_json = "1.0.58" -serde = { version = "1.0.116", features = ["derive"] } -warp = { version = "0.3.2", features = ["tls"] } +slog = { workspace = true } +futures = { workspace = true } +sensitive_url = { workspace = true } +reqwest = { workspace = true } +ethereum_serde_utils = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +warp = { workspace = true } jsonwebtoken = "8" -environment = { path = "../../lighthouse/environment" } -bytes = "1.1.0" -task_executor = { path = "../../common/task_executor" } -hex = "0.4.2" -ethereum_ssz = "0.5.0" -ssz_types = "0.5.3" -eth2 = { path = "../../common/eth2" } -state_processing = { path = "../../consensus/state_processing" } -superstruct = "0.6.0" -lru = "0.7.1" -exit-future = "0.2.0" -tree_hash = "0.5.0" 
-tree_hash_derive = "0.5.0" -parking_lot = "0.12.0" -slot_clock = { path = "../../common/slot_clock" } -tempfile = "3.1.0" -rand = "0.8.5" -zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lazy_static = "1.4.0" -ethers-core = "1.0.2" +environment = { workspace = true } +bytes = { workspace = true } +task_executor = { workspace = true } +hex = { workspace = true } +ethereum_ssz = { workspace = true } +ssz_types = { workspace = true } +eth2 = { workspace = true } +kzg = { workspace = true } +state_processing = { workspace = true } +superstruct = { workspace = true } +lru = { workspace = true } +exit-future = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +parking_lot = { workspace = true } +slot_clock = { workspace = true } +tempfile = { workspace = true } +rand = { workspace = true } +zeroize = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +ethers-core = { workspace = true } builder_client = { path = "../builder_client" } -fork_choice = { path = "../../consensus/fork_choice" } -mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } -ssz_rs = "0.9.0" -tokio-stream = { version = "0.1.9", features = [ "sync" ] } -strum = "0.24.0" +fork_choice = { workspace = true } +tokio-stream = { workspace = true } +strum = { workspace = true } keccak-hash = "0.10.0" hash256-std-hasher = "0.15.2" triehash = "0.8.4" hash-db = "0.15.2" -pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } \ No newline at end of file +pretty_reqwest_error = { workspace = true } +arc-swap = "1.6.0" +eth2_network_config = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index 
c889fead0a..5ba61beafc 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use ethers_core::utils::rlp::RlpStream; use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ - map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash, + map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256, }; @@ -18,6 +18,7 @@ impl ExecutionLayer { /// transactions. pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, + parent_beacon_block_root: Hash256, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -37,12 +38,23 @@ impl ExecutionLayer { None }; + let rlp_blob_gas_used = payload.blob_gas_used().ok(); + let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + + // Calculate parent beacon block root (post-Deneb). + let rlp_parent_beacon_block_root = rlp_excess_blob_gas + .as_ref() + .map(|_| parent_beacon_block_root); + // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( payload, KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), rlp_transactions_root, rlp_withdrawals_root, + rlp_blob_gas_used, + rlp_excess_blob_gas, + rlp_parent_beacon_block_root, ); // Hash the RLP encoding of the block header. @@ -56,10 +68,14 @@ impl ExecutionLayer { /// Verify `payload.block_hash` locally within Lighthouse. /// /// No remote calls to the execution client will be made, so this is quite a cheap check. 
- pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef) -> Result<(), Error> { + pub fn verify_payload_block_hash(&self, block: BeaconBlockRef) -> Result<(), Error> { + let payload = block.execution_payload()?.execution_payload_ref(); + let parent_beacon_block_root = block.parent_root(); + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = Self::calculate_execution_block_hash(payload); + let (header_hash, rlp_transactions_root) = + Self::calculate_execution_block_hash(payload, parent_beacon_block_root); if header_hash != payload.block_hash() { return Err(Error::BlockHashMismatch { @@ -88,12 +104,21 @@ pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec { pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { let mut rlp_header_stream = RlpStream::new(); rlp_header_stream.begin_unbounded_list(); - map_execution_block_header_fields_except_withdrawals!(&header, |_, field| { + map_execution_block_header_fields_base!(&header, |_, field| { rlp_header_stream.append(field); }); if let Some(withdrawals_root) = &header.withdrawals_root { rlp_header_stream.append(withdrawals_root); } + if let Some(blob_gas_used) = &header.blob_gas_used { + rlp_header_stream.append(blob_gas_used); + } + if let Some(excess_blob_gas) = &header.excess_blob_gas { + rlp_header_stream.append(excess_blob_gas); + } + if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { + rlp_header_stream.append(parent_beacon_block_root); + } rlp_header_stream.finalize_unbounded_list(); rlp_header_stream.out().into() } @@ -140,6 +165,9 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_rlp = 
"f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -168,6 +196,9 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x036b_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -197,10 +228,43 @@ mod test { nonce: Hash64::zero(), base_fee_per_gas: 0x34187b238_u64.into(), withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") .unwrap(); test_rlp_encoding(&header, None, expected_hash); } + + #[test] + fn test_rlp_encode_block_deneb() { + let header = ExecutionBlockHeader { + parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), + beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), + state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), + transactions_root: 
Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + difficulty: 0.into(), + number: 97.into(), + gas_limit: 27482534.into(), + gas_used: 0.into(), + timestamp: 1692132829u64, + extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), + mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + nonce: Hash64::zero(), + base_fee_per_gas: 2374u64.into(), + withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), + blob_gas_used: Some(0x0u64), + excess_blob_gas: Some(0x0u64), + parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + }; + let expected_hash = + Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + .unwrap(); + test_rlp_encoding(&header, None, expected_hash); + } } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 359dcb5223..683e39a503 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,26 +1,35 @@ use crate::engines::ForkchoiceState; use crate::http::{ - 
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, }; -use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2}; -pub use ethers_core::types::Transaction; -use ethers_core::utils::rlp::{self, Decodable, Rlp}; +use eth2::types::{ + BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, + SsePayloadAttributesV3, +}; +use ethers_core::types::Transaction; +use ethers_core::utils::rlp; +use ethers_core::utils::rlp::{Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use pretty_reqwest_error::PrettyReqwestError; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, Withdrawal, Withdrawals, }; -use types::{ExecutionPayloadCapella, ExecutionPayloadMerge}; +use types::{ + BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, + KzgProofs, VersionedHash, +}; pub mod auth; pub mod http; @@ -48,14 +57,12 @@ pub enum Error { PayloadIdUnavailable, TransitionConfigurationMismatch, PayloadConversionLogicFlaw, - DeserializeTransaction(ssz_types::Error), - 
DeserializeTransactions(ssz_types::Error), + SszError(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), - BadConversion(String), RlpDecoderError(rlp::DecoderError), } @@ -96,6 +103,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszError(e) + } +} + #[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { @@ -137,7 +150,7 @@ pub struct ExecutionBlock { /// Representation of an execution block with enough detail to reconstruct a payload. #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive(Clone, Debug, PartialEq, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -171,8 +184,14 @@ pub struct ExecutionBlockWithTransactions { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, pub transactions: Vec, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub withdrawals: Vec, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::u64_hex_be")] + pub blob_gas_used: u64, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::u64_hex_be")] + pub excess_blob_gas: u64, } impl TryFrom> for ExecutionBlockWithTransactions { @@ -226,13 +245,39 @@ impl TryFrom> for ExecutionBlockWithTransactions .collect(), }) } + ExecutionPayload::Deneb(block) => Self::Deneb(ExecutionBlockWithTransactionsDeneb { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: 
block.base_fee_per_gas, + block_hash: block.block_hash, + transactions: block + .transactions + .iter() + .map(|tx| Transaction::decode(&Rlp::new(tx))) + .collect::, _>>()?, + withdrawals: Vec::from(block.withdrawals) + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), + blob_gas_used: block.blob_gas_used, + excess_blob_gas: block.excess_blob_gas, + }), }; Ok(json_payload) } } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -245,8 +290,10 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3), partial_getter(copy))] + pub parent_beacon_block_root: Hash256, } impl PayloadAttributes { @@ -255,14 +302,24 @@ impl PayloadAttributes { prev_randao: Hash256, suggested_fee_recipient: Address, withdrawals: Option>, + parent_beacon_block_root: Option, ) -> Self { match withdrawals { - Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient, - withdrawals, - }), + Some(withdrawals) => match parent_beacon_block_root { + Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), + None => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + }, None => PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, @@ -295,6 +352,19 @@ impl From for SsePayloadAttributes { suggested_fee_recipient, withdrawals, }), + PayloadAttributes::V3(PayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + 
withdrawals, + parent_beacon_block_root, + }) => Self::V3(SsePayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), } } } @@ -320,7 +390,7 @@ pub struct ProposeBlindedBlockResponse { } #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -333,7 +403,27 @@ pub struct GetPayloadResponse { pub execution_payload: ExecutionPayloadMerge, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb, pub block_value: Uint256, + #[superstruct(only(Deneb))] + pub blobs_bundle: BlobsBundle, + #[superstruct(only(Deneb), partial_getter(copy))] + pub should_override_builder: bool, +} + +impl GetPayloadResponse { + pub fn fee_recipient(&self) -> Address { + ExecutionPayloadRef::from(self.to_ref()).fee_recipient() + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + ExecutionPayloadRef::from(self.to_ref()).block_hash() + } + + pub fn block_number(&self) -> u64 { + ExecutionPayloadRef::from(self.to_ref()).block_number() + } } impl<'a, T: EthSpec> From> for ExecutionPayloadRef<'a, T> { @@ -352,16 +442,25 @@ impl From> for ExecutionPayload { } } -impl From> for (ExecutionPayload, Uint256) { +impl From> + for (ExecutionPayload, Uint256, Option>) +{ fn from(response: GetPayloadResponse) -> Self { match response { GetPayloadResponse::Merge(inner) => ( ExecutionPayload::Merge(inner.execution_payload), inner.block_value, + None, ), GetPayloadResponse::Capella(inner) => ( ExecutionPayload::Capella(inner.execution_payload), inner.block_value, + None, + ), + GetPayloadResponse::Deneb(inner) => ( + ExecutionPayload::Deneb(inner.execution_payload), + inner.block_value, + 
Some(inner.blobs_bundle), ), } } @@ -435,6 +534,138 @@ impl ExecutionPayloadBodyV1 { )) } } + ExecutionPayloadHeader::Deneb(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } + } + } +} + +#[superstruct( + variants(Merge, Capella, Deneb), + variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) +)] +#[derive(Clone, Debug, PartialEq)] +pub struct NewPayloadRequest { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb, + #[superstruct(only(Deneb))] + pub versioned_hashes: Vec, + #[superstruct(only(Deneb))] + pub parent_beacon_block_root: Hash256, +} + +impl NewPayloadRequest { + pub fn 
parent_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.parent_hash, + Self::Capella(payload) => payload.execution_payload.parent_hash, + Self::Deneb(payload) => payload.execution_payload.parent_hash, + } + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + match self { + Self::Merge(payload) => payload.execution_payload.block_hash, + Self::Capella(payload) => payload.execution_payload.block_hash, + Self::Deneb(payload) => payload.execution_payload.block_hash, + } + } + + pub fn block_number(&self) -> u64 { + match self { + Self::Merge(payload) => payload.execution_payload.block_number, + Self::Capella(payload) => payload.execution_payload.block_number, + Self::Deneb(payload) => payload.execution_payload.block_number, + } + } + + pub fn into_execution_payload(self) -> ExecutionPayload { + map_new_payload_request_into_execution_payload!(self, |request, cons| { + cons(request.execution_payload) + }) + } +} + +impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest { + type Error = BeaconStateError; + + fn try_from(block: BeaconBlockRef<'a, E>) -> Result { + match block { + BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => { + Err(Self::Error::IncorrectStateVariant) + } + BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + })), + BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + })), + BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb { + execution_payload: block_ref.body.execution_payload.execution_payload.clone(), + versioned_hashes: block_ref + .body + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(), + parent_beacon_block_root: block_ref.parent_root, + })), + } + } +} + +impl TryFrom> for NewPayloadRequest { + type Error = 
BeaconStateError; + + fn try_from(payload: ExecutionPayload) -> Result { + match payload { + ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge { + execution_payload: payload, + })), + ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella { + execution_payload: payload, + })), + ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant), } } } @@ -443,12 +674,15 @@ impl ExecutionPayloadBodyV1 { pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, + pub new_payload_v3: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, + pub forkchoice_updated_v3: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, pub get_payload_v2: bool, + pub get_payload_v3: bool, } impl EngineCapabilities { @@ -460,12 +694,18 @@ impl EngineCapabilities { if self.new_payload_v2 { response.push(ENGINE_NEW_PAYLOAD_V2); } + if self.new_payload_v3 { + response.push(ENGINE_NEW_PAYLOAD_V3); + } if self.forkchoice_updated_v1 { response.push(ENGINE_FORKCHOICE_UPDATED_V1); } if self.forkchoice_updated_v2 { response.push(ENGINE_FORKCHOICE_UPDATED_V2); } + if self.forkchoice_updated_v3 { + response.push(ENGINE_FORKCHOICE_UPDATED_V3); + } if self.get_payload_bodies_by_hash_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); } @@ -478,6 +718,9 @@ impl EngineCapabilities { if self.get_payload_v2 { response.push(ENGINE_GET_PAYLOAD_V2); } + if self.get_payload_v3 { + response.push(ENGINE_GET_PAYLOAD_V3); + } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0ce03e6029..ac7dfa57e9 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -32,14 +32,17 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub 
const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; +pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; +pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; +pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; @@ -58,10 +61,13 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601; pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ]; @@ -72,12 +78,15 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: false, + new_payload_v3: false, forkchoice_updated_v1: true, forkchoice_updated_v2: false, + forkchoice_updated_v3: false, get_payload_bodies_by_hash_v1: false, get_payload_bodies_by_range_v1: false, get_payload_v1: true, get_payload_v2: false, + get_payload_v3: false, }; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
@@ -741,6 +750,14 @@ impl HttpJsonRpc { ) .await?, ), + ForkName::Deneb => ExecutionBlockWithTransactions::Deneb( + self.rpc_request( + ETH_GET_BLOCK_BY_HASH, + params, + ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?, + ), ForkName::Base | ForkName::Altair => { return Err(Error::UnsupportedForkVariant(format!( "called get_block_by_hash_with_txns with fork {:?}", @@ -784,6 +801,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v3( + &self, + new_payload_request_deneb: NewPayloadRequestDeneb, + ) -> Result { + let params = json!([ + JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()), + new_payload_request_deneb.versioned_hashes, + new_payload_request_deneb.parent_beacon_block_root, + ]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V3, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, @@ -835,10 +873,33 @@ impl HttpJsonRpc { .await?; Ok(JsonGetPayloadResponse::V2(response).into()) } - ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( - "called get_payload_v2 with {}", - fork_name - ))), + ForkName::Base | ForkName::Altair | ForkName::Deneb => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), + ), + } + } + + pub async fn get_payload_v3( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Deneb => { + let response: JsonGetPayloadResponseV3 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V3, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V3(response).into()) + } + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Err( + 
Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)), + ), } } @@ -884,6 +945,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v3( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V3, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, @@ -950,14 +1032,17 @@ impl HttpJsonRpc { Ok(capabilities) => Ok(EngineCapabilities { new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), get_payload_bodies_by_hash_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1), get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), }), } } @@ -994,15 +1079,28 @@ impl HttpJsonRpc { // new_payload that the execution engine supports pub async fn new_payload( &self, - execution_payload: ExecutionPayload, + new_payload_request: NewPayloadRequest, ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.new_payload_v2 { - self.new_payload_v2(execution_payload).await - } else if 
engine_capabilities.new_payload_v1 { - self.new_payload_v1(execution_payload).await - } else { - Err(Error::RequiredMethodUnsupported("engine_newPayload")) + match new_payload_request { + NewPayloadRequest::Merge(_) | NewPayloadRequest::Capella(_) => { + if engine_capabilities.new_payload_v2 { + self.new_payload_v2(new_payload_request.into_execution_payload()) + .await + } else if engine_capabilities.new_payload_v1 { + self.new_payload_v1(new_payload_request.into_execution_payload()) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayload")) + } + } + NewPayloadRequest::Deneb(new_payload_request_deneb) => { + if engine_capabilities.new_payload_v3 { + self.new_payload_v3(new_payload_request_deneb).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayloadV3")) + } + } } } @@ -1014,12 +1112,27 @@ impl HttpJsonRpc { payload_id: PayloadId, ) -> Result, Error> { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.get_payload_v2 { - self.get_payload_v2(fork_name, payload_id).await - } else if engine_capabilities.new_payload_v1 { - self.get_payload_v1(payload_id).await - } else { - Err(Error::RequiredMethodUnsupported("engine_getPayload")) + match fork_name { + ForkName::Merge | ForkName::Capella => { + if engine_capabilities.get_payload_v2 { + self.get_payload_v2(fork_name, payload_id).await + } else if engine_capabilities.new_payload_v1 { + self.get_payload_v1(payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayload")) + } + } + ForkName::Deneb => { + if engine_capabilities.get_payload_v3 { + self.get_payload_v3(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayloadV3")) + } + } + ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( + "called get_payload with {}", + fork_name + ))), } } @@ -1028,14 +1141,41 @@ impl HttpJsonRpc { pub async fn forkchoice_updated( &self, forkchoice_state: 
ForkchoiceState, - payload_attributes: Option, + maybe_payload_attributes: Option, ) -> Result { let engine_capabilities = self.get_engine_capabilities(None).await?; - if engine_capabilities.forkchoice_updated_v2 { - self.forkchoice_updated_v2(forkchoice_state, payload_attributes) + if let Some(payload_attributes) = maybe_payload_attributes.as_ref() { + match payload_attributes { + PayloadAttributes::V1(_) | PayloadAttributes::V2(_) => { + if engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v1 { + self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) + } + } + PayloadAttributes::V3(_) => { + if engine_capabilities.forkchoice_updated_v3 { + self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_forkchoiceUpdatedV3", + )) + } + } + } + } else if engine_capabilities.forkchoice_updated_v3 { + self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) + .await + } else if engine_capabilities.forkchoice_updated_v2 { + self.forkchoice_updated_v2(forkchoice_state, maybe_payload_attributes) .await } else if engine_capabilities.forkchoice_updated_v1 { - self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + self.forkchoice_updated_v1(forkchoice_state, maybe_payload_attributes) .await } else { Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index d85d294c83..e8641be795 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -2,10 +2,12 @@ use super::*; use serde::{Deserialize, Serialize}; use 
strum::EnumString; use superstruct::superstruct; +use types::beacon_block_body::KzgCommitments; +use types::blob_sidecar::BlobsList; use types::{ - EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, + EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadMerge, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -61,7 +63,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -94,8 +96,14 @@ pub struct JsonExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: VariableList, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::u64_hex_be")] + pub blob_gas_used: u64, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::u64_hex_be")] + pub excess_blob_gas: u64, } impl From> for JsonExecutionPayloadV1 { @@ -144,12 +152,41 @@ impl From> for JsonExecutionPayloadV2 } } } +impl From> for JsonExecutionPayloadV3 { + fn from(payload: ExecutionPayloadDeneb) -> Self { + JsonExecutionPayloadV3 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: 
payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} impl From> for JsonExecutionPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), + ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()), } } } @@ -200,18 +237,47 @@ impl From> for ExecutionPayloadCapella } } } +impl From> for ExecutionPayloadDeneb { + fn from(payload: JsonExecutionPayloadV3) -> Self { + ExecutionPayloadDeneb { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} impl From> for ExecutionPayload { fn from(json_execution_payload: JsonExecutionPayload) -> Self { match json_execution_payload { JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()), } } } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Serialize, 
Deserialize), serde(bound = "T: EthSpec", rename_all = "camelCase") @@ -226,8 +292,14 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV1, #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, + #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] + pub execution_payload: JsonExecutionPayloadV3, #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, + #[superstruct(only(V3))] + pub blobs_bundle: JsonBlobsBundleV1, + #[superstruct(only(V3))] + pub should_override_builder: bool, } impl From> for GetPayloadResponse { @@ -245,6 +317,14 @@ impl From> for GetPayloadResponse { block_value: response.block_value, }) } + JsonGetPayloadResponse::V3(response) => { + GetPayloadResponse::Deneb(GetPayloadResponseDeneb { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + blobs_bundle: response.blobs_bundle.into(), + should_override_builder: response.should_override_builder, + }) + } } } } @@ -284,7 +364,7 @@ impl From for Withdrawal { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, Clone, PartialEq, Serialize, Deserialize), serde(rename_all = "camelCase") @@ -299,13 +379,15 @@ pub struct JsonPayloadAttributes { pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3))] + pub parent_beacon_block_root: Hash256, } impl From for JsonPayloadAttributes { - fn from(payload_atributes: PayloadAttributes) -> Self { - match payload_atributes { + fn from(payload_attributes: PayloadAttributes) -> Self { + match payload_attributes { PayloadAttributes::V1(pa) => Self::V1(JsonPayloadAttributesV1 { timestamp: pa.timestamp, prev_randao: pa.prev_randao, @@ -317,6 +399,13 @@ impl From for JsonPayloadAttributes { suggested_fee_recipient: 
pa.suggested_fee_recipient, withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), }), + PayloadAttributes::V3(pa) => Self::V3(JsonPayloadAttributesV3 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: pa.parent_beacon_block_root, + }), } } } @@ -335,6 +424,41 @@ impl From for PayloadAttributes { suggested_fee_recipient: jpa.suggested_fee_recipient, withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), }), + JsonPayloadAttributes::V3(jpa) => Self::V3(PayloadAttributesV3 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: jpa.parent_beacon_block_root, + }), + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(bound = "E: EthSpec", rename_all = "camelCase")] +pub struct JsonBlobsBundleV1 { + pub commitments: KzgCommitments, + pub proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, +} + +impl From> for JsonBlobsBundleV1 { + fn from(blobs_bundle: BlobsBundle) -> Self { + Self { + commitments: blobs_bundle.commitments, + proofs: blobs_bundle.proofs, + blobs: blobs_bundle.blobs, + } + } +} +impl From> for BlobsBundle { + fn from(json_blobs_bundle: JsonBlobsBundleV1) -> Self { + Self { + commitments: json_blobs_bundle.commitments, + proofs: json_blobs_bundle.proofs, + blobs: json_blobs_bundle.blobs, } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index b57bba7518..623965ada3 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -5,6 +5,7 @@ //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
use crate::payload_cache::PayloadCache; +use arc_swap::ArcSwapOption; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; @@ -13,7 +14,9 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; +use eth2::types::{FullPayloadContents, SignedBlockContents}; +use ethers_core::types::Transaction as EthersTransaction; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -26,7 +29,6 @@ use std::collections::HashMap; use std::fmt; use std::future::Future; use std::io::Write; -use std::marker::PhantomData; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -38,12 +40,15 @@ use tokio::{ }; use tokio_stream::wrappers::WatchStream; use tree_hash::TreeHash; -use types::{AbstractExecPayload, BeaconStateError, ExecPayload}; +use types::beacon_block_body::KzgCommitments; +use types::builder_bid::BuilderBid; +use types::sidecar::{BlobItems, Sidecar}; +use types::KzgProofs; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionPayloadCapella, ExecutionPayloadMerge, - ForkVersionedResponse, ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, - Slot, + AbstractExecPayload, BeaconStateError, BlindedPayload, BlockType, ChainSpec, Epoch, + ExecPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, }; +use types::{ProposerPreparationData, PublicKeyBytes, Signature, Slot}; mod block_hash; mod engine_api; @@ -82,6 +87,40 @@ pub enum ProvenancedPayload

{ Builder(P), } +impl> TryFrom> + for ProvenancedPayload> +{ + type Error = Error; + + fn try_from(value: BuilderBid) -> Result { + let block_proposal_contents = match value { + BuilderBid::Merge(builder_bid) => BlockProposalContents::Payload { + payload: ExecutionPayloadHeader::Merge(builder_bid.header) + .try_into() + .map_err(|_| Error::InvalidPayloadConversion)?, + block_value: builder_bid.value, + }, + BuilderBid::Capella(builder_bid) => BlockProposalContents::Payload { + payload: ExecutionPayloadHeader::Capella(builder_bid.header) + .try_into() + .map_err(|_| Error::InvalidPayloadConversion)?, + block_value: builder_bid.value, + }, + BuilderBid::Deneb(builder_bid) => BlockProposalContents::PayloadAndBlobs { + payload: ExecutionPayloadHeader::Deneb(builder_bid.header) + .try_into() + .map_err(|_| Error::InvalidPayloadConversion)?, + block_value: builder_bid.value, + kzg_commitments: builder_bid.blinded_blobs_bundle.commitments, + blobs: BlobItems::try_from_blob_roots(builder_bid.blinded_blobs_bundle.blob_roots) + .map_err(Error::InvalidBlobConversion)?, + proofs: builder_bid.blinded_blobs_bundle.proofs, + }, + }; + Ok(ProvenancedPayload::Builder(block_proposal_contents)) + } +} + #[derive(Debug)] pub enum Error { NoEngine, @@ -103,6 +142,8 @@ pub enum Error { InvalidJWTSecret(String), InvalidForkForPayload, InvalidPayloadBody(String), + InvalidPayloadConversion, + InvalidBlobConversion(String), BeaconStateError(BeaconStateError), } @@ -122,37 +163,81 @@ pub enum BlockProposalContents> { Payload { payload: Payload, block_value: Uint256, - // TODO: remove for 4844, since it appears in PayloadAndBlobs - _phantom: PhantomData, + }, + PayloadAndBlobs { + payload: Payload, + block_value: Uint256, + kzg_commitments: KzgCommitments, + blobs: >::BlobItems, + proofs: KzgProofs, }, } +impl> TryFrom> + for BlockProposalContents +{ + type Error = Error; + + fn try_from(response: GetPayloadResponse) -> Result { + let (execution_payload, block_value, maybe_bundle) = 
response.into(); + match maybe_bundle { + Some(bundle) => Ok(Self::PayloadAndBlobs { + payload: execution_payload.into(), + block_value, + kzg_commitments: bundle.commitments, + blobs: BlobItems::try_from_blobs(bundle.blobs) + .map_err(Error::InvalidBlobConversion)?, + proofs: bundle.proofs, + }), + None => Ok(Self::Payload { + payload: execution_payload.into(), + block_value, + }), + } + } +} + +#[allow(clippy::type_complexity)] impl> BlockProposalContents { - pub fn payload(&self) -> &Payload { + pub fn deconstruct( + self, + ) -> ( + Payload, + Option>, + Option<>::BlobItems>, + Option>, + ) { match self { Self::Payload { payload, block_value: _, - _phantom: _, - } => payload, + } => (payload, None, None, None), + Self::PayloadAndBlobs { + payload, + block_value: _, + kzg_commitments, + blobs, + proofs, + } => (payload, Some(kzg_commitments), Some(blobs), Some(proofs)), + } + } + + pub fn payload(&self) -> &Payload { + match self { + Self::Payload { payload, .. } => payload, + Self::PayloadAndBlobs { payload, .. } => payload, } } pub fn to_payload(self) -> Payload { match self { - Self::Payload { - payload, - block_value: _, - _phantom: _, - } => payload, + Self::Payload { payload, .. } => payload, + Self::PayloadAndBlobs { payload, .. } => payload, } } pub fn block_value(&self) -> &Uint256 { match self { - Self::Payload { - payload: _, - block_value, - _phantom: _, - } => block_value, + Self::Payload { block_value, .. } => block_value, + Self::PayloadAndBlobs { block_value, .. 
} => block_value, } } pub fn default_at_fork(fork_name: ForkName) -> Result { @@ -161,9 +246,15 @@ impl> BlockProposalContents BlockProposalContents::PayloadAndBlobs { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + blobs: Payload::default_blobs_at_fork(fork_name)?, + kzg_commitments: VariableList::default(), + proofs: VariableList::default(), + }, }) } } @@ -207,9 +298,11 @@ pub enum FailedCondition { EpochsSinceFinalization, } +type PayloadContentsRefTuple<'a, T> = (ExecutionPayloadRef<'a, T>, Option<&'a BlobsBundle>); + struct Inner { engine: Arc, - builder: Option, + builder: ArcSwapOption, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option

, proposer_preparation_data: Mutex>, @@ -220,6 +313,7 @@ struct Inner { builder_profit_threshold: Uint256, log: Logger, always_prefer_builder_payload: bool, + ignore_builder_override_suggestion_threshold: f32, /// Track whether the last `newPayload` call errored. /// /// This is used *only* in the informational sync status endpoint, so that a VC using this @@ -250,6 +344,7 @@ pub struct Config { pub builder_profit_threshold: u128, pub execution_timeout_multiplier: Option, pub always_prefer_builder_payload: bool, + pub ignore_builder_override_suggestion_threshold: f32, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -259,6 +354,40 @@ pub struct ExecutionLayer { inner: Arc>, } +/// This function will return the percentage difference between 2 U256 values, using `base_value` +/// as the denominator. It is accurate to 7 decimal places which is about the precision of +/// an f32. +/// +/// If some error is encountered in the calculation, None will be returned. 
+fn percentage_difference_u256(base_value: Uint256, comparison_value: Uint256) -> Option { + if base_value == Uint256::zero() { + return None; + } + // this is the total supply of ETH in WEI + let max_value = Uint256::from(12u8) * Uint256::exp10(25); + if base_value > max_value || comparison_value > max_value { + return None; + } + + // Now we should be able to calculate the difference without division by zero or overflow + const PRECISION: usize = 7; + let precision_factor = Uint256::exp10(PRECISION); + let scaled_difference = if base_value <= comparison_value { + (comparison_value - base_value) * precision_factor + } else { + (base_value - comparison_value) * precision_factor + }; + let scaled_proportion = scaled_difference / base_value; + // max value of scaled difference is 1.2 * 10^33, well below the max value of a u128 / f64 / f32 + let percentage = + 100.0f64 * scaled_proportion.low_u128() as f64 / precision_factor.low_u128() as f64; + if base_value <= comparison_value { + Some(percentage as f32) + } else { + Some(-percentage as f32) + } +} + impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { @@ -274,6 +403,7 @@ impl ExecutionLayer { builder_profit_threshold, execution_timeout_multiplier, always_prefer_builder_payload, + ignore_builder_override_suggestion_threshold, } = config; if urls.len() > 1 { @@ -324,25 +454,9 @@ impl ExecutionLayer { Engine::new(api, executor.clone(), &log) }; - let builder = builder_url - .map(|url| { - let builder_client = BuilderHttpClient::new(url.clone(), builder_user_agent) - .map_err(Error::Builder)?; - - info!( - log, - "Using external block builder"; - "builder_url" => ?url, - "builder_profit_threshold" => builder_profit_threshold, - "local_user_agent" => builder_client.get_user_agent(), - ); - Ok::<_, Error>(builder_client) - }) - .transpose()?; - let inner = Inner { engine: Arc::new(engine), - builder, + builder: ArcSwapOption::empty(), execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, proposer_preparation_data: Mutex::new(HashMap::new()), @@ -353,31 +467,74 @@ impl ExecutionLayer { builder_profit_threshold: Uint256::from(builder_profit_threshold), log, always_prefer_builder_payload, + ignore_builder_override_suggestion_threshold, last_new_payload_errored: RwLock::new(false), }; - Ok(Self { + let el = Self { inner: Arc::new(inner), - }) - } -} + }; + + if let Some(builder_url) = builder_url { + el.set_builder_url(builder_url, builder_user_agent)?; + } + + Ok(el) + } -impl ExecutionLayer { fn engine(&self) -> &Arc { &self.inner.engine } - pub fn builder(&self) -> &Option { - &self.inner.builder + pub fn builder(&self) -> Option> { + self.inner.builder.load_full() + } + + /// Set the builder URL after initialization. + /// + /// This is useful for breaking circular dependencies between mock ELs and mock builders in + /// tests. 
+ pub fn set_builder_url( + &self, + builder_url: SensitiveUrl, + builder_user_agent: Option, + ) -> Result<(), Error> { + let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) + .map_err(Error::Builder)?; + info!( + self.log(), + "Using external block builder"; + "builder_url" => ?builder_url, + "builder_profit_threshold" => self.inner.builder_profit_threshold.as_u128(), + "local_user_agent" => builder_client.get_user_agent(), + ); + self.inner.builder.swap(Some(Arc::new(builder_client))); + Ok(()) } /// Cache a full payload, keyed on the `tree_hash_root` of the payload - fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { - self.inner.payload_cache.put(payload.clone_from_ref()) + fn cache_payload( + &self, + payload_and_blobs: PayloadContentsRefTuple, + ) -> Option> { + let (payload_ref, maybe_json_blobs_bundle) = payload_and_blobs; + + let payload = payload_ref.clone_from_ref(); + let maybe_blobs_bundle = maybe_json_blobs_bundle + .cloned() + .map(|blobs_bundle| BlobsBundle { + commitments: blobs_bundle.commitments, + proofs: blobs_bundle.proofs, + blobs: blobs_bundle.blobs, + }); + + self.inner + .payload_cache + .put(FullPayloadContents::new(payload, maybe_blobs_bundle)) } /// Attempt to retrieve a full payload from the payload cache by the payload root - pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { + pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.get(root) } @@ -509,9 +666,9 @@ impl ExecutionLayer { /// /// This function is a wrapper over `Self::is_synced` that makes an additional /// check for the execution layer sync status. Checks if the latest block has - /// a `block_number != 0`. + /// a `block_number != 0` *if* the `current_slot` is also `> 0`. /// Returns the `Self::is_synced` response if unable to get latest block. 
- pub async fn is_synced_for_notifier(&self) -> bool { + pub async fn is_synced_for_notifier(&self, current_slot: Slot) -> bool { let synced = self.is_synced().await; if synced { if let Ok(Some(block)) = self @@ -520,7 +677,7 @@ impl ExecutionLayer { .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await { - if block.block_number == 0 { + if block.block_number == 0 && current_slot > 0 { return false; } } @@ -675,6 +832,7 @@ impl ExecutionLayer { current_fork, ) .await + .and_then(GetPayloadResponse::try_into) .map(ProvenancedPayload::Local) } }; @@ -740,11 +898,11 @@ impl ExecutionLayer { let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!( timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async { builder - .get_builder_header::(slot, parent_hash, &pubkey) + .get_builder_header::(slot, parent_hash, &pubkey) .await }), timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching::( + self.get_full_payload_caching( parent_hash, payload_attributes, forkchoice_update_params, @@ -758,13 +916,13 @@ impl ExecutionLayer { self.log(), "Requested blinded execution payload"; "relay_fee_recipient" => match &relay_result { - Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()), + Ok(Some(r)) => format!("{:?}", r.data.message.header().fee_recipient()), Ok(None) => "empty response".to_string(), Err(_) => "request failed".to_string(), }, "relay_response_ms" => relay_duration.as_millis(), "local_fee_recipient" => match &local_result { - Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()), + Ok(get_payload_response) => format!("{:?}", get_payload_response.fee_recipient()), Err(_) => "request failed".to_string() }, "local_response_ms" => local_duration.as_millis(), @@ -778,43 +936,61 @@ impl ExecutionLayer { "Builder error when requesting payload"; "info" => "falling back to local execution client", "relay_error" => ?e, - "local_block_hash" => 
?local.payload().block_hash(), + "local_block_hash" => ?local.block_hash(), "parent_hash" => ?parent_hash, ); - Ok(ProvenancedPayload::Local(local)) + Ok(ProvenancedPayload::Local(local.try_into()?)) } (Ok(None), Ok(local)) => { info!( self.log(), "Builder did not return a payload"; "info" => "falling back to local execution client", - "local_block_hash" => ?local.payload().block_hash(), + "local_block_hash" => ?local.block_hash(), "parent_hash" => ?parent_hash, ); - Ok(ProvenancedPayload::Local(local)) + Ok(ProvenancedPayload::Local(local.try_into()?)) } (Ok(Some(relay)), Ok(local)) => { - let header = &relay.data.message.header; + let header = &relay.data.message.header(); info!( self.log(), "Received local and builder payloads"; "relay_block_hash" => ?header.block_hash(), - "local_block_hash" => ?local.payload().block_hash(), + "local_block_hash" => ?local.block_hash(), "parent_hash" => ?parent_hash, ); - let relay_value = relay.data.message.value; + let relay_value = relay.data.message.value(); let local_value = *local.block_value(); + if !self.inner.always_prefer_builder_payload { - if local_value >= relay_value { + if local_value >= *relay_value { info!( self.log(), "Local block is more profitable than relay block"; "local_block_value" => %local_value, "relay_value" => %relay_value ); - return Ok(ProvenancedPayload::Local(local)); + return Ok(ProvenancedPayload::Local(local.try_into()?)); + } else if local.should_override_builder().unwrap_or(false) { + let percentage_difference = + percentage_difference_u256(local_value, *relay_value); + if percentage_difference.map_or(false, |percentage| { + percentage + < self + .inner + .ignore_builder_override_suggestion_threshold + }) { + info!( + self.log(), + "Using local payload because execution engine suggested we ignore builder payload"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local.try_into()?)); + } } else { info!( self.log(), @@ -829,18 
+1005,12 @@ impl ExecutionLayer { &relay, parent_hash, payload_attributes, - Some(local.payload().block_number()), + Some(local.block_number()), self.inner.builder_profit_threshold, current_fork, spec, ) { - Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), + Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), Err(reason) if !reason.payload_invalid() => { info!( self.log(), @@ -850,7 +1020,7 @@ impl ExecutionLayer { "relay_block_hash" => ?header.block_hash(), "parent_hash" => ?parent_hash, ); - Ok(ProvenancedPayload::Local(local)) + Ok(ProvenancedPayload::Local(local.try_into()?)) } Err(reason) => { metrics::inc_counter_vec( @@ -865,12 +1035,12 @@ impl ExecutionLayer { "relay_block_hash" => ?header.block_hash(), "parent_hash" => ?parent_hash, ); - Ok(ProvenancedPayload::Local(local)) + Ok(ProvenancedPayload::Local(local.try_into()?)) } } } (Ok(Some(relay)), Err(local_error)) => { - let header = &relay.data.message.header; + let header = &relay.data.message.header(); info!( self.log(), @@ -889,22 +1059,12 @@ impl ExecutionLayer { current_fork, spec, ) { - Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), + Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. - Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload { - payload: relay.data.message.header, - block_value: relay.data.message.value, - _phantom: PhantomData, - }, - )), + Err(e) if !e.payload_invalid() => { + Ok(ProvenancedPayload::try_from(relay.data.message)?) 
+ } Err(reason) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -972,17 +1132,18 @@ impl ExecutionLayer { current_fork, ) .await + .and_then(GetPayloadResponse::try_into) .map(ProvenancedPayload::Local) } /// Get a full payload without caching its result in the execution layer's payload cache. - async fn get_full_payload>( + async fn get_full_payload( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - ) -> Result, Error> { + ) -> Result, Error> { self.get_full_payload_with( parent_hash, payload_attributes, @@ -994,13 +1155,13 @@ impl ExecutionLayer { } /// Get a full payload and cache its result in the execution layer's payload cache. - async fn get_full_payload_caching>( + async fn get_full_payload_caching( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - ) -> Result, Error> { + ) -> Result, Error> { self.get_full_payload_with( parent_hash, payload_attributes, @@ -1011,14 +1172,17 @@ impl ExecutionLayer { .await } - async fn get_full_payload_with>( + async fn get_full_payload_with( &self, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, - ) -> Result, Error> { + cache_fn: fn( + &ExecutionLayer, + PayloadContentsRefTuple, + ) -> Option>, + ) -> Result, Error> { self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine @@ -1071,7 +1235,7 @@ impl ExecutionLayer { } }; - let payload_fut = async { + let payload_response = async { debug!( self.log(), "Issuing engine_getPayload"; @@ -1081,36 +1245,30 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); engine.api.get_payload::(current_fork, payload_id).await - }; - 
let payload_response = payload_fut.await; - let (execution_payload, block_value) = payload_response.map(|payload_response| { - if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { - error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), - "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), - ); - } - if f(self, payload_response.execution_payload_ref()).is_some() { - warn!( - self.log(), - "Duplicate payload cached, this might indicate redundant proposal \ - attempts." - ); - } - payload_response.into() - })?; - Ok(BlockProposalContents::Payload { - payload: execution_payload.into(), - block_value, - _phantom: PhantomData, - }) + }.await?; + + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. 
Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), + "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + ); + } + if cache_fn(self, (payload_response.execution_payload_ref(), payload_response.blobs_bundle().ok())).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." + ); + } + + Ok(payload_response) }) .await .map_err(Box::new) @@ -1120,24 +1278,25 @@ impl ExecutionLayer { /// Maps to the `engine_newPayload` JSON-RPC call. pub async fn notify_new_payload( &self, - execution_payload: &ExecutionPayload, + new_payload_request: NewPayloadRequest, ) -> Result { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::NEW_PAYLOAD], ); + let block_hash = new_payload_request.block_hash(); trace!( self.log(), "Issuing engine_newPayload"; - "parent_hash" => ?execution_payload.parent_hash(), - "block_hash" => ?execution_payload.block_hash(), - "block_number" => execution_payload.block_number(), + "parent_hash" => ?new_payload_request.parent_hash(), + "block_hash" => ?block_hash, + "block_number" => ?new_payload_request.block_number(), ); let result = self .engine() - .request(|engine| engine.api.new_payload(execution_payload.clone())) + .request(|engine| engine.api.new_payload(new_payload_request)) .await; if let Ok(status) = &result { @@ -1148,7 +1307,7 @@ impl ExecutionLayer { } *self.inner.last_new_payload_errored.write().await = result.is_err(); - process_payload_status(execution_payload.block_hash(), result, self.log()) + process_payload_status(block_hash, result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1565,6 +1724,7 @@ impl ExecutionLayer { let payload = match fork { ForkName::Merge => ExecutionPayloadMerge::default().into(), ForkName::Capella => 
ExecutionPayloadCapella::default().into(), + ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } @@ -1594,6 +1754,17 @@ impl ExecutionLayer { } } + pub async fn get_block_by_number( + &self, + query: BlockByNumberQuery<'_>, + ) -> Result, Error> { + self.engine() + .request(|engine| async move { engine.api.get_block_by_number(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + pub async fn get_payload_by_hash_legacy( &self, hash: ExecutionBlockHash, @@ -1621,31 +1792,30 @@ impl ExecutionLayer { return match fork { ForkName::Merge => Ok(Some(ExecutionPayloadMerge::default().into())), ForkName::Capella => Ok(Some(ExecutionPayloadCapella::default().into())), + ForkName::Deneb => Ok(Some(ExecutionPayloadDeneb::default().into())), ForkName::Base | ForkName::Altair => Err(ApiError::UnsupportedForkVariant( format!("called get_payload_by_hash_from_engine with {}", fork), )), }; } - let block = if let Some(block) = engine + let Some(block) = engine .api .get_block_by_hash_with_txns::(hash, fork) .await? 
- { - block - } else { + else { return Ok(None); }; - let transactions = VariableList::new( - block - .transactions() - .iter() - .map(|transaction| VariableList::new(transaction.rlp().to_vec())) - .collect::>() - .map_err(ApiError::DeserializeTransaction)?, - ) - .map_err(ApiError::DeserializeTransactions)?; + let convert_transactions = |transactions: Vec| { + VariableList::new( + transactions + .into_iter() + .map(|tx| VariableList::new(tx.rlp().to_vec())) + .collect::, ssz_types::Error>>()?, + ) + .map_err(ApiError::SszError) + }; let payload = match block { ExecutionBlockWithTransactions::Merge(merge_block) => { @@ -1663,7 +1833,7 @@ impl ExecutionLayer { extra_data: merge_block.extra_data, base_fee_per_gas: merge_block.base_fee_per_gas, block_hash: merge_block.block_hash, - transactions, + transactions: convert_transactions(merge_block.transactions)?, }) } ExecutionBlockWithTransactions::Capella(capella_block) => { @@ -1689,10 +1859,39 @@ impl ExecutionLayer { extra_data: capella_block.extra_data, base_fee_per_gas: capella_block.base_fee_per_gas, block_hash: capella_block.block_hash, - transactions, + transactions: convert_transactions(capella_block.transactions)?, withdrawals, }) } + ExecutionBlockWithTransactions::Deneb(deneb_block) => { + let withdrawals = VariableList::new( + deneb_block + .withdrawals + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawals)?; + ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: deneb_block.parent_hash, + fee_recipient: deneb_block.fee_recipient, + state_root: deneb_block.state_root, + receipts_root: deneb_block.receipts_root, + logs_bloom: deneb_block.logs_bloom, + prev_randao: deneb_block.prev_randao, + block_number: deneb_block.block_number, + gas_limit: deneb_block.gas_limit, + gas_used: deneb_block.gas_used, + timestamp: deneb_block.timestamp, + extra_data: deneb_block.extra_data, + base_fee_per_gas: deneb_block.base_fee_per_gas, + block_hash: deneb_block.block_hash, 
+ transactions: convert_transactions(deneb_block.transactions)?, + withdrawals, + blob_gas_used: deneb_block.blob_gas_used, + excess_blob_gas: deneb_block.excess_blob_gas, + }) + } }; Ok(Some(payload)) @@ -1701,8 +1900,8 @@ impl ExecutionLayer { pub async fn propose_blinded_beacon_block( &self, block_root: Hash256, - block: &SignedBeaconBlock>, - ) -> Result, Error> { + block: &SignedBlockContents>, + ) -> Result, Error> { debug!( self.log(), "Sending block to builder"; @@ -1721,11 +1920,12 @@ impl ExecutionLayer { .await; match &payload_result { - Ok(payload) => { + Ok(unblinded_response) => { metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::SUCCESS], ); + let payload = unblinded_response.payload_ref(); info!( self.log(), "Builder successfully revealed payload"; @@ -1749,6 +1949,7 @@ impl ExecutionLayer { "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, "parent_hash" => ?block + .signed_block() .message() .execution_payload() .map(|payload| format!("{}", payload.parent_hash())) @@ -1867,8 +2068,8 @@ impl fmt::Display for InvalidBuilderPayload { } /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. -fn verify_builder_bid>( - bid: &ForkVersionedResponse>, +fn verify_builder_bid( + bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, payload_attributes: &PayloadAttributes, block_number: Option, @@ -1877,11 +2078,11 @@ fn verify_builder_bid>( spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); - let header = &bid.data.message.header; - let payload_value = bid.data.message.value; + let header = &bid.data.message.header(); + let payload_value = bid.data.message.value(); // Avoid logging values that we can't represent with our Prometheus library. 
- let payload_value_gwei = bid.data.message.value / 1_000_000_000; + let payload_value_gwei = bid.data.message.value() / 1_000_000_000; if payload_value_gwei <= Uint256::from(i64::max_value()) { metrics::set_gauge_vec( &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, @@ -1895,12 +2096,12 @@ fn verify_builder_bid>( .ok() .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); - let payload_withdrawals_root = header.withdrawals_root().ok(); + let payload_withdrawals_root = header.withdrawals_root().ok().copied(); - if payload_value < profit_threshold { + if *payload_value < profit_threshold { Err(Box::new(InvalidBuilderPayload::LowValue { profit_threshold, - payload_value, + payload_value: *payload_value, })) } else if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { @@ -1930,7 +2131,7 @@ fn verify_builder_bid>( } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { signature: bid.data.signature.clone(), - pubkey: bid.data.message.pubkey, + pubkey: *bid.data.message.pubkey(), })) } else if payload_withdrawals_root != expected_withdrawals_root { Err(Box::new(InvalidBuilderPayload::WithdrawalsRoot { @@ -1951,13 +2152,6 @@ async fn timed_future, T>(metric: &str, future: F) -> (T, (result, duration) } -fn noop( - _: &ExecutionLayer, - _: ExecutionPayloadRef, -) -> Option> { - None -} - #[cfg(test)] /// Returns the duration since the unix epoch. 
fn timestamp_now() -> u64 { @@ -1967,6 +2161,13 @@ fn timestamp_now() -> u64 { .as_secs() } +fn noop( + _: &ExecutionLayer, + _: PayloadContentsRefTuple, +) -> Option> { + None +} + #[cfg(test)] mod test { use super::*; @@ -2112,4 +2313,42 @@ mod test { }) .await; } + + #[tokio::test] + async fn percentage_difference_u256_tests() { + // ensure function returns `None` when base value is zero + assert_eq!(percentage_difference_u256(0.into(), 1.into()), None); + // ensure function returns `None` when either value is greater than 120 Million ETH + let max_value = Uint256::from(12u8) * Uint256::exp10(25); + assert_eq!( + percentage_difference_u256(1u8.into(), max_value + Uint256::from(1u8)), + None + ); + assert_eq!( + percentage_difference_u256(max_value + Uint256::from(1u8), 1u8.into()), + None + ); + // it should work up to max value + assert_eq!( + percentage_difference_u256(max_value, max_value / Uint256::from(2u8)), + Some(-50f32) + ); + // should work when base value is greater than comparison value + assert_eq!( + percentage_difference_u256(4u8.into(), 3u8.into()), + Some(-25f32) + ); + // should work when comparison value is greater than base value + assert_eq!( + percentage_difference_u256(4u8.into(), 5u8.into()), + Some(25f32) + ); + // should be accurate to 7 decimal places + let result = + percentage_difference_u256(Uint256::from(31415926u64), Uint256::from(13371337u64)) + .expect("should get percentage"); + // result = -57.4377116 + assert!(result > -57.43772); + assert!(result <= -57.43771); + } } diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs index 1722edff46..1155b1ca3a 100644 --- a/beacon_node/execution_layer/src/payload_cache.rs +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -1,13 +1,14 @@ +use eth2::types::FullPayloadContents; use lru::LruCache; use parking_lot::Mutex; use tree_hash::TreeHash; -use types::{EthSpec, ExecutionPayload, Hash256}; +use types::{EthSpec, Hash256}; 
pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; /// A cache mapping execution payloads by tree hash roots. pub struct PayloadCache { - payloads: Mutex>>, + payloads: Mutex>>, } #[derive(Hash, PartialEq, Eq)] @@ -22,16 +23,16 @@ impl Default for PayloadCache { } impl PayloadCache { - pub fn put(&self, payload: ExecutionPayload) -> Option> { - let root = payload.tree_hash_root(); + pub fn put(&self, payload: FullPayloadContents) -> Option> { + let root = payload.payload_ref().tree_hash_root(); self.payloads.lock().put(PayloadCacheId(root), payload) } - pub fn pop(&self, root: &Hash256) -> Option> { + pub fn pop(&self, root: &Hash256) -> Option> { self.payloads.lock().pop(&PayloadCacheId(*root)) } - pub fn get(&self, hash: &Hash256) -> Option> { + pub fn get(&self, hash: &Hash256) -> Option> { self.payloads.lock().get(&PayloadCacheId(*hash)).cloned() } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a8d98a767f..cf0faf655a 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,5 @@ use crate::engines::ForkchoiceState; +use crate::EthersTransaction; use crate::{ engine_api::{ json_structures::{ @@ -8,15 +9,27 @@ use crate::{ }, ExecutionBlockWithTransactions, }; +use eth2::types::BlobsBundle; +use kzg::{Kzg, KzgCommitment, KzgProof}; +use parking_lot::Mutex; +use rand::{rngs::StdRng, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; +use ssz::Decode; +use ssz_types::VariableList; use std::collections::HashMap; +use std::sync::Arc; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, - ForkName, Hash256, Uint256, + Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + 
ExecutionPayloadDeneb, ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256, + Transaction, Transactions, Uint256, }; +use super::DEFAULT_TERMINAL_BLOCK; + +const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); + const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -118,6 +131,19 @@ pub struct ExecutionBlockGenerator { * Post-merge fork triggers */ pub shanghai_time: Option, // withdrawals + pub cancun_time: Option, // deneb + /* + * deneb stuff + */ + pub blobs_bundles: HashMap>, + pub kzg: Option>, + rng: Arc>, +} + +fn make_rng() -> Arc> { + // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary + // but fixed value for reproducibility. + Arc::new(Mutex::new(StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64))) } impl ExecutionBlockGenerator { @@ -126,6 +152,8 @@ impl ExecutionBlockGenerator { terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, + cancun_time: Option, + kzg: Option, ) -> Self { let mut gen = Self { head_block: <_>::default(), @@ -139,6 +167,10 @@ impl ExecutionBlockGenerator { next_payload_id: 0, payload_ids: <_>::default(), shanghai_time, + cancun_time, + blobs_bundles: <_>::default(), + kzg: kzg.map(Arc::new), + rng: make_rng(), }; gen.insert_pow_block(0).unwrap(); @@ -171,9 +203,12 @@ impl ExecutionBlockGenerator { } pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { - match self.shanghai_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Capella, - _ => ForkName::Merge, + match self.cancun_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Merge, + }, } } @@ -249,10 +284,15 @@ impl ExecutionBlockGenerator { finalized_block_hash )); } - let parent_hash = if block_number == 0 { - ExecutionBlockHash::zero() + let block = if block_number == 0 { + 
generate_genesis_block(self.terminal_total_difficulty, self.terminal_block_number)? } else if let Some(block) = self.block_by_number(block_number - 1) { - block.block_hash() + generate_pow_block( + self.terminal_total_difficulty, + self.terminal_block_number, + block_number, + block.block_hash(), + )? } else { return Err(format!( "parent with block number {} not found", @@ -260,13 +300,6 @@ impl ExecutionBlockGenerator { )); }; - let block = generate_pow_block( - self.terminal_total_difficulty, - self.terminal_block_number, - block_number, - parent_hash, - )?; - // Insert block into block tree self.insert_block(Block::PoW(block))?; @@ -327,10 +360,10 @@ impl ExecutionBlockGenerator { Ok(hash) } + // This does not reject duplicate blocks inserted. This lets us re-use the same execution + // block generator for multiple beacon chains which is useful in testing. pub fn insert_block(&mut self, block: Block) -> Result { - if self.blocks.contains_key(&block.block_hash()) { - return Err(format!("{:?} is already known", block.block_hash())); - } else if block.parent_hash() != ExecutionBlockHash::zero() + if block.parent_hash() != ExecutionBlockHash::zero() && !self.blocks.contains_key(&block.parent_hash()) { return Err(format!("parent block {:?} is unknown", block.parent_hash())); @@ -343,7 +376,7 @@ impl ExecutionBlockGenerator { let block_hash = block.block_hash(); self.block_hashes .entry(block.block_number()) - .or_insert_with(Vec::new) + .or_default() .push(block_hash); self.blocks.insert(block_hash, block); @@ -388,10 +421,12 @@ impl ExecutionBlockGenerator { self.payload_ids.get(id).cloned() } + pub fn get_blobs_bundle(&mut self, id: &PayloadId) -> Option> { + self.blobs_bundles.get(id).cloned() + } + pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { - let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) { - parent - } else { + let Some(parent) = self.blocks.get(&payload.parent_hash()) else { return PayloadStatusV1 { 
status: PayloadStatusV1Status::Syncing, latest_valid_hash: None, @@ -424,14 +459,20 @@ impl ExecutionBlockGenerator { forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { - if let Some(payload) = self - .pending_payloads - .remove(&forkchoice_state.head_block_hash) - { + // This is meant to cover starting post-merge transition at genesis. Useful for + // testing Capella forks and later. + let head_block_hash = forkchoice_state.head_block_hash; + if let Some(genesis_pow_block) = self.block_by_number(0) { + if genesis_pow_block.block_hash() == head_block_hash { + self.terminal_block_hash = head_block_hash; + } + } + + if let Some(payload) = self.pending_payloads.remove(&head_block_hash) { self.insert_block(Block::PoS(payload))?; } - let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); + let unknown_head_block_hash = !self.blocks.contains_key(&head_block_hash); let unknown_safe_block_hash = forkchoice_state.safe_block_hash != ExecutionBlockHash::zero() && !self.blocks.contains_key(&forkchoice_state.safe_block_hash); @@ -464,75 +505,15 @@ impl ExecutionBlockGenerator { let parent = self .blocks - .get(&forkchoice_state.head_block_hash) - .ok_or_else(|| { - format!( - "unknown parent block {:?}", - forkchoice_state.head_block_hash - ) - })?; + .get(&head_block_hash) + .cloned() + .ok_or_else(|| format!("unknown parent block {head_block_hash:?}"))?; let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = match &attributes { - PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - 
extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { - ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), - }), - _ => unreachable!(), - }, - }; - - *execution_payload.block_hash_mut() = - ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); - + let execution_payload = + self.build_new_execution_payload(head_block_hash, &parent, id, &attributes)?; self.payload_ids.insert(id, execution_payload); Some(id) @@ -559,12 +540,236 @@ impl ExecutionBlockGenerator { payload_id: id.map(Into::into), }) } + + pub fn build_new_execution_payload( + &mut self, + head_block_hash: 
ExecutionBlockHash, + parent: &Block, + id: PayloadId, + attributes: &PayloadAttributes, + ) -> Result, String> { + let mut execution_payload = match attributes { + PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + 
transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + }), + _ => unreachable!(), + }, + PayloadAttributes::V3(pa) => ExecutionPayload::Deneb(ExecutionPayloadDeneb { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + }; + + match execution_payload.fork_name() { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {} + ForkName::Deneb => { + // get random number between 0 and Max Blobs + let mut rng = self.rng.lock(); + let num_blobs = rng.gen::() % (T::max_blobs_per_block() + 1); + let (bundle, transactions) = generate_blobs(num_blobs)?; + for tx in Vec::from(transactions) { + execution_payload + .transactions_mut() + .push(tx) + .map_err(|_| "transactions are full".to_string())?; + } + self.blobs_bundles.insert(id, bundle); + } + } + + *execution_payload.block_hash_mut() = + ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); + Ok(execution_payload) + } +} + +pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Blob), String> { + let BlobsBundle:: { + commitments, + proofs, + blobs, + } = BlobsBundle::from_ssz_bytes(TEST_BLOB_BUNDLE) + .map_err(|e| format!("Unable to decode ssz: {:?}", e))?; + + Ok(( + commitments + .get(0) + .cloned() + .ok_or("commitment missing in test bundle")?, + proofs + .get(0) + .cloned() + .ok_or("proof missing in test bundle")?, + blobs.get(0).cloned().ok_or("blob missing in test bundle")?, + )) +} + +pub fn 
generate_blobs( + n_blobs: usize, +) -> Result<(BlobsBundle, Transactions), String> { + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + + let mut bundle = BlobsBundle::::default(); + let mut transactions = vec![]; + + for blob_index in 0..n_blobs { + let tx = static_valid_tx::() + .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; + + transactions.push(tx); + bundle + .blobs + .push(blob.clone()) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .commitments + .push(kzg_commitment) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .proofs + .push(kzg_proof) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + } + + Ok((bundle, transactions.into())) +} + +fn static_valid_tx() -> Result, String> { + // This is a real transaction hex encoded, but we don't care about the contents of the transaction. + let transaction: EthersTransaction = serde_json::from_str( + r#"{ + "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2", + "blockNumber":"0x5daf3b", + "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d", + "gas":"0xc350", + "gasPrice":"0x4a817c800", + "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b", + "input":"0x68656c6c6f21", + "nonce":"0x15", + "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb", + "transactionIndex":"0x41", + "value":"0xf3dbb76162000", + "v":"0x25", + "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea", + "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c" + }"#, + ) + .unwrap(); + VariableList::new(transaction.rlp().to_vec()) + .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e)) } fn payload_id_from_u64(n: u64) -> PayloadId { n.to_le_bytes() } +pub fn generate_genesis_header( + spec: &ChainSpec, + post_transition_merge: bool, +) -> Option> { + let genesis_fork = 
spec.fork_name_at_slot::(spec.genesis_slot); + let genesis_block_hash = + generate_genesis_block(spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK) + .ok() + .map(|block| block.block_hash); + match genesis_fork { + ForkName::Base | ForkName::Altair => None, + ForkName::Merge => { + if post_transition_merge { + let mut header = ExecutionPayloadHeader::Merge(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } else { + Some(ExecutionPayloadHeader::::Merge(<_>::default())) + } + } + ForkName::Capella => { + let mut header = ExecutionPayloadHeader::Capella(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } + ForkName::Deneb => { + let mut header = ExecutionPayloadHeader::Deneb(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + Some(header) + } + } +} + +pub fn generate_genesis_block( + terminal_total_difficulty: Uint256, + terminal_block_number: u64, +) -> Result { + generate_pow_block( + terminal_total_difficulty, + terminal_block_number, + 0, + ExecutionBlockHash::zero(), + ) +} + pub fn generate_pow_block( terminal_total_difficulty: Uint256, terminal_block_number: u64, @@ -605,7 +810,9 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use types::MainnetEthSpec; + use eth2_network_config::TRUSTED_SETUP_BYTES; + use kzg::TrustedSetup; + use types::{MainnetEthSpec, MinimalEthSpec}; #[test] fn pow_chain_only() { @@ -618,6 +825,8 @@ mod test { TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, + None, + None, ); for i in 0..=TERMINAL_BLOCK { @@ -665,4 +874,32 @@ mod test { assert!(generator.block_by_number(next_i).is_none()); } } + + #[test] + fn valid_test_blobs() { + assert!( + validate_blob::().unwrap(), + "Mainnet preset test blobs bundle should contain valid proofs" + ); + assert!( + validate_blob::().unwrap(), + "Minimal preset test blobs bundle should contain valid proofs" + ); + } + + fn 
validate_blob() -> Result { + let kzg = load_kzg()?; + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) + .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; + kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) + .map_err(|e| format!("Invalid blobs bundle: {e:?}")) + } + + fn load_kzg() -> Result { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {e:?}"))?; + Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| format!("Failed to load trusted setup: {e:?}")) + } } diff --git a/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz new file mode 100644 index 0000000000..6b549a4da8 Binary files /dev/null and b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz differ diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 62cab5ad28..9dff1ac008 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -93,7 +93,7 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { + ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 => { let request = match method { ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( get_param::>(params, 0) @@ -106,7 +106,17 @@ pub async fn handle_rpc( .map(|jep| JsonExecutionPayload::V1(jep)) }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, - // TODO(4844) add that here.. 
+ ENGINE_NEW_PAYLOAD_V3 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V3(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + }) + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, _ => unreachable!(), }; @@ -144,7 +154,32 @@ pub async fn handle_rpc( )); } } - // TODO(4844) add 4844 error checking here + ForkName::Deneb => { + if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 { + return Err(( + format!("{} called after deneb fork!", method), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` after deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } _ => unreachable!(), }; @@ -180,7 +215,7 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { + ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -196,6 +231,8 @@ pub async fn handle_rpc( ) })?; + let maybe_blobs = ctx.execution_block_generator.write().get_blobs_bundle(&id); + // validate method called correctly according to shanghai fork time if ctx .execution_block_generator @@ -209,7 +246,19 @@ pub async fn handle_rpc( FORK_REQUEST_MISMATCH_ERROR_CODE, )); } - // TODO(4844) add 4844 error checking here + // validate method called correctly according to deneb fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Deneb + && (method == ENGINE_GET_PAYLOAD_V1 
|| method == ENGINE_GET_PAYLOAD_V2) + { + return Err(( + format!("{} called after deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } match method { ENGINE_GET_PAYLOAD_V1 => { @@ -230,11 +279,31 @@ pub async fn handle_rpc( }) .unwrap() } + _ => unreachable!(), + }), + ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V3(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV3 { + execution_payload, + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V3 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + }) + .unwrap() + } + _ => unreachable!(), }), _ => unreachable!(), } } - ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { + ENGINE_FORKCHOICE_UPDATED_V1 + | ENGINE_FORKCHOICE_UPDATED_V2 + | ENGINE_FORKCHOICE_UPDATED_V3 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { @@ -260,7 +329,7 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V1)) .transpose() } - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { get_param::>(params, 1) .map(|opt| opt.map(JsonPayloadAttributes::V2)) .transpose() @@ -272,10 +341,15 @@ pub async fn handle_rpc( }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } + ENGINE_FORKCHOICE_UPDATED_V3 => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V3)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
+ } _ => unreachable!(), }; - // validate method called correctly according to shanghai fork time + // validate method called correctly according to fork time if let Some(pa) = payload_attributes.as_ref() { match ctx .execution_block_generator @@ -300,6 +374,15 @@ pub async fn handle_rpc( FORK_REQUEST_MISMATCH_ERROR_CODE, )); } + if method == ENGINE_FORKCHOICE_UPDATED_V3 { + return Err(( + format!( + "{} called with `JsonPayloadAttributesV3` before Deneb fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } if matches!(pa, JsonPayloadAttributes::V1(_)) { return Err(( format!( @@ -310,7 +393,20 @@ pub async fn handle_rpc( )); } } - // TODO(4844) add 4844 error checking here + ForkName::Deneb => { + if method == ENGINE_FORKCHOICE_UPDATED_V1 { + return Err(( + format!("{} called after Deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + if method == ENGINE_FORKCHOICE_UPDATED_V2 { + return Err(( + format!("{} called after Deneb fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } _ => unreachable!(), }; } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 9471be636d..1e45ef2726 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,44 +1,30 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; -use async_trait::async_trait; -use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -pub use ethereum_consensus::state_transition::Context; -use ethereum_consensus::{ - crypto::{SecretKey, Signature}, - primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, - state_transition::Error, -}; use fork_choice::ForkchoiceUpdateParameters; -use mev_rs::{ - 
blinded_block_provider::Server as BlindedBlockProviderServer, - signing::{sign_builder_message, verify_signed_builder_message}, - types::{ - bellatrix::{ - BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, - }, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, - SignedBuilderBid, SignedValidatorRegistration, - }, - Error as MevError, -}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use ssz::{Decode, Encode}; -use ssz_rs::{Merkleized, SimpleSerialize}; use std::collections::HashMap; use std::fmt::Debug; -use std::net::Ipv4Addr; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; +use types::builder_bid::{ + BuilderBid, BuilderBidCapella, BuilderBidDeneb, BuilderBidMerge, SignedBuilderBid, +}; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, + Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, + ExecutionPayloadHeaderRefMut, ForkName, ForkVersionedResponse, Hash256, PublicKeyBytes, + Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; +use types::{ExecutionBlockHash, SecretKey}; +use warp::{Filter, Rejection}; #[derive(Clone)] pub enum Operation { @@ -53,136 +39,176 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), MevError> { + fn apply>(self, bid: &mut B) { match self { - Operation::FeeRecipient(fee_recipient) => { - *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? 
- } - Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, - Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, - Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, - Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, + Operation::FeeRecipient(fee_recipient) => bid.set_fee_recipient(fee_recipient), + Operation::GasLimit(gas_limit) => bid.set_gas_limit(gas_limit as u64), + Operation::Value(value) => bid.set_value(value), + Operation::ParentHash(parent_hash) => bid.set_parent_hash(parent_hash), + Operation::PrevRandao(prev_randao) => bid.set_prev_randao(prev_randao), + Operation::BlockNumber(block_number) => bid.set_block_number(block_number as u64), + Operation::Timestamp(timestamp) => bid.set_timestamp(timestamp as u64), + Operation::WithdrawalsRoot(root) => bid.set_withdrawals_root(root), } - Ok(()) } } +#[derive(Debug)] +struct Custom(String); + +impl warp::reject::Reject for Custom {} + // contains functions we need for BuilderBids.. 
not sure what to call this -pub trait BidStuff { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; - fn gas_limit_mut(&mut self) -> &mut u64; - fn value_mut(&mut self) -> &mut U256; - fn parent_hash_mut(&mut self) -> &mut Hash32; - fn prev_randao_mut(&mut self) -> &mut Hash32; - fn block_number_mut(&mut self) -> &mut u64; - fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; +pub trait BidStuff { + fn set_fee_recipient(&mut self, fee_recipient_address: Address); + fn set_gas_limit(&mut self, gas_limit: u64); + fn set_value(&mut self, value: Uint256); + fn set_parent_hash(&mut self, parent_hash: Hash256); + fn set_prev_randao(&mut self, randao: Hash256); + fn set_block_number(&mut self, block_number: u64); + fn set_timestamp(&mut self, timestamp: u64); + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result; + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; - fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid; } -impl BidStuff for BuilderBid { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { - match self { - Self::Bellatrix(bid) => &mut bid.header.fee_recipient, - Self::Capella(bid) => &mut bid.header.fee_recipient, - } - } - - fn gas_limit_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.gas_limit, - Self::Capella(bid) => &mut bid.header.gas_limit, - } - } - - fn value_mut(&mut self) -> &mut U256 { - match self { - Self::Bellatrix(bid) => &mut bid.value, - Self::Capella(bid) => &mut bid.value, - } - } - - fn parent_hash_mut(&mut self) -> &mut Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.parent_hash, - Self::Capella(bid) => &mut bid.header.parent_hash, - } - } - - fn prev_randao_mut(&mut self) -> &mut 
Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.prev_randao, - Self::Capella(bid) => &mut bid.header.prev_randao, - } - } - - fn block_number_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.block_number, - Self::Capella(bid) => &mut bid.header.block_number, - } - } - - fn timestamp_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.timestamp, - Self::Capella(bid) => &mut bid.header.timestamp, - } - } - - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { - match self { - Self::Bellatrix(_) => Err(MevError::InvalidFork), - Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), - } - } - - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result { - match self { - Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), - Self::Capella(message) => sign_builder_message(message, signing_key, context), - } - } - - fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { - match self { - Self::Bellatrix(message) => { - SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) +impl BidStuff for BuilderBid { + fn set_fee_recipient(&mut self, fee_recipient: Address) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.fee_recipient = fee_recipient; } - Self::Capella(message) => { - SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.fee_recipient = fee_recipient; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.fee_recipient = fee_recipient; } } } + + fn set_gas_limit(&mut self, gas_limit: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.gas_limit = gas_limit; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.gas_limit = gas_limit; + } + 
ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.gas_limit = gas_limit; + } + } + } + + fn set_value(&mut self, value: Uint256) { + *self.value_mut() = value; + } + + fn set_parent_hash(&mut self, parent_hash: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } + } + } + + fn set_prev_randao(&mut self, prev_randao: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.prev_randao = prev_randao; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.prev_randao = prev_randao; + } + } + } + + fn set_block_number(&mut self, block_number: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.block_number = block_number; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.block_number = block_number; + } + } + } + + fn set_timestamp(&mut self, timestamp: u64) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.timestamp = timestamp; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.timestamp = timestamp; + } + } + } + + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) { + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Merge(_) => { + panic!("no withdrawals before capella") + } + 
ExecutionPayloadHeaderRefMut::Capella(header) => { + header.withdrawals_root = withdrawals_root; + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.withdrawals_root = withdrawals_root; + } + } + } + + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature { + let domain = spec.get_builder_domain(); + let message = self.signing_root(domain); + sk.sign(message) + } + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { + SignedBuilderBid { + message: self, + signature, + } + } } -pub struct TestingBuilder { - server: BlindedBlockProviderServer>, - pub builder: MockBuilder, +#[derive(Clone)] +pub struct MockBuilder { + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + val_registration_cache: Arc>>, + builder_sk: SecretKey, + operations: Arc>>, + invalidate_signatures: Arc>, } -impl TestingBuilder { - pub fn new( +impl MockBuilder { + pub fn new_for_testing( mock_el_url: SensitiveUrl, - builder_url: SensitiveUrl, beacon_url: SensitiveUrl, spec: ChainSpec, executor: TaskExecutor, - ) -> Self { + ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -198,65 +224,28 @@ impl TestingBuilder { let el = ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); - // This should probably be done for all fields, we only update ones we are testing with so far. 
- let mut context = Context::for_mainnet(); - context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); - context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); - context.terminal_block_hash_activation_epoch = - to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); - let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), spec, - context, ); - let port = builder_url.full.port().unwrap(); - let host: Ipv4Addr = builder_url - .full - .host_str() - .unwrap() - .to_string() - .parse() - .unwrap(); - let server = BlindedBlockProviderServer::new(host, port, builder.clone()); - Self { server, builder } + let host: Ipv4Addr = Ipv4Addr::LOCALHOST; + let port = 0; + let server = serve(host, port, builder.clone()).expect("mock builder server should start"); + (builder, server) } - pub async fn run(&self) { - let server = self.server.serve(); - if let Err(err) = server.await { - println!("error while listening for incoming: {err}") - } - } -} - -#[derive(Clone)] -pub struct MockBuilder { - el: ExecutionLayer, - beacon_client: BeaconNodeHttpClient, - spec: ChainSpec, - context: Arc, - val_registration_cache: Arc>>, - builder_sk: SecretKey, - operations: Arc>>, - invalidate_signatures: Arc>, -} - -impl MockBuilder { pub fn new( el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Context, ) -> Self { - let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + let sk = SecretKey::random(); Self { el, beacon_client, // Should keep spec and context consistent somehow spec, - context: Arc::new(context), val_registration_cache: Arc::new(RwLock::new(HashMap::new())), builder_sk: sk, operations: Arc::new(RwLock::new(vec![])), @@ -278,237 +267,335 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { + fn apply_operations>(&self, bid: &mut B) { 
let mut guard = self.operations.write(); while let Some(op) = guard.pop() { - op.apply(bid)?; + op.apply(bid); } - Ok(()) } } -#[async_trait] -impl mev_rs::BlindedBlockProvider for MockBuilder { - async fn register_validators( - &self, - registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), MevError> { - for registration in registrations { - let pubkey = registration.message.public_key.clone(); - let message = &mut registration.message; - verify_signed_builder_message( - message, - ®istration.signature, - &pubkey, - &self.context, - )?; - self.val_registration_cache.write().insert( - registration.message.public_key.clone(), - registration.clone(), - ); - } +pub fn serve( + listen_addr: Ipv4Addr, + listen_port: u16, + builder: MockBuilder, +) -> Result<(SocketAddr, impl Future), crate::test_utils::Error> { + let inner_ctx = builder.clone(); + let ctx_filter = warp::any().map(move || inner_ctx.clone()); - Ok(()) - } + let prefix = warp::path("eth") + .and(warp::path("v1")) + .and(warp::path("builder")); - async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { - let slot = Slot::new(bid_request.slot); - let fork = self.spec.fork_name_at_slot::(slot); - let signed_cached_data = self - .val_registration_cache - .read() - .get(&bid_request.public_key) - .ok_or_else(|| convert_err("missing registration"))? - .clone(); - let cached_data = signed_cached_data.message; + let validators = prefix + .and(warp::path("validators")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |registrations: Vec, builder: MockBuilder| async move { + for registration in registrations { + if !registration.verify_signature(&builder.spec) { + return Err(reject("invalid signature")); + } + builder + .val_registration_cache + .write() + .insert(registration.message.pubkey, registration); + } + Ok(warp::reply()) + }, + ); - let head = self - .beacon_client - .get_beacon_blocks::(BlockId::Head) - .await - .map_err(convert_err)? 
- .ok_or_else(|| convert_err("missing head block"))?; + let blinded_block = prefix + .and(warp::path("blinded_blocks")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |block: SignedBlindedBeaconBlock, builder: MockBuilder| async move { + let slot = block.slot(); + let root = match block { + SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { + return Err(reject("invalid fork")); + } + SignedBlindedBeaconBlock::Merge(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Deneb(block) => { + block.message.body.execution_payload.tree_hash_root() + } + }; - let block = head.data.message(); - let head_block_root = block.tree_hash_root(); - let head_execution_hash = block - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); - if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(custom_err(format!( - "head mismatch: {} {}", - head_execution_hash, bid_request.parent_hash - ))); - } + let fork_name = builder.spec.fork_name_at_slot::(slot); + let payload = builder + .el + .get_payload_by_root(&root) + .ok_or_else(|| reject("missing payload for tx root"))?; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: payload, + }; - let finalized_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Finalized) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? 
- .block_hash(); + let json_payload = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize response"))?; + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body( + serde_json::to_string(&json_payload) + .map_err(|_| reject("nvalid JSON"))?, + ) + .unwrap(), + ) + }, + ); - let justified_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Justified) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); + let status = prefix + .and(warp::path("status")) + .then(|| async { warp::reply() }); - let val_index = self - .beacon_client - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), - ) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing validator from state"))? - .data - .index; - let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; - let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + let header = prefix + .and(warp::path("header")) + .and(warp::path::param::().or_else(|_| async { Err(reject("Invalid slot")) })) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid parent hash")) }), + ) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid pubkey")) }), + ) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + builder: MockBuilder| async move { + let fork = builder.spec.fork_name_at_slot::(slot); + let signed_cached_data = builder + .val_registration_cache + .read() + .get(&pubkey) + .ok_or_else(|| reject("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; - let genesis_time = self - .beacon_client - .get_beacon_genesis() - .await - .map_err(convert_err)? 
- .data - .genesis_time; - let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + let head = builder + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(|_| reject("couldn't get head"))? + .ok_or_else(|| reject("missing head block"))?; - let head_state: BeaconState = self - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| custom_err("missing head state".to_string()))? - .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(convert_err)?; + let block = head.data.message(); + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + if head_execution_hash != parent_hash { + return Err(reject("head mismatch")); + } - let payload_attributes = match fork { - ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), - // the withdrawals root is filled in by operations - ForkName::Capella => { - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) - } - ForkName::Base | ForkName::Altair => { - return Err(MevError::InvalidFork); - } - }; + let finalized_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| reject("couldn't get finalized block"))? + .ok_or_else(|| reject("missing finalized block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); - self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; + let justified_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| reject("couldn't get justified block"))? + .ok_or_else(|| reject("missing justified block"))? 
+ .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; + let val_index = builder + .beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| reject("couldn't get validator"))? + .ok_or_else(|| reject("missing validator"))? + .data + .index; + let fee_recipient = cached_data.fee_recipient; + let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); - let payload = self - .el - .get_full_payload_caching::>( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) - .await - .map_err(convert_err)? - .to_payload() - .to_execution_payload_header(); + let genesis_data = builder + .beacon_client + .get_beacon_genesis() + .await + .map_err(|_| reject("couldn't get beacon genesis"))? 
+ .data; + let genesis_time = genesis_data.genesis_time; + let timestamp = + (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut message = match fork { - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), - }; - *message.gas_limit_mut() = cached_data.gas_limit; + let head_state: BeaconState = builder + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| reject("couldn't get state"))? + .ok_or_else(|| reject("missing state"))? 
+ .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| reject("couldn't get prev randao"))?; + let expected_withdrawals = match fork { + ForkName::Base | ForkName::Altair | ForkName::Merge => None, + ForkName::Capella | ForkName::Deneb => Some( + builder + .beacon_client + .get_expected_withdrawals(&StateId::Head) + .await + .unwrap() + .data, + ), + }; - self.apply_operations(&mut message)?; - let mut signature = - message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; + let payload_attributes = match fork { + // the withdrawals root is filled in by operations, but we supply the valid withdrawals + // first to avoid polluting the execution block generator with invalid payload attributes + // NOTE: this was part of an effort to add payload attribute uniqueness checks, + // which was abandoned because it broke too many tests in subtle ways. + ForkName::Merge | ForkName::Capella => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + None, + ), + ForkName::Deneb => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + ), + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")); + } + }; - if *self.invalidate_signatures.read() { - signature = Signature::default(); - } + builder + .el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; - Ok(message.to_signed_bid(signature)) - } + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; - async fn open_bid( - &self, - signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { - let node = match signed_block { - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - 
SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - } - .map_err(convert_err)?; + let (payload, _block_value, maybe_blobs_bundle): ( + ExecutionPayload, + Uint256, + Option>, + ) = builder + .el + .get_full_payload_caching( + head_execution_hash, + &payload_attributes, + forkchoice_update_params, + fork, + ) + .await + .map_err(|_| reject("couldn't get payload"))? + .into(); - let payload = self - .el - .get_payload_by_root(&from_ssz_rs(&node)?) - .ok_or_else(|| convert_err("missing payload for tx root"))?; + let mut message = match fork { + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blinded_blobs_bundle: maybe_blobs_bundle + .map(Into::into) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Merge => BuilderBid::Merge(BuilderBidMerge { + header: payload + .as_merge() + .map_err(|_| reject("incorrect payload variant"))? 
+ .into(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => return Err(reject("invalid fork")), + }; - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - serde_json::from_str(json_payload.as_str()).map_err(convert_err) - } + message.set_gas_limit(cached_data.gas_limit); + + builder.apply_operations(&mut message); + + let mut signature = + message.sign_builder_message(&builder.builder_sk, &builder.spec); + + if *builder.invalidate_signatures.read() { + signature = Signature::empty(); + } + + let fork_name = builder + .spec + .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); + let signed_bid = SignedBuilderBid { message, signature }; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: signed_bid, + }; + let json_bid = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize signed bid"))?; + Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(json_bid) + .unwrap(), + ) + }, + ); + + let routes = warp::post() + .and(validators.or(blinded_block)) + .or(warp::get().and(status).or(header)) + .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); + + let (listening_socket, server) = warp::serve(routes) + .try_bind_ephemeral(SocketAddrV4::new(listen_addr, listen_port)) + .expect("mock builder server should start"); + Ok((listening_socket, server)) } -pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { - U::from_ssz_bytes( - ssz_rs::serialize(ssz_rs_data) - .map_err(convert_err)? - .as_ref(), - ) - .map_err(convert_err) -} - -pub fn to_ssz_rs(ssz_data: &T) -> Result { - ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) -} - -fn convert_err(e: E) -> MevError { - custom_err(format!("{e:?}")) -} - -// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. 
-fn custom_err(s: String) -> MevError { - MevError::Consensus(ethereum_consensus::state_transition::Error::Io( - std::io::Error::new(std::io::ErrorKind::Other, s), - )) +fn reject(msg: &'static str) -> Rejection { + warp::reject::custom(Custom(msg.to_string())) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 2b512d8b1c..6717428436 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -5,6 +5,7 @@ use crate::{ }, Config, *, }; +use kzg::Kzg; use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; @@ -29,6 +30,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, None, None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -40,10 +42,11 @@ impl MockExecutionLayer { executor: TaskExecutor, terminal_block: u64, shanghai_time: Option, + cancun_time: Option, builder_threshold: Option, jwt_key: Option, spec: ChainSpec, - builder_url: Option, + kzg: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -55,6 +58,8 @@ impl MockExecutionLayer { terminal_block, spec.terminal_block_hash, shanghai_time, + cancun_time, + kzg, ); let url = SensitiveUrl::parse(&server.url()).unwrap(); @@ -65,7 +70,6 @@ impl MockExecutionLayer { let config = Config { execution_endpoints: vec![url], - builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), @@ -99,13 +103,8 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - let payload_attributes = PayloadAttributes::new( - timestamp, - prev_randao, - Address::repeat_byte(42), - // FIXME: think about how to handle different forks / withdrawals here.. 
- None, - ); + let payload_attributes = + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -133,7 +132,7 @@ impl MockExecutionLayer { }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); let payload: ExecutionPayload = self .el .get_payload::>( @@ -141,7 +140,6 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? What about withdrawals? ForkName::Merge, &self.spec, ) @@ -168,7 +166,7 @@ impl MockExecutionLayer { }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); let payload_header = self .el .get_payload::>( @@ -176,7 +174,6 @@ impl MockExecutionLayer { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: do we need to consider other forks somehow? What about withdrawals? ForkName::Merge, &self.spec, ) @@ -194,10 +191,15 @@ impl MockExecutionLayer { assert_eq!( self.el .get_payload_by_root(&payload_header.tree_hash_root()), - Some(payload.clone()) + Some(FullPayloadContents::Payload(payload.clone())) ); - let status = self.el.notify_new_payload(&payload).await.unwrap(); + // TODO: again consider forks + let status = self + .el + .notify_new_payload(payload.try_into().unwrap()) + .await + .unwrap(); assert_eq!(status, PayloadStatus::Valid); // Use junk values for slot/head-root to ensure there is no payload supplied. 
diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 99d264aa7b..92be94e603 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -8,6 +8,7 @@ use bytes::Bytes; use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; +use kzg::Kzg; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -23,9 +24,12 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; -pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use execution_block_generator::{ + generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, Block, + ExecutionBlockGenerator, +}; pub use hook::Hook; -pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; +pub use mock_builder::{MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; @@ -37,12 +41,15 @@ pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: true, + new_payload_v3: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, + forkchoice_updated_v3: true, get_payload_bodies_by_hash_v1: true, get_payload_bodies_by_range_v1: true, get_payload_v1: true, get_payload_v2: true, + get_payload_v3: true, }; mod execution_block_generator; @@ -59,6 +66,7 @@ pub struct MockExecutionConfig { pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, pub shanghai_time: Option, + pub cancun_time: Option, } impl Default for MockExecutionConfig { @@ -70,6 +78,7 @@ impl Default for MockExecutionConfig { terminal_block_hash: 
ExecutionBlockHash::zero(), server_config: Config::default(), shanghai_time: None, + cancun_time: None, } } } @@ -90,10 +99,16 @@ impl MockServer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), None, // FIXME(capella): should this be the default? + None, // FIXME(deneb): should this be the default? + None, // FIXME(deneb): should this be the default? ) } - pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self { + pub fn new_with_config( + handle: &runtime::Handle, + config: MockExecutionConfig, + kzg: Option, + ) -> Self { let MockExecutionConfig { jwt_key, terminal_difficulty, @@ -101,6 +116,7 @@ impl MockServer { terminal_block_hash, server_config, shanghai_time, + cancun_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -109,6 +125,8 @@ impl MockServer { terminal_block, terminal_block_hash, shanghai_time, + cancun_time, + kzg, ); let ctx: Arc> = Arc::new(Context { @@ -161,6 +179,7 @@ impl MockServer { *self.ctx.engine_capabilities.write() = engine_capabilities; } + #[allow(clippy::too_many_arguments)] pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, @@ -168,6 +187,8 @@ impl MockServer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, shanghai_time: Option, + cancun_time: Option, + kzg: Option, ) -> Self { Self::new_with_config( handle, @@ -178,7 +199,9 @@ impl MockServer { terminal_block, terminal_block_hash, shanghai_time, + cancun_time, }, + kzg, ) } diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 8a7d224963..b01e6a6aea 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -2,23 +2,23 @@ name = "genesis" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -eth1_test_rig = { path = "../../testing/eth1_test_rig" } -sensitive_url = { path = "../../common/sensitive_url" } +eth1_test_rig = { workspace = 
true } +sensitive_url = { workspace = true } [dependencies] -futures = "0.3.7" -types = { path = "../../consensus/types"} -environment = { path = "../../lighthouse/environment"} -eth1 = { path = "../eth1"} -rayon = "1.4.1" -state_processing = { path = "../../consensus/state_processing" } -merkle_proof = { path = "../../consensus/merkle_proof" } -ethereum_ssz = "0.5.0" -ethereum_hashing = "1.0.0-beta.2" -tree_hash = "0.5.0" -tokio = { version = "1.14.0", features = ["full"] } -slog = "2.5.2" -int_to_bytes = { path = "../../consensus/int_to_bytes" } +futures = { workspace = true } +types = { workspace = true } +environment = { workspace = true } +eth1 = { workspace = true } +rayon = { workspace = true } +state_processing = { workspace = true } +merkle_proof = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_hashing = { workspace = true } +tree_hash = { workspace = true } +tokio = { workspace = true } +slog = { workspace = true } +int_to_bytes = { workspace = true } diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 06bf99f9f6..e48fa36204 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -39,7 +39,7 @@ pub fn genesis_deposits( Ok(deposit_data .into_iter() - .zip(proofs.into_iter()) + .zip(proofs) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect()) diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0fa286f165..b58e0442f7 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -2,54 +2,53 @@ name = "http_api" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } autotests = false # using a single test binary compiles faster [dependencies] -warp = { version = "0.3.2", features = ["tls"] } -serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.14.0", features = ["macros", "sync"] } -tokio-stream = { 
version = "0.1.3", features = ["sync"] } -types = { path = "../../consensus/types" } -hex = "0.4.2" -beacon_chain = { path = "../beacon_chain" } -eth2 = { path = "../../common/eth2", features = ["lighthouse"] } -slog = "2.5.2" -network = { path = "../network" } -lighthouse_network = { path = "../lighthouse_network" } -eth1 = { path = "../eth1" } -state_processing = { path = "../../consensus/state_processing" } -lighthouse_version = { path = "../../common/lighthouse_version" } -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lazy_static = "1.4.0" -warp_utils = { path = "../../common/warp_utils" } -slot_clock = { path = "../../common/slot_clock" } -ethereum_ssz = "0.5.0" +warp = { workspace = true } +serde = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } +hex = { workspace = true } +beacon_chain = { workspace = true } +eth2 = { workspace = true } +slog = { workspace = true } +network = { workspace = true } +lighthouse_network = { workspace = true } +eth1 = { workspace = true } +state_processing = { workspace = true } +lighthouse_version = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +warp_utils = { workspace = true } +slot_clock = { workspace = true } +ethereum_ssz = { workspace = true } bs58 = "0.4.0" -futures = "0.3.8" -execution_layer = { path = "../execution_layer" } -parking_lot = "0.12.0" -safe_arith = { path = "../../consensus/safe_arith" } -task_executor = { path = "../../common/task_executor" } -lru = "0.7.7" -tree_hash = "0.5.0" -sysinfo = "0.26.5" +futures = { workspace = true } +execution_layer = { workspace = true } +parking_lot = { workspace = true } +safe_arith = { workspace = true } +task_executor = { workspace = true } +lru = { workspace = true } +tree_hash = { workspace = true } +sysinfo = { workspace = true } system_health = { path = "../../common/system_health" } -directory = { path = 
"../../common/directory" } -logging = { path = "../../common/logging" } -ethereum_serde_utils = "0.5.0" -operation_pool = { path = "../operation_pool" } -sensitive_url = { path = "../../common/sensitive_url" } -unused_port = { path = "../../common/unused_port" } -store = { path = "../store" } -bytes = "1.1.0" -beacon_processor = { path = "../beacon_processor" } +directory = { workspace = true } +logging = { workspace = true } +ethereum_serde_utils = { workspace = true } +operation_pool = { workspace = true } +sensitive_url = { workspace = true } +store = { workspace = true } +bytes = { workspace = true } +beacon_processor = { workspace = true } [dev-dependencies] -environment = { path = "../../lighthouse/environment" } -serde_json = "1.0.58" -proto_array = { path = "../../consensus/proto_array" } -genesis = { path = "../genesis" } +environment = { workspace = true } +serde_json = { workspace = true } +proto_array = { workspace = true } +genesis = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index f1a42b8744..45fc651f05 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,10 +1,11 @@ use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; +use types::{BlobSidecarList, EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -250,6 +251,37 @@ impl BlockId { } } } + + /// Return the `BlobSidecarList` identified by `self`. 
+ pub fn blob_sidecar_list( + &self, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + let root = self.root(chain)?.0; + chain + .get_blobs(&root) + .map_err(warp_utils::reject::beacon_chain_error) + } + + pub fn blob_sidecar_list_filtered( + &self, + indices: BlobIndicesQuery, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + let blob_sidecar_list = self.blob_sidecar_list(chain)?; + let blob_sidecar_list_filtered = match indices.indices { + Some(vec) => { + let list = blob_sidecar_list + .into_iter() + .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) + .collect(); + BlobSidecarList::new(list) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? + } + None => blob_sidecar_list, + }; + Ok(blob_sidecar_list_filtered) + } } impl FromStr for BlockId { diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs new file mode 100644 index 0000000000..c8f28fa9ae --- /dev/null +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -0,0 +1,62 @@ +use beacon_chain::BlockProductionError; +use eth2::types::{BeaconBlockAndBlobSidecars, BlindedBeaconBlockAndBlobSidecars, BlockContents}; +use types::{ + BeaconBlock, BlindedBlobSidecarList, BlindedPayload, BlobSidecarList, EthSpec, ForkName, + FullPayload, +}; + +type Error = warp::reject::Rejection; +type FullBlockContents = BlockContents>; +type BlindedBlockContents = BlockContents>; + +pub fn build_block_contents( + fork_name: ForkName, + block: BeaconBlock>, + maybe_blobs: Option>, +) -> Result, Error> { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Ok(BlockContents::Block(block)) + } + ForkName::Deneb => { + if let Some(blob_sidecars) = maybe_blobs { + let block_and_blobs = BeaconBlockAndBlobSidecars { + block, + blob_sidecars, + }; + + Ok(BlockContents::BlockAndBlobSidecars(block_and_blobs)) + } else { + Err(warp_utils::reject::block_production_error( + 
BlockProductionError::MissingBlobs, + )) + } + } + } +} + +pub fn build_blinded_block_contents( + fork_name: ForkName, + block: BeaconBlock>, + maybe_blobs: Option>, +) -> Result, Error> { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Ok(BlockContents::Block(block)) + } + ForkName::Deneb => { + if let Some(blinded_blob_sidecars) = maybe_blobs { + let block_and_blobs = BlindedBeaconBlockAndBlobSidecars { + blinded_block: block, + blinded_blob_sidecars, + }; + + Ok(BlockContents::BlindedBlockAndBlobSidecars(block_and_blobs)) + } else { + Err(warp_utils::reject::block_production_error( + BlockProductionError::MissingBlobs, + )) + } + } + } +} diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs new file mode 100644 index 0000000000..90203f2d60 --- /dev/null +++ b/beacon_node/http_api/src/builder_states.rs @@ -0,0 +1,72 @@ +use crate::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use safe_arith::SafeArith; +use state_processing::per_block_processing::get_expected_withdrawals; +use state_processing::state_advance::partial_state_advance; +use std::sync::Arc; +use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; + +const MAX_EPOCH_LOOKAHEAD: u64 = 2; + +/// Get the withdrawals computed from the specified state, that will be included in the block +/// that gets built on the specified state. +pub fn get_next_withdrawals( + chain: &Arc>, + mut state: BeaconState, + state_id: StateId, + proposal_slot: Slot, +) -> Result, warp::Rejection> { + get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; + + // advance the state to the epoch of the proposal slot. 
+ let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + let (state_root, _, _) = state_id.root(chain)?; + if proposal_epoch != state.current_epoch() { + if let Err(e) = + partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) + { + return Err(warp_utils::reject::custom_server_error(format!( + "failed to advance to the epoch of the proposal slot: {:?}", + e + ))); + } + } + + match get_expected_withdrawals(&state, &chain.spec) { + Ok(withdrawals) => Ok(withdrawals), + Err(e) => Err(warp_utils::reject::custom_server_error(format!( + "failed to get expected withdrawal: {:?}", + e + ))), + } +} + +fn get_next_withdrawals_sanity_checks( + chain: &BeaconChain, + state: &BeaconState, + proposal_slot: Slot, +) -> Result<(), warp::Rejection> { + if proposal_slot <= state.slot() { + return Err(warp_utils::reject::custom_bad_request( + "proposal slot must be greater than the pre-state slot".to_string(), + )); + } + + let fork = chain.spec.fork_name_at_slot::(proposal_slot); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { + return Err(warp_utils::reject::custom_bad_request( + "the specified state is a pre-capella state.".to_string(), + )); + } + + let look_ahead_limit = MAX_EPOCH_LOOKAHEAD + .safe_mul(T::EthSpec::slots_per_epoch()) + .map_err(warp_utils::reject::arith_error)?; + if proposal_slot >= state.slot() + look_ahead_limit { + return Err(warp_utils::reject::custom_bad_request(format!( + "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" + ))); + } + + Ok(()) +} diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 645c19c40e..aa8b0e8ffc 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -1,8 +1,7 @@ -use beacon_chain::store::{metadata::CURRENT_SCHEMA_VERSION, AnchorInfo}; +use beacon_chain::store::metadata::CURRENT_SCHEMA_VERSION; use beacon_chain::{BeaconChain, BeaconChainTypes}; use 
eth2::lighthouse::DatabaseInfo; use std::sync::Arc; -use types::SignedBlindedBeaconBlock; pub fn info( chain: Arc>, @@ -11,25 +10,13 @@ pub fn info( let split = store.get_split_info(); let config = store.get_config().clone(); let anchor = store.get_anchor_info(); + let blob_info = store.get_blob_info(); Ok(DatabaseInfo { schema_version: CURRENT_SCHEMA_VERSION.as_u64(), config, split, anchor, + blob_info, }) } - -pub fn historical_blocks( - chain: Arc>, - blocks: Vec>>, -) -> Result { - chain - .import_historical_block_batch(blocks) - .map_err(warp_utils::reject::beacon_chain_error)?; - - let anchor = chain.store.get_anchor_info().ok_or_else(|| { - warp_utils::reject::custom_bad_request("node is not checkpoint synced".to_string()) - })?; - Ok(anchor) -} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8e316834d0..a807655189 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,6 +10,8 @@ mod attester_duties; mod block_id; mod block_packing_efficiency; mod block_rewards; +mod build_block_contents; +mod builder_states; mod database; mod metrics; mod proposer_duties; @@ -32,11 +34,13 @@ use beacon_chain::{ }; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; +use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - SkipRandaoVerification, ValidatorId, ValidatorStatus, + SignedBlindedBlockContents, SignedBlockContents, SkipRandaoVerification, ValidatorId, + ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -73,9 +77,8 @@ use types::{ Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, 
ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -137,6 +140,8 @@ pub struct Config { pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, + #[serde(with = "eth2::types::serde_status_code")] + pub duplicate_block_status_code: StatusCode, } impl Default for Config { @@ -152,6 +157,7 @@ impl Default for Config { data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, + duplicate_block_status_code: StatusCode::ACCEPTED, } } } @@ -508,6 +514,8 @@ pub fn serve( let task_spawner_filter = warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let duplicate_block_status_code = ctx.config.duplicate_block_status_code; + /* * * Start of HTTP method definitions. 
@@ -1282,22 +1290,22 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_contents: SignedBlockContents, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1312,29 +1320,29 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid SSZ: {e:?}" - )) - })?; + let block_contents = SignedBlockContents::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(Arc::new(block)), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1350,23 +1358,23 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { 
+ move |validation_level: api_types::BroadcastValidationQuery, + block_contents: SignedBlockContents, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, - ProvenancedBlock::local(block), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1382,30 +1390,30 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid SSZ: {e:?}" - )) - })?; + let block_contents = SignedBlockContents::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_block( None, - ProvenancedBlock::local(Arc::new(block)), + ProvenancedBlock::local(block_contents), chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1425,21 +1433,21 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_contents: SignedBlindedBlockContents, + task_spawner: 
TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( - block, + block_contents, chain, &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1455,28 +1463,29 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBeaconBlock::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; + let block = + SignedBlockContents::>::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1492,87 +1501,64 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { - task_spawner.spawn_async(Priority::P0, async move { - match publish_blocks::publish_blinded_block( - block, + move |validation_level: api_types::BroadcastValidationQuery, + block_contents: SignedBlindedBlockContents, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + 
publish_blocks::publish_blinded_block( + block_contents, chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } }) }, ); - let post_beacon_blinded_blocks_v2_ssz = - eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .and(log_filter.clone()) - .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| async move { + let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = - match SignedBeaconBlock::>::from_ssz_bytes( + SignedBlockContents::>::from_ssz_bytes( &block_bytes, &chain.spec, - ) { - Ok(data) => data, - Err(_) => { - return warp::reply::with_status( - StatusCode::BAD_REQUEST, - eth2::StatusCode::BAD_REQUEST, - ) - .into_response(); - } - }; - match publish_blocks::publish_blinded_block( + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( block, chain, 
&network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } - }, - ); + }) + }, + ); let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( @@ -1646,13 +1632,19 @@ pub fn serve( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - block.canonical_root(), - )) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee messages. 
+ let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; + Ok( + api_types::GenericResponse::from(api_types::RootData::from(block_root)) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) }) }, ); @@ -1726,6 +1718,49 @@ pub fn serve( }, ); + /* + * beacon/blob_sidecars + */ + + // GET beacon/blob_sidecars/{block_id} + let get_blobs = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("blob_sidecars")) + .and(block_id_or_err) + .and(warp::query::()) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + indices: api_types::BlobIndicesQuery, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let blob_sidecar_list_filtered = + block_id.blob_sidecar_list_filtered(indices, &chain)?; + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(blob_sidecar_list_filtered.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&api_types::GenericResponse::from( + blob_sidecar_list_filtered, + )) + .into_response()), + } + }) + }, + ); + /* * beacon/pool */ @@ -2291,6 +2326,60 @@ pub fn serve( }, ); + /* + * builder/states + */ + + let builder_states_path = eth_v1 + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); + + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path::param::()) + 
.and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(withdrawals.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } + }) + }, + ); + /* * beacon/rewards */ @@ -2788,12 +2877,8 @@ pub fn serve( })?; if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) { - let address = if let Some(socket_addr) = peer_info.seen_addresses().next() { - let mut addr = lighthouse_network::Multiaddr::from(socket_addr.ip()); - addr.push(lighthouse_network::multiaddr::Protocol::Tcp( - socket_addr.port(), - )); - addr.to_string() + let address = if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() } else if let Some(addr) = peer_info.listening_addresses().first() { addr.to_string() } else { @@ -2841,13 +2926,8 @@ pub fn serve( .peers() .for_each(|(peer_id, peer_info)| { let address = - if let Some(socket_addr) = peer_info.seen_addresses().next() { - let mut addr = - lighthouse_network::Multiaddr::from(socket_addr.ip()); - 
addr.push(lighthouse_network::multiaddr::Protocol::Tcp( - socket_addr.port(), - )); - addr.to_string() + if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() } else if let Some(addr) = peer_info.listening_addresses().first() { addr.to_string() } else { @@ -2974,6 +3054,7 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(warp::query::()) + .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .and(log_filter.clone()) @@ -2981,6 +3062,7 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, + accept_header: Option, task_spawner: TaskSpawner, chain: Arc>, log: Logger| { @@ -3002,16 +3084,16 @@ pub fn serve( if query.skip_randao_verification == SkipRandaoVerification::Yes { if !randao_reveal.is_infinity() { return Err(warp_utils::reject::custom_bad_request( - "randao_reveal must be point-at-infinity if verification is skipped" - .into(), - )); + "randao_reveal must be point-at-infinity if verification is skipped" + .into(), + )); } ProduceBlockVerification::NoVerification } else { ProduceBlockVerification::VerifyRandao }; - let (block, _) = chain + let (block, _, maybe_blobs) = chain .produce_block_with_verification::>( randao_reveal, slot, @@ -3025,9 +3107,27 @@ pub fn serve( .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) + let block_contents = + build_block_contents::build_block_contents(fork_name, block, maybe_blobs)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(block_contents.as_ssz_bytes().into()) + .map(|res: Response| { + add_consensus_version_header(res, fork_name) + }) + .map_err(|e| { + 
warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => fork_versioned_response(endpoint_version, fork_name, block_contents) + .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)), + } }) }, ); @@ -3044,11 +3144,13 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(warp::query::()) + .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( |slot: Slot, query: api_types::ValidatorBlocksQuery, + accept_header: Option, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { @@ -3072,7 +3174,7 @@ pub fn serve( ProduceBlockVerification::VerifyRandao }; - let (block, _) = chain + let (block, _, maybe_blobs) = chain .produce_block_with_verification::>( randao_reveal, slot, @@ -3086,10 +3188,31 @@ pub fn serve( .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - // Pose as a V2 endpoint so we return the fork `version`. - fork_versioned_response(V2, fork_name, block) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) + let block_contents = build_block_contents::build_blinded_block_contents( + fork_name, + block, + maybe_blobs, + )?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(block_contents.as_ssz_bytes().into()) + .map(|res: Response| { + add_consensus_version_header(res, fork_name) + }) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + // Pose as a V2 endpoint so we return the fork `version`. 
+ _ => fork_versioned_response(V2, fork_name, block_contents) + .map(|response| warp::reply::json(&response).into_response()) + .map(|res| add_consensus_version_header(res, fork_name)), + } }) }, ); @@ -3607,12 +3730,13 @@ pub fn serve( // send the response back to our original HTTP request // task via a channel. let builder_future = async move { - let builder = chain + let arc_builder = chain .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) .map_err(warp_utils::reject::beacon_chain_error)? - .builder() + .builder(); + let builder = arc_builder .as_ref() .ok_or(BeaconChainError::BuilderMissing) .map_err(warp_utils::reject::beacon_chain_error)?; @@ -4199,31 +4323,6 @@ pub fn serve( }, ); - // POST lighthouse/database/historical_blocks - let post_lighthouse_database_historical_blocks = database_path - .and(warp::path("historical_blocks")) - .and(warp::path::end()) - .and(warp::body::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(log_filter.clone()) - .then( - |blocks: Vec>>, - task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { - info!( - log, - "Importing historical blocks"; - "count" => blocks.len(), - "source" => "http_api" - ); - task_spawner.blocking_json_task(Priority::P1, move || { - database::historical_blocks(chain, blocks) - }) - }, - ); - // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4298,7 +4397,8 @@ pub fn serve( .then( |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P1, async move { - let merge_readiness = chain.check_merge_readiness().await; + let current_slot = chain.slot_clock.now_or_genesis().unwrap_or(Slot::new(0)); + let merge_readiness = chain.check_merge_readiness(current_slot).await; Ok::<_, warp::reject::Rejection>( warp::reply::json(&api_types::GenericResponse::from(merge_readiness)) .into_response(), @@ -4327,6 +4427,9 @@ pub fn serve( let 
receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), + api_types::EventTopic::BlobSidecar => { + event_handler.subscribe_blob_sidecar() + } api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } @@ -4456,6 +4559,7 @@ pub fn serve( .uor(get_beacon_block_attestations) .uor(get_beacon_blinded_block) .uor(get_beacon_block_root) + .uor(get_blobs) .uor(get_beacon_pool_attestations) .uor(get_beacon_pool_attester_slashings) .uor(get_beacon_pool_proposer_slashings) @@ -4503,6 +4607,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) @@ -4540,7 +4645,6 @@ pub fn serve( .uor(post_validator_liveness_epoch) .uor(post_lighthouse_liveness) .uor(post_lighthouse_database_reconstruct) - .uor(post_lighthouse_database_historical_blocks) .uor(post_lighthouse_block_rewards) .uor(post_lighthouse_ui_validator_metrics) .uor(post_lighthouse_ui_validator_info) @@ -4601,6 +4705,14 @@ fn publish_pubsub_message( ) } +/// Publish a message to the libp2p pubsub network. +fn publish_pubsub_messages( + network_tx: &UnboundedSender>, + messages: Vec>, +) -> Result<(), warp::Rejection> { + publish_network_message(network_tx, NetworkMessage::Publish { messages }) +} + /// Publish a message to the libp2p network. 
fn publish_network_message( network_tx: &UnboundedSender>, diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0f2f7b361c..e41cf51ec3 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,10 +1,13 @@ use crate::metrics; + +use beacon_chain::block_verification_types::{AsBlock, BlockContentsError}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, - NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, + IntoGossipVerifiedBlockContents, NotifyExecutionLayer, }; -use eth2::types::BroadcastValidation; +use eth2::types::{BroadcastValidation, ErrorMessage}; +use eth2::types::{FullPayloadContents, SignedBlockContents}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -17,11 +20,12 @@ use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, SignedBeaconBlock, + ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList, }; -use warp::Rejection; +use warp::http::StatusCode; +use warp::{reply::Response, Rejection, Reply}; -pub enum ProvenancedBlock> { +pub enum ProvenancedBlock> { /// The payload was built using a local EE. Local(B, PhantomData), /// The payload was build using a remote builder (e.g., via a mev-boost @@ -29,7 +33,7 @@ pub enum ProvenancedBlock> { Builder(B, PhantomData), } -impl> ProvenancedBlock { +impl> ProvenancedBlock { pub fn local(block: B) -> Self { Self::Local(block, PhantomData) } @@ -40,25 +44,28 @@ impl> ProvenancedBlock } /// Handles a request from the HTTP API for full blocks. 
-pub async fn publish_block>( +pub async fn publish_block>( block_root: Option, provenanced_block: ProvenancedBlock, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { + duplicate_status_code: StatusCode, +) -> Result { let seen_timestamp = timestamp_now(); - let (block, is_locally_built_block) = match provenanced_block { - ProvenancedBlock::Local(block, _) => (block, true), - ProvenancedBlock::Builder(block, _) => (block, false), + + let (block_contents, is_locally_built_block) = match provenanced_block { + ProvenancedBlock::Local(block_contents, _) => (block_contents, true), + ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), }; - let beacon_block = block.inner(); - let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock); - debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot()); + let block = block_contents.inner_block(); + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); /* actually publish a block */ let publish_block = move |block: Arc>, + blobs_opt: Option>, sender, log, seen_timestamp| { @@ -69,40 +76,97 @@ pub async fn publish_block>( info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); - let message = PubsubMessage::BeaconBlock(block); - crate::publish_pubsub_message(&sender, message) - .map_err(|_| BeaconChainError::UnableToPublish.into()) + match block.as_ref() { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => { + crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; + } + SignedBeaconBlock::Deneb(_) => { + let mut pubsub_messages = 
vec![PubsubMessage::BeaconBlock(block.clone())]; + if let Some(signed_blobs) = blobs_opt { + for (blob_index, blob) in signed_blobs.into_iter().enumerate() { + pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( + blob_index as u64, + blob, + )))); + } + } + crate::publish_pubsub_messages(&sender, pubsub_messages) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?; + } + }; + Ok(()) }; + /* only publish if gossip- and consensus-valid and equivocation-free */ + let chain_clone = chain.clone(); + let slot = block.message().slot(); + let proposer_index = block.message().proposer_index(); + let sender_clone = network_tx.clone(); + let log_clone = log.clone(); + + // We can clone this because the blobs are `Arc`'d in `BlockContents`, but the block is not, + // so we avoid cloning the block at this point. + let blobs_opt = block_contents.inner_blobs(); + /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ - let gossip_verified_block = block.into_gossip_verified_block(&chain).map_err(|e| { - warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); - warp_utils::reject::custom_bad_request(e.to_string()) - })?; + let (gossip_verified_block, gossip_verified_blobs) = + match block_contents.into_gossip_verified_block(&chain) { + Ok(b) => b, + Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) => { + // Allow the status code for duplicate blocks to be overridden based on config. 
+ return Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()); + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => slot, + "error" => ?e + ); + return Err(warp_utils::reject::custom_bad_request(e.to_string())); + } + }; + + // Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not, + // `Arc`'d but blobs are. + let block = gossip_verified_block.block.block_cloned(); let block_root = block_root.unwrap_or(gossip_verified_block.block_root); if let BroadcastValidation::Gossip = validation_level { publish_block( - beacon_block.clone(), - network_tx.clone(), + block.clone(), + blobs_opt.clone(), + sender_clone.clone(), log.clone(), seen_timestamp, ) .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; } - /* only publish if gossip- and consensus-valid and equivocation-free */ - let chain_clone = chain.clone(); - let block_clone = beacon_block.clone(); - let log_clone = log.clone(); - let sender_clone = network_tx.clone(); + let block_clone = block.clone(); let publish_fn = move || match validation_level { BroadcastValidation::Gossip => Ok(()), - BroadcastValidation::Consensus => { - publish_block(block_clone, sender_clone, log_clone, seen_timestamp) - } + BroadcastValidation::Consensus => publish_block( + block_clone, + blobs_opt, + sender_clone, + log_clone, + seen_timestamp, + ), BroadcastValidation::ConsensusAndEquivocation => { if chain_clone .observed_block_producers @@ -118,11 +182,35 @@ pub async fn publish_block>( ); Err(BlockError::Slashable) } else { - publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + publish_block( + block_clone, + blobs_opt, + sender_clone, + log_clone, + seen_timestamp, + ) } } }; + if let Some(gossip_verified_blobs) = gossip_verified_blobs { + for 
blob in gossip_verified_blobs { + if let Err(e) = chain.process_gossip_blob(blob).await { + let msg = format!("Invalid blob: {e}"); + return if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid blob provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) + }; + } + } + } + match chain .process_block( block_root, @@ -132,20 +220,20 @@ pub async fn publish_block>( ) .await { - Ok(root) => { + Ok(AvailabilityProcessingStatus::Imported(root)) => { info!( log, "Valid block from HTTP API"; "block_delay" => ?delay, "root" => format!("{}", root), - "proposer_index" => beacon_block.message().proposer_index(), - "slot" => beacon_block.slot(), + "proposer_index" => proposer_index, + "slot" =>slot, ); // Notify the validator monitor. chain.validator_monitor.read().register_api_block( seen_timestamp, - beacon_block.message(), + block.message(), root, &chain.slot_clock, ); @@ -158,17 +246,22 @@ pub async fn publish_block>( // blocks built with builders we consider the broadcast time to be // when the blinded block is published to the builder. 
if is_locally_built_block { - late_block_logging( - &chain, - seen_timestamp, - beacon_block.message(), - root, - "local", - &log, - ) + late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) + } + Ok(warp::reply().into_response()) + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => { + let msg = format!("Missing parts of block with root {:?}", block_root); + if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(msg)) + } else { + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(msg)) } - - Ok(()) } Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { Err(warp_utils::reject::custom_server_error( @@ -178,10 +271,6 @@ pub async fn publish_block>( Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( "proposal for this slot and proposer has already been seen".to_string(), )), - Err(BlockError::BlockIsAlreadyKnown) => { - info!(log, "Block from HTTP API already known"; "block" => ?block_root); - Ok(()) - } Err(e) => { if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) @@ -203,15 +292,16 @@ pub async fn publish_block>( /// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full /// blocks before publishing. 
pub async fn publish_blinded_block( - block: SignedBeaconBlock>, + block_contents: SignedBlockContents>, chain: Arc>, network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { - let block_root = block.canonical_root(); - let full_block: ProvenancedBlock>> = - reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + duplicate_status_code: StatusCode, +) -> Result { + let block_root = block_contents.signed_block().canonical_root(); + let full_block: ProvenancedBlock> = + reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?; publish_block::( Some(block_root), full_block, @@ -219,6 +309,7 @@ pub async fn publish_blinded_block( network_tx, log, validation_level, + duplicate_status_code, ) .await } @@ -229,28 +320,28 @@ pub async fn publish_blinded_block( pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, - block: SignedBeaconBlock>, + block_contents: SignedBlockContents>, log: Logger, -) -> Result>>, Rejection> { +) -> Result>, Rejection> { + let block = block_contents.signed_block(); let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) })?; // If the execution block hash is zero, use an empty payload. - let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { - let payload = FullPayload::default_at_fork( - chain - .spec - .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())), - ) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Default payload construction error: {e:?}" - )) - })? 
- .into(); - ProvenancedPayload::Local(payload) + let full_payload_contents = if payload_header.block_hash() == ExecutionBlockHash::zero() { + let fork_name = chain + .spec + .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())); + if fork_name == ForkName::Merge { + let payload: FullPayload = FullPayloadMerge::default().into(); + ProvenancedPayload::Local(FullPayloadContents::Payload(payload.into())) + } else { + Err(warp_utils::reject::custom_server_error( + "Failed to construct full payload - block hash must be non-zero after Bellatrix.".to_string() + ))? + } // If we already have an execution payload with this transactions root cached, use it. } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) @@ -274,7 +365,7 @@ pub async fn reconstruct_block( ); let full_payload = el - .propose_blinded_beacon_block(block_root, &block) + .propose_blinded_beacon_block(block_root, &block_contents) .await .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -286,7 +377,7 @@ pub async fn reconstruct_block( ProvenancedPayload::Builder(full_payload) }; - Some(full_payload) + Some(full_payload_contents) } else { None }; @@ -294,21 +385,18 @@ pub async fn reconstruct_block( match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. 
- None => block - .try_into_full_block(None) - .map(Arc::new) + None => block_contents + .try_into_full_block_and_blobs(None) .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Local(full_payload)) => block - .try_into_full_block(Some(full_payload)) - .map(Arc::new) + Some(ProvenancedPayload::Local(full_payload_contents)) => block_contents + .try_into_full_block_and_blobs(Some(full_payload_contents)) .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Builder(full_payload)) => block - .try_into_full_block(Some(full_payload)) - .map(Arc::new) + Some(ProvenancedPayload::Builder(full_payload_contents)) => block_contents + .try_into_full_block_and_blobs(Some(full_payload_contents)) .map(ProvenancedBlock::builder), } - .ok_or_else(|| { - warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}")) }) } diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index de7e5eb7d3..97e5a87fd3 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -5,8 +5,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::StandardBlockReward; use std::sync::Arc; use warp_utils::reject::beacon_chain_error; -//// The difference between block_rewards and beacon_block_rewards is the later returns block -//// reward format that satisfies beacon-api specs +/// The difference between block_rewards and beacon_block_rewards is the later returns block +/// reward format that satisfies beacon-api specs pub fn compute_beacon_block_rewards( chain: Arc>, block_id: BlockId, diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 5e86053771..1a76333e2d 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -89,9 +89,7 @@ impl StateId 
{ } else { // This block is either old and finalized, or recent and unfinalized, so // it's safe to fallback to the optimistic status of the finalized block. - chain - .canonical_head - .fork_choice_read_lock() + fork_choice .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)? diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index dcf41429f6..8b0c7dc0ef 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -30,9 +30,7 @@ pub fn sync_committee_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let altair_fork_epoch = if let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch { - altair_fork_epoch - } else { + let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch else { // Empty response for networks with Altair disabled. return Ok(convert_to_response(vec![], false)); }; diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index 503faff717..8768e057da 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -159,46 +159,6 @@ impl TaskSpawner { .and_then(|x| x) } } - - /// Executes an async task which always returns a `Response`. - pub async fn spawn_async( - self, - priority: Priority, - func: impl Future + Send + Sync + 'static, - ) -> Response { - if let Some(beacon_processor_send) = &self.beacon_processor_send { - // Create a wrapper future that will execute `func` and send the - // result to a channel held by this thread. - let (tx, rx) = oneshot::channel(); - let process_fn = async move { - // Await the future, collect the return value. - let func_result = func.await; - // Send the result down the channel. Ignore any failures; the - // send can only fail if the receiver is dropped. 
- let _ = tx.send(func_result); - }; - - // Send the function to the beacon processor for execution at some arbitrary time. - let result = send_to_beacon_processor( - beacon_processor_send, - priority, - BlockingOrAsync::Async(Box::pin(process_fn)), - rx, - ) - .await; - convert_rejection(result).await - } else { - // There is no beacon processor so spawn a task directly on the - // tokio executor. - tokio::task::spawn(func).await.unwrap_or_else(|e| { - warp::reply::with_status( - warp::reply::json(&format!("Tokio did not execute task: {e:?}")), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response() - }) - } - } } /// Send a task to the beacon processor and await execution. diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index dcc2532229..fe47e56dc5 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,8 +1,6 @@ use crate::{Config, Context}; use beacon_chain::{ - test_utils::{ - BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, - }, + test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig}; @@ -23,7 +21,7 @@ use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; use std::future::Future; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; @@ -53,9 +51,8 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } -type Initializer = Box< - dyn FnOnce(HarnessBuilder>) -> HarnessBuilder>, ->; +type HarnessBuilder = Builder>; +type Initializer = Box) -> HarnessBuilder>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { @@ -129,17 +126,9 @@ pub async fn create_api_server( test_runtime: &TestRuntime, log: Logger, ) -> ApiServer> { - // Get a 
random unused port. - let port = unused_port::unused_tcp4_port().unwrap(); - create_api_server_on_port(chain, test_runtime, log, port).await -} + // Use port 0 to allocate a new unused port. + let port = 0; -pub async fn create_api_server_on_port( - chain: Arc>, - test_runtime: &TestRuntime, - log: Logger, - port: u16, -) -> ApiServer> { let (network_senders, network_receivers) = NetworkSenders::new(); // Default metadata @@ -152,8 +141,6 @@ pub async fn create_api_server_on_port( let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( enr.clone(), - Some(TCP_PORT), - None, meta_data, vec![], false, @@ -184,7 +171,14 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let beacon_processor_config = BeaconProcessorConfig::default(); + let beacon_processor_config = BeaconProcessorConfig { + // The number of workers must be greater than one. Tests which use the + // builder workflow sometimes require an internal HTTP request in order + // to fulfill an already in-flight HTTP request, therefore having only + // one worker will result in a deadlock. + max_workers: 2, + ..BeaconProcessorConfig::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -196,11 +190,6 @@ pub async fn create_api_server_on_port( BeaconProcessor { network_globals: network_globals.clone(), executor: test_runtime.task_executor.clone(), - // The number of workers must be greater than one. Tests which use the - // builder workflow sometimes require an internal HTTP request in order - // to fulfill an already in-flight HTTP request, therefore having only - // one worker will result in a deadlock. 
- max_workers: 2, current_workers: 0, config: beacon_processor_config, log: log.clone(), @@ -218,15 +207,9 @@ pub async fn create_api_server_on_port( let ctx = Arc::new(Context { config: Config { enabled: true, - listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: port, - allow_origin: None, - tls_config: None, - allow_sync_stalled: false, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), - spec_fork_name: None, - sse_capacity_multiplier: 1, - enable_beacon_processor: true, + ..Config::default() }, chain: Some(chain), network_senders: Some(network_senders), diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 0082589000..fe300ae5e1 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,12 +1,19 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, - GossipVerifiedBlock, + GossipVerifiedBlock, IntoGossipVerifiedBlockContents, +}; +use eth2::types::{ + BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlockContents, + SignedBlockContentsTuple, }; -use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use std::sync::Arc; use tree_hash::TreeHash; -use types::{Hash256, MainnetEthSpec, Slot}; +use types::{ + BlindedBlobSidecar, BlindedPayload, BlobSidecar, FullPayload, Hash256, MainnetEthSpec, + SignedSidecarList, Slot, +}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -63,7 +70,7 @@ pub async fn gossip_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ 
-73,7 +80,7 @@ pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -83,7 +90,7 @@ pub async fn gossip_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -115,7 +122,7 @@ pub async fn gossip_partial_pass() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::random() @@ -124,7 +131,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -161,11 +168,15 @@ pub async fn gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = + tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block.clone(), blobs), + validation_level, + ) .await; 
assert!(response.is_ok()); @@ -203,18 +214,19 @@ pub async fn gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let (block_contents_tuple, _) = tester.harness.make_block(state_a, slot_b).await; + let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2_ssz(&block, validation_level) + .post_beacon_blocks_v2_ssz(&block_contents, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -244,7 +256,7 @@ pub async fn consensus_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -254,7 +266,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -264,7 +276,7 @@ pub async fn consensus_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 
0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -296,14 +308,14 @@ pub async fn consensus_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -346,31 +358,34 @@ pub async fn consensus_partial_pass_only_consensus() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBeaconBlock, _) = + let ((block_a, _), state_after_a): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a.clone(), slot_b).await; - let (block_b, state_after_b): (SignedBeaconBlock, _) = + let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a, slot_b).await; + let block_b_root = block_b.canonical_root(); /* check for `make_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); - assert!(gossip_block_b.is_ok()); + let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_b.is_ok()); let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); assert!(gossip_block_a.is_err()); /* submit `block_b` which should induce equivocation */ let channel = tokio::sync::mpsc::unbounded_channel(); - let 
publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_b.unwrap()), + ProvenancedBlock::local(gossip_block_contents_b.unwrap()), tester.harness.chain.clone(), &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -378,7 +393,7 @@ pub async fn consensus_partial_pass_only_consensus() { assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_b.canonical_root())); + .block_is_known_to_fork_choice(&block_b_root)); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. @@ -409,11 +424,15 @@ pub async fn consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = + tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -452,7 +471,7 @@ pub async fn equivocation_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -462,7 +481,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -472,7 +491,7 @@ pub async fn equivocation_invalid() { assert_eq!(error_response.status(), 
Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -505,9 +524,9 @@ pub async fn equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBeaconBlock, _) = + let ((block_a, blobs_a), state_after_a): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a.clone(), slot_b).await; - let (block_b, state_after_b): (SignedBeaconBlock, _) = + let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ @@ -518,7 +537,10 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blocks_v2(&block_a, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block_a.clone(), blobs_a), + validation_level + ) .await .is_ok()); assert!(tester @@ -529,7 +551,10 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_b, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block_b.clone(), blobs_b), + validation_level, + ) .await; assert!(response.is_err()); @@ -538,7 +563,7 @@ pub async fn equivocation_consensus_early_equivocation() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == 
"BAD_REQUEST: Slashable".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) ); } @@ -571,14 +596,14 @@ pub async fn equivocation_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -592,9 +617,11 @@ pub async fn equivocation_gossip() { ); } -/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// This test checks that a block that is valid from both a gossip and consensus perspective but +/// that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. /// -/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to +/// hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ @@ -624,9 +651,9 @@ pub async fn equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBeaconBlock, _) = + let ((block_a, blobs_a), state_after_a): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a.clone(), slot_b).await; - let (block_b, state_after_b): (SignedBeaconBlock, _) = + let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock, _), _) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ @@ -634,20 +661,23 @@ pub async fn equivocation_consensus_late_equivocation() { assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); - assert!(gossip_block_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); - assert!(gossip_block_a.is_err()); + let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_b.is_ok()); + let gossip_block_contents_a = SignedBlockContents::new(block_a, blobs_a) + .into_gossip_verified_block(&tester.harness.chain); + assert!(gossip_block_contents_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( None, - ProvenancedBlock::local(gossip_block_b.unwrap()), + ProvenancedBlock::local(gossip_block_contents_b.unwrap()), tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -692,11 +722,15 @@ pub async fn equivocation_full_pass() { let slot_b = 
slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + let ((block, blobs), _): ((SignedBeaconBlock, _), _) = + tester.harness.make_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -734,7 +768,7 @@ pub async fn blinded_gossip_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -742,11 +776,11 @@ pub async fn blinded_gossip_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -756,7 +790,7 @@ pub async fn blinded_gossip_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -788,18 +822,18 @@ pub async fn blinded_gossip_partial_pass() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let 
(block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero() }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -836,19 +870,18 @@ pub async fn blinded_gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; - + let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; + let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. 
@@ -879,19 +912,19 @@ pub async fn blinded_gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; + let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2_ssz(&block, validation_level) + .post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -922,7 +955,7 @@ pub async fn blinded_consensus_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -930,11 +963,11 @@ pub async fn blinded_consensus_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -944,7 +977,7 @@ pub async fn blinded_consensus_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 
0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -976,16 +1009,16 @@ pub async fn blinded_consensus_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -1027,19 +1060,19 @@ pub async fn blinded_consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; + let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block.canonical_root())); + .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -1071,7 +1104,7 @@ pub async fn blinded_equivocation_invalid() { tester.harness.advance_slot(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(chain_state_before, slot, |b| { *b.state_root_mut() = Hash256::zero(); @@ -1079,11 +1112,11 @@ pub async fn blinded_equivocation_invalid() { }) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -1093,7 +1126,7 @@ pub async fn blinded_equivocation_invalid() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string()) ); } @@ -1126,14 +1159,18 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + let (block_contents_tuple_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + let (block_contents_tuple_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ + let block_contents_a: SignedBlockContents> = 
block_contents_tuple_a.into(); + let block_contents_b: SignedBlockContents> = block_contents_tuple_b.into(); + let block_a = block_contents_a.signed_block(); + let block_b = block_contents_b.signed_block(); assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); @@ -1141,7 +1178,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blinded_blocks_v2(&block_a, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_a, validation_level) .await .is_ok()); assert!(tester @@ -1152,7 +1189,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_b, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_b, validation_level) .await; assert!(response.is_err()); @@ -1161,7 +1198,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert!( - matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string()) ); } @@ -1194,16 +1231,16 @@ pub async fn blinded_equivocation_gossip() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBeaconBlock, _) = tester + let (block_contents_tuple, _) = tester .harness .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block: SignedBlindedBeaconBlock = block.into(); + let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); let response: Result<(), 
eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) .await; assert!(response.is_err()); @@ -1249,11 +1286,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + let ((block_a, blobs_a), state_after_a): ((SignedBlindedBeaconBlock, _), _) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + let ((block_b, blobs_b), state_after_b): ((SignedBlindedBeaconBlock, _), _) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ @@ -1263,16 +1300,16 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_a = reconstruct_block( tester.harness.chain.clone(), - block_a.state_root(), - block_a, + block_a.canonical_root(), + SignedBlockContents::new(block_a, blobs_a), test_logger.clone(), ) .await .unwrap(); let unblinded_block_b = reconstruct_block( tester.harness.chain.clone(), - block_b.clone().state_root(), - block_b.clone(), + block_b.canonical_root(), + SignedBlockContents::new(block_b.clone(), blobs_b.clone()), test_logger.clone(), ) .await @@ -1287,19 +1324,26 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { ProvenancedBlock::Builder(b, _) => b, }; - let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); + let gossip_block_b = GossipVerifiedBlock::new( + Arc::new(inner_block_b.clone().deconstruct().0), + &tester.harness.chain, + ); assert!(gossip_block_b.is_ok()); - let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); + let gossip_block_a = GossipVerifiedBlock::new( + Arc::new(inner_block_a.clone().deconstruct().0), + &tester.harness.chain, + ); 
assert!(gossip_block_a.is_err()); let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_blinded_block( - block_b, + let publication_result = publish_blinded_block( + SignedBlockContents::new(block_b, blobs_b), tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -1339,12 +1383,15 @@ pub async fn blinded_equivocation_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block, _): (SignedBlindedBeaconBlock, _) = + let ((block, blobs), _): ((SignedBlindedBeaconBlock, _), _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block, validation_level) + .post_beacon_blocks_v2( + &SignedBlockContents::new(block.clone(), blobs), + validation_level, + ) .await; assert!(response.is_ok()); @@ -1353,3 +1400,20 @@ pub async fn blinded_equivocation_full_pass() { .chain .block_is_known_to_fork_choice(&block.canonical_root())); } + +fn into_signed_blinded_block_contents( + block_contents_tuple: SignedBlockContentsTuple>, +) -> SignedBlockContents> { + let (block, maybe_blobs) = block_contents_tuple; + SignedBlockContents::new(block.into(), maybe_blobs.map(into_blinded_blob_sidecars)) +} + +fn into_blinded_blob_sidecars( + blobs: SignedSidecarList>, +) -> SignedSidecarList { + blobs + .into_iter() + .map(|blob| blob.into()) + .collect::>() + .into() +} diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 0ab3c706e2..74b2647563 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use execution_layer::test_utils::generate_genesis_header; use genesis::{bls_withdrawal_credentials, 
interop_genesis_state_with_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; @@ -354,12 +355,13 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .iter() .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) .collect::>(); + let header = generate_genesis_header(&spec, true); let genesis_state = interop_genesis_state_with_withdrawal_credentials( &validator_keypairs, &withdrawal_credentials, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + header, &spec, ) .unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index d7ea7c2628..17213d6f53 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -391,8 +391,8 @@ pub async fn proposer_boost_re_org_test( ) { assert!(head_slot > 0); - // Test using Capella so that we simulate conditions as similar to mainnet as possible. - let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + // Test using the latest fork so that we simulate conditions as similar to mainnet as possible. + let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); spec.terminal_total_difficulty = 1.into(); // Ensure there are enough validators to have `attesters_per_slot`. @@ -551,7 +551,7 @@ pub async fn proposer_boost_re_org_test( // Produce block B and process it halfway through the slot. 
let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; - let block_b_root = block_b.canonical_root(); + let block_b_root = block_b.0.canonical_root(); let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2; slot_clock.set_current_time(obs_time); @@ -617,12 +617,13 @@ pub async fn proposer_boost_re_org_test( let randao_reveal = harness .sign_randao_reveal(&state_b, proposer_index, slot_c) .into(); - let unsigned_block_c = tester + let unsigned_block_contents_c = tester .client .get_validator_blocks(slot_c, &randao_reveal, None) .await .unwrap() .data; + let (unsigned_block_c, block_c_blobs) = unsigned_block_contents_c.deconstruct(); let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b); if should_re_org { @@ -633,9 +634,13 @@ pub async fn proposer_boost_re_org_test( assert_eq!(block_c.parent_root(), block_b_root); } + // Sign blobs. + let block_c_signed_blobs = + block_c_blobs.map(|blobs| harness.sign_blobs(blobs, &state_b, proposer_index)); + // Applying block C should cause it to become head regardless (re-org or continuation). let block_root_c = harness - .process_block_result(block_c.clone()) + .process_block_result((block_c.clone(), block_c_signed_blobs)) .await .unwrap() .into(); @@ -643,8 +648,18 @@ pub async fn proposer_boost_re_org_test( // Check the fork choice updates that were sent. 
let forkchoice_updates = forkchoice_updates.lock(); - let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash(); - let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash(); + let block_a_exec_hash = block_a + .0 + .message() + .execution_payload() + .unwrap() + .block_hash(); + let block_b_exec_hash = block_b + .0 + .message() + .execution_payload() + .unwrap() + .block_hash(); let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp(); @@ -688,6 +703,11 @@ pub async fn proposer_boost_re_org_test( assert_ne!(expected_withdrawals, pre_advance_withdrawals); } + // Check that the `parent_beacon_block_root` of the payload attributes are correct. + if let Ok(parent_beacon_block_root) = payload_attribs.parent_beacon_block_root() { + assert_eq!(parent_beacon_block_root, block_c.parent_root()); + } + let lookahead = slot_clock .start_of(slot_c) .unwrap() @@ -749,7 +769,7 @@ pub async fn fork_choice_before_proposal() { let state_a = harness.get_current_state(); let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; let block_root_b = harness - .process_block(slot_b, block_b.canonical_root(), block_b) + .process_block(slot_b, block_b.0.canonical_root(), block_b) .await .unwrap(); @@ -764,7 +784,7 @@ pub async fn fork_choice_before_proposal() { let (block_c, state_c) = harness.make_block(state_a, slot_c).await; let block_root_c = harness - .process_block(slot_c, block_c.canonical_root(), block_c.clone()) + .process_block(slot_c, block_c.0.canonical_root(), block_c.clone()) .await .unwrap(); @@ -804,7 +824,9 @@ pub async fn fork_choice_before_proposal() { .get_validator_blocks::>(slot_d, &randao_reveal, None) .await .unwrap() - .data; + .data + .deconstruct() + .0; // Head is now B. 
assert_eq!( diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 95f885faa5..d37026d406 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -100,9 +100,10 @@ async fn el_error_on_new_payload() { // Make a block. let pre_state = harness.get_current_state(); - let (block, _) = harness + let (block_contents, _) = harness .make_block(pre_state, Slot::new(num_blocks + 1)) .await; + let (block, blobs) = block_contents; let block_hash = block .message() .body() @@ -118,7 +119,9 @@ async fn el_error_on_new_payload() { // Attempt to process the block, which should error. harness.advance_slot(); assert!(matches!( - harness.process_block_result(block.clone()).await, + harness + .process_block_result((block.clone(), blobs.clone())) + .await, Err(BlockError::ExecutionPayloadError(_)) )); @@ -137,7 +140,7 @@ async fn el_error_on_new_payload() { validation_error: None, }, ); - harness.process_block_result(block).await.unwrap(); + harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; assert_eq!(api_response.el_offline, Some(false)); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index e8e78382e9..884c988c19 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,24 +1,23 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, Timeouts, + BeaconNodeHttpClient, 
Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::TestingBuilder; -use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; use execution_layer::test_utils::{ - Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_BUILDER_THRESHOLD_WEI, + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{ - test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + test_utils::{create_api_server, ApiServer}, BlockId, StateId, }; use lighthouse_network::{Enr, EnrExt, PeerId}; @@ -28,6 +27,7 @@ use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; +use state_processing::state_advance::partial_state_advance; use std::convert::TryInto; use std::sync::Arc; use tokio::time::Duration; @@ -62,8 +62,8 @@ struct ApiTester { harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, - next_block: SignedBeaconBlock, - reorg_block: SignedBeaconBlock, + next_block: SignedBlockContents, + reorg_block: SignedBlockContents, attestations: Vec>, contribution_and_proofs: Vec>, attester_slashing: AttesterSlashing, @@ -72,11 +72,12 @@ struct ApiTester { network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, - mock_builder: Option>>, + mock_builder: Option>>, } struct ApiTesterConfig { spec: ChainSpec, + retain_historic_states: bool, builder_threshold: Option, } @@ -86,11 +87,19 @@ impl Default for ApiTesterConfig { spec.shard_committee_period = 2; Self { spec, + retain_historic_states: false, builder_threshold: None, } } } +impl ApiTesterConfig { + fn retain_historic_states(mut self) -> Self { + self.retain_historic_states = true; + self + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive 
chain. @@ -110,20 +119,28 @@ impl ApiTester { } pub async fn new_from_config(config: ApiTesterConfig) -> Self { - // Get a random unused port let spec = config.spec; - let port = unused_port::unused_tcp4_port().unwrap(); - let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); - let harness = Arc::new( - BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .logger(logging::test_logger()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold) - .build(), - ); + let mut harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .chain_config(ChainConfig { + reconstruct_historic_states: config.retain_historic_states, + ..ChainConfig::default() + }) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer_with_config(config.builder_threshold) + .build(); + + harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); harness.advance_slot(); @@ -154,11 +171,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let next_block = SignedBlockContents::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; + let reorg_block = SignedBlockContents::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -231,29 +250,36 @@ impl ApiTester { let ApiServer { server, - listening_socket: _, + listening_socket, network_rx, local_enr, external_peer_id, - } = create_api_server_on_port(chain.clone(), &harness.runtime, log, port).await; + } = create_api_server(chain.clone(), 
&harness.runtime, log).await; harness.runtime.task_executor.spawn(server, "api_server"); + // Late-initalize the mock builder now that the mock execution node and beacon API ports + // have been allocated. + let beacon_api_port = listening_socket.port(); + let beacon_url = + SensitiveUrl::parse(format!("http://127.0.0.1:{beacon_api_port}").as_str()).unwrap(); + let mock_builder_server = harness.set_mock_builder(beacon_url.clone()); + + // Start the mock builder service prior to building the chain out. + harness.runtime.task_executor.spawn( + async move { mock_builder_server.await }, + "mock_builder_server", + ); + + let mock_builder = harness.mock_builder.clone(); + let client = BeaconNodeHttpClient::new( beacon_url, Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), ); - let builder_ref = harness.mock_builder.as_ref().unwrap().clone(); - harness.runtime.task_executor.spawn( - async move { builder_ref.run().await }, - "mock_builder_server", - ); - - let mock_builder = harness.mock_builder.clone(); - Self { - harness, + harness: Arc::new(harness), chain, client, next_block, @@ -286,11 +312,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let next_block = SignedBlockContents::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; + let reorg_block = SignedBlockContents::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -365,7 +393,6 @@ impl ApiTester { .mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_BUILDER_THRESHOLD_WEI, ))); @@ -375,6 +402,7 @@ impl ApiTester { pub async fn new_mev_tester_no_builder_threshold() -> Self { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: 
false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -387,7 +415,6 @@ impl ApiTester { .mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, ))); @@ -1229,7 +1256,7 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_valid(mut self) -> Self { - let next_block = &self.next_block; + let next_block = self.next_block.clone(); self.client .clone() @@ -1277,7 +1304,7 @@ impl ApiTester { assert!(self .client .clone() - .post_beacon_blocks(&block) + .post_beacon_blocks(&SignedBlockContents::from(block)) .await .is_err()); @@ -1302,7 +1329,11 @@ impl ApiTester { .await .0; - assert!(self.client.post_beacon_blocks_ssz(&block).await.is_err()); + assert!(self + .client + .post_beacon_blocks_ssz(&SignedBlockContents::from(block)) + .await + .is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), @@ -1312,6 +1343,71 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_duplicate(self) -> Self { + let block_contents = self + .harness + .make_block( + self.harness.get_current_state(), + self.harness.get_current_slot(), + ) + .await + .0 + .into(); + + assert!(self + .client + .post_beacon_blocks(&block_contents) + .await + .is_ok()); + + let blinded_block_contents = block_contents.clone_as_blinded(); + + // Test all the POST methods in sequence, they should all behave the same. 
+ let responses = vec![ + self.client + .post_beacon_blocks(&block_contents) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_v2(&block_contents, None) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_ssz(&block_contents) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_v2_ssz(&block_contents, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks(&blinded_block_contents) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2(&blinded_block_contents, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_ssz(&blinded_block_contents) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2_ssz(&blinded_block_contents, None) + .await + .unwrap_err(), + ]; + for (i, response) in responses.into_iter().enumerate() { + assert_eq!( + response.status().unwrap(), + StatusCode::ACCEPTED, + "response {i}" + ); + } + + self + } + pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { let expected = block_id @@ -1723,9 +1819,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Capella(res.data)) + .map(|res| ConfigAndPreset::Deneb(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); @@ -2424,13 +2520,17 @@ impl ApiTester { .get_validator_blocks::>(slot, &randao_reveal, None) .await .unwrap() - .data; + .data + .deconstruct() + .0; let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_contents = + SignedBlockContents::try_from(signed_block.clone()).unwrap(); self.client .clone() - .post_beacon_blocks(&signed_block) + .post_beacon_blocks(&signed_block_contents) .await .unwrap(); @@ -2442,6 +2542,74 @@ impl ApiTester { self } + pub async fn test_block_production_ssz(self) -> Self { + let fork = 
self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block_bytes = self + .client + .get_validator_blocks_ssz::>(slot, &randao_reveal, None) + .await + .unwrap() + .expect("block bytes"); + + let block_contents = + BlockContents::>::from_ssz_bytes(&block_bytes, &self.chain.spec) + .expect("block contents bytes can be decoded"); + + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); + + assert_eq!( + self.chain.head_beacon_block().as_ref(), + signed_block_contents.signed_block() + ); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); @@ -2456,7 +2624,9 @@ impl ApiTester { ) .await .unwrap() - .data; + .data + .deconstruct() + .0; assert_eq!(block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -2570,14 +2740,16 @@ impl ApiTester { .unwrap() .data; - let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + 
let signed_block_contents = + block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks(&signed_block) + .post_beacon_blinded_blocks(&signed_block_contents) .await .unwrap(); // This converts the generic `Payload` to a concrete type for comparison. + let signed_block = signed_block_contents.deconstruct().0; let head_block = SignedBeaconBlock::from(signed_block.clone()); assert_eq!(head_block, signed_block); @@ -2623,21 +2795,29 @@ impl ApiTester { sk.sign(message).into() }; - let block = self + let block_contents_bytes = self .client - .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() - .data; + .expect("block bytes"); - let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let block_contents = BlockContents::::from_ssz_bytes( + &block_contents_bytes, + &self.chain.spec, + ) + .expect("block contents bytes can be decoded"); + + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks_ssz(&signed_block) + .post_beacon_blinded_blocks_ssz(&signed_block_contents) .await .unwrap(); // This converts the generic `Payload` to a concrete type for comparison. 
+ let signed_block = signed_block_contents.deconstruct().0; let head_block = SignedBeaconBlock::from(signed_block.clone()); assert_eq!(head_block, signed_block); @@ -2651,7 +2831,7 @@ impl ApiTester { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); - let block = self + let block_contents = self .client .get_validator_blinded_blocks_modular::( slot, @@ -2662,7 +2842,7 @@ impl ApiTester { .await .unwrap() .data; - assert_eq!(block.slot(), slot); + assert_eq!(block_contents.block().slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -3198,6 +3378,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3216,6 +3397,7 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + self } @@ -3224,7 +3406,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::GasLimit(30_000_000)); let slot = self.chain.slot().unwrap(); @@ -3238,6 +3419,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3267,7 +3449,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::FeeRecipient(test_fee_recipient)); let slot = self.chain.slot().unwrap(); @@ -3281,6 +3462,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3309,7 +3491,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::ParentHash(invalid_parent_hash)); let slot = self.chain.slot().unwrap(); @@ -3330,6 +3511,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3358,7 +3540,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::PrevRandao(invalid_prev_randao)); let slot = self.chain.slot().unwrap(); @@ -3378,6 +3559,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ 
-3403,7 +3585,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::BlockNumber(invalid_block_number)); let slot = self.chain.slot().unwrap(); @@ -3425,6 +3606,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3450,7 +3632,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Timestamp(invalid_timestamp)); let slot = self.chain.slot().unwrap(); @@ -3471,6 +3652,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3490,11 +3672,7 @@ impl ApiTester { } pub async fn test_payload_rejects_invalid_signature(self) -> Self { - self.mock_builder - .as_ref() - .unwrap() - .builder - .invalid_signatures(); + self.mock_builder.as_ref().unwrap().invalid_signatures(); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -3507,6 +3685,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3544,6 +3723,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3587,6 +3767,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3616,6 +3797,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3665,6 +3847,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3704,6 +3887,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3747,6 +3931,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3772,7 +3957,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_BUILDER_THRESHOLD_WEI - 1, ))); @@ -3788,6 +3972,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ 
-3809,7 +3994,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, ))); @@ -3825,6 +4009,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3846,7 +4031,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, ))); @@ -3862,6 +4046,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3883,7 +4068,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, ))); @@ -3899,6 +4083,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3920,26 +4105,11 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, ))); let slot = self.chain.slot().unwrap(); - let propose_state = self - .harness - .chain - .state_at_slot(slot, StateSkipConfig::WithoutStateRoots) - .unwrap(); - let withdrawals = get_expected_withdrawals(&propose_state, &self.chain.spec).unwrap(); - let withdrawals_root = withdrawals.tree_hash_root(); - // Set withdrawals root for builder - self.mock_builder - .as_ref() - .unwrap() - .builder - .add_operation(Operation::WithdrawalsRoot(withdrawals_root)); - let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; @@ -3949,6 +4119,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -3965,12 +4136,47 @@ impl ApiTester { self } + pub async fn test_builder_works_post_deneb(self) -> Self { + // Ensure builder payload is chosen + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + 
DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let block_contents = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data; + let (block, maybe_sidecars) = block_contents.deconstruct(); + + // Response should contain blob sidecars + assert!(maybe_sidecars.is_some()); + + // The builder's payload should've been chosen, so this cache should not be populated + let payload: BlindedPayload = block.body().execution_payload().unwrap().into(); + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + pub async fn test_lighthouse_rejects_invalid_withdrawals_root(self) -> Self { // Ensure builder payload *would be* chosen self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::Value(Uint256::from( DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, ))); @@ -3978,7 +4184,6 @@ impl ApiTester { self.mock_builder .as_ref() .unwrap() - .builder .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); let slot = self.chain.slot().unwrap(); @@ -3991,6 +4196,7 @@ impl ApiTester { .await .unwrap() .data + .block() .body() .execution_payload() .unwrap() @@ -4246,12 +4452,12 @@ impl ApiTester { // Submit the next block, which is on an epoch boundary, so this will produce a finalized // checkpoint event, head event, and block event - let block_root = self.next_block.canonical_root(); + let block_root = self.next_block.signed_block().canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch let current_duty_dependent_root = self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); - let next_slot = self.next_block.slot(); + let next_slot = self.next_block.signed_block().slot(); let finalization_distance 
= E::slots_per_epoch() * 2; let expected_block = EventKind::Block(SseBlock { @@ -4263,7 +4469,7 @@ impl ApiTester { let expected_head = EventKind::Head(SseHead { block: block_root, slot: next_slot, - state: self.next_block.state_root(), + state: self.next_block.signed_block().state_root(), current_duty_dependent_root, previous_duty_dependent_root: self .chain @@ -4313,13 +4519,17 @@ impl ApiTester { .unwrap(); let expected_reorg = EventKind::ChainReorg(SseChainReorg { - slot: self.reorg_block.slot(), + slot: self.reorg_block.signed_block().slot(), depth: 1, - old_head_block: self.next_block.canonical_root(), - old_head_state: self.next_block.state_root(), - new_head_block: self.reorg_block.canonical_root(), - new_head_state: self.reorg_block.state_root(), - epoch: self.next_block.slot().epoch(E::slots_per_epoch()), + old_head_block: self.next_block.signed_block().canonical_root(), + old_head_state: self.next_block.signed_block().state_root(), + new_head_block: self.reorg_block.signed_block().canonical_root(), + new_head_state: self.reorg_block.signed_block().state_root(), + epoch: self + .next_block + .signed_block() + .slot() + .epoch(E::slots_per_epoch()), execution_optimistic: false, }); @@ -4342,6 +4552,72 @@ impl ApiTester { self } + pub async fn test_get_expected_withdrawals_invalid_state(self) -> Self { + let state_id = CoreStateId::Root(Hash256::zero()); + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 404); + } + _ => panic!("query did not fail correctly"), + } + + self + } + + pub async fn test_get_expected_withdrawals_capella(self) -> Self { + let slot = self.chain.slot().unwrap(); + let state_id = CoreStateId::Slot(slot); + + // calculate the expected withdrawals + let (mut state, _, _) = StateId(state_id).state(&self.chain).unwrap(); + let proposal_slot = state.slot() + 1; + let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch()); + let (state_root, _, _) = 
StateId(state_id).root(&self.chain).unwrap(); + if proposal_epoch != state.current_epoch() { + let _ = partial_state_advance( + &mut state, + Some(state_root), + proposal_slot, + &self.chain.spec, + ); + } + let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap(); + + // fetch expected withdrawals from the client + let result = self.client.get_expected_withdrawals(&state_id).await; + match result { + Ok(withdrawal_response) => { + assert_eq!(withdrawal_response.execution_optimistic, Some(false)); + assert_eq!(withdrawal_response.finalized, Some(false)); + assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec()); + } + Err(e) => { + println!("{:?}", e); + panic!("query failed incorrectly"); + } + } + + self + } + + pub async fn test_get_expected_withdrawals_pre_capella(self) -> Self { + let state_id = CoreStateId::Head; + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 400); + } + _ => panic!("query did not fail correctly"), + } + + self + } + pub async fn test_get_events_altair(self) -> Self { let topics = vec![EventTopic::ContributionAndProof]; let mut events_future = self @@ -4384,8 +4660,8 @@ impl ApiTester { .await .unwrap(); - let block_root = self.next_block.canonical_root(); - let next_slot = self.next_block.slot(); + let block_root = self.next_block.signed_block().canonical_root(); + let next_slot = self.next_block.signed_block().slot(); let expected_block = EventKind::Block(SseBlock { block: block_root, @@ -4396,7 +4672,7 @@ impl ApiTester { let expected_head = EventKind::Head(SseHead { block: block_root, slot: next_slot, - state: self.next_block.state_root(), + state: self.next_block.signed_block().state_root(), current_duty_dependent_root: self.chain.genesis_block_root, previous_duty_dependent_root: self.chain.genesis_block_root, epoch_transition: false, @@ -4586,6 +4862,14 @@ async fn post_beacon_blocks_invalid() { .await; } 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_beacon_blocks_duplicate() { + ApiTester::new() + .await + .test_post_beacon_blocks_duplicate() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attestations_valid() { ApiTester::new() @@ -4721,7 +5005,7 @@ async fn get_validator_duties_attester_with_skip_slots() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .test_get_validator_duties_proposer() .await; @@ -4729,7 +5013,7 @@ async fn get_validator_duties_proposer() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer_with_skip_slots() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_duties_proposer() @@ -4766,6 +5050,20 @@ async fn block_production_verify_randao_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_ssz_full_payload() { + ApiTester::new().await.test_block_production_ssz().await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_ssz_with_skip_slots() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production_ssz() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_full_payload_premerge() { ApiTester::new() @@ -5061,6 +5359,7 @@ async fn builder_payload_chosen_by_profit() { async fn builder_works_post_capella() { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -5077,6 +5376,26 @@ async fn builder_works_post_capella() { .await; } 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_works_post_deneb() { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + retain_historic_states: false, + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + + ApiTester::new_from_config(config) + .await + .test_post_validator_register_validator() + .await + .test_builder_works_post_deneb() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_liveness_epoch() { ApiTester::new() @@ -5124,3 +5443,37 @@ async fn optimistic_responses() { .test_check_optimistic_responses() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_pre_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_pre_capella() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_state() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_invalid_state() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_valid_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + 
.test_get_expected_withdrawals_capella() + .await; +} diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index c98f2cb856..f8c93ad8fc 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -2,25 +2,25 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = "0.3.2" -serde = { version = "1.0.116", features = ["derive"] } -slog = "2.5.2" -beacon_chain = { path = "../beacon_chain" } -store = { path = "../store" } -lighthouse_network = { path = "../lighthouse_network" } -slot_clock = { path = "../../common/slot_clock" } -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lighthouse_version = { path = "../../common/lighthouse_version" } -warp_utils = { path = "../../common/warp_utils" } -malloc_utils = { path = "../../common/malloc_utils" } +warp = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +beacon_chain = { workspace = true } +store = { workspace = true } +lighthouse_network = { workspace = true } +slot_clock = { workspace = true } +lighthouse_metrics = { workspace = true } +lighthouse_version = { workspace = true } +warp_utils = { workspace = true } +malloc_utils = { workspace = true } [dev-dependencies] -tokio = { version = "1.14.0", features = ["sync"] } -reqwest = { version = "0.11.0", features = ["json"] } -environment = { path = "../../lighthouse/environment" } -types = { path = "../../consensus/types" } +tokio = { workspace = true } +reqwest = { workspace = true } +environment = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index f71845fed2..125bbe9bc2 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml 
@@ -2,62 +2,63 @@ name = "lighthouse_network" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2021" +edition = { workspace = true } [dependencies] -discv5 = { version = "0.3.1", features = ["libp2p"] } -unsigned-varint = { version = "0.6.0", features = ["codec"] } -types = { path = "../../consensus/types" } -ssz_types = "0.5.3" -serde = { version = "1.0.116", features = ["derive"] } -serde_derive = "1.0.116" -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -tree_hash = "0.5.0" -tree_hash_derive = "0.5.0" -slog = { version = "2.5.2", features = ["max_level_trace"] } -lighthouse_version = { path = "../../common/lighthouse_version" } -tokio = { version = "1.14.0", features = ["time", "macros"] } -futures = "0.3.7" -error-chain = "0.12.4" -dirs = "3.0.1" -fnv = "1.0.7" -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -smallvec = "1.6.1" -tokio-io-timeout = "1.1.1" -lru = "0.7.1" -lru_cache = { path = "../../common/lru_cache" } -parking_lot = "0.12.0" -sha2 = "0.10" -snap = "1.0.1" -hex = "0.4.2" -tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] } -tiny-keccak = "2.0.2" -task_executor = { path = "../../common/task_executor" } -rand = "0.8.5" -directory = { path = "../../common/directory" } -regex = "1.5.5" -strum = { version = "0.24.0", features = ["derive"] } -superstruct = "0.5.0" +discv5 = { workspace = true } +unsigned-varint = { version = "0.6", features = ["codec"] } +ssz_types = { workspace = true } +types = { workspace = true } +serde = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +slog = { workspace = true } +lighthouse_version = { workspace = true } +tokio = { workspace = true } +futures = { workspace = true } +error-chain = { workspace = true } +dirs = { workspace = true } +fnv = { workspace = true } +lazy_static = { workspace = true } 
+lighthouse_metrics = { workspace = true } +smallvec = { workspace = true } +tokio-io-timeout = "1" +lru = { workspace = true } +lru_cache = { workspace = true } +parking_lot = { workspace = true } +sha2 = { workspace = true } +snap = { workspace = true } +hex = { workspace = true } +tokio-util = { workspace = true } +tiny-keccak = "2" +task_executor = { workspace = true } +rand = { workspace = true } +directory = { workspace = true } +regex = { workspace = true } +strum = { workspace = true } +superstruct = { workspace = true } prometheus-client = "0.21.0" -unused_port = { path = "../../common/unused_port" } -delay_map = "0.3.0" +unused_port = { workspace = true } +delay_map = { workspace = true } void = "1" +libp2p-quic= { version = "0.9.2", features=["tokio"]} +libp2p-mplex = "0.40.0" [dependencies.libp2p] version = "0.52" default-features = false -features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] +features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] [dev-dependencies] -slog-term = "2.6.0" -slog-async = "2.5.0" -tempfile = "3.1.0" -exit-future = "0.2.0" -void = "1" -quickcheck = "0.9.2" -quickcheck_macros = "0.9.1" +slog-term = { workspace = true } +slog-async = { workspace = true } +tempfile = { workspace = true } +exit-future = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } [features] -libp2p-websocket = [] \ No newline at end of file +libp2p-websocket = [] + diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 0ab7c03e7f..47bd7b8667 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -8,14 +8,20 @@ use directory::{ use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub; use libp2p::Multiaddr; -use serde_derive::{Deserialize, 
Serialize}; +use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; +use std::num::NonZeroU16; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; +pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; +pub const DEFAULT_TCP_PORT: u16 = 9000u16; +pub const DEFAULT_DISC_PORT: u16 = 9000u16; +pub const DEFAULT_QUIC_PORT: u16 = 9001u16; + /// The cache time is set to accommodate the circulation time of an attestation. /// /// The p2p spec declares that we accept attestations within the following range: @@ -58,17 +64,23 @@ pub struct Config { /// that no discovery address has been set in the CLI args. pub enr_address: (Option, Option), - /// The udp4 port to broadcast to peers in order to reach back for discovery. - pub enr_udp4_port: Option, + /// The udp ipv4 port to broadcast to peers in order to reach back for discovery. + pub enr_udp4_port: Option, - /// The tcp4 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp4_port: Option, + /// The quic ipv4 port to broadcast to peers in order to reach back for libp2p services. + pub enr_quic4_port: Option, - /// The udp6 port to broadcast to peers in order to reach back for discovery. - pub enr_udp6_port: Option, + /// The tcp ipv4 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp4_port: Option, - /// The tcp6 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp6_port: Option, + /// The udp ipv6 port to broadcast to peers in order to reach back for discovery. + pub enr_udp6_port: Option, + + /// The tcp ipv6 port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp6_port: Option, + + /// The quic ipv6 port to broadcast to peers in order to reach back for libp2p services. + pub enr_quic6_port: Option, /// Target number of connected peers. 
pub target_peers: usize, @@ -102,13 +114,16 @@ pub struct Config { /// Disables the discovery protocol from starting. pub disable_discovery: bool, + /// Disables quic support. + pub disable_quic_support: bool, + /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, /// Subscribe to all subnets for the duration of the runtime. pub subscribe_all_subnets: bool, - /// Import/aggregate all attestations recieved on subscribed subnets for the duration of the + /// Import/aggregate all attestations received on subscribed subnets for the duration of the /// runtime. pub import_all_attestations: bool, @@ -149,57 +164,76 @@ impl Config { /// Sets the listening address to use an ipv4 address. The discv5 ip_mode and table filter are /// adjusted accordingly to ensure addresses that are present in the enr are globally /// reachable. - pub fn set_ipv4_listening_address(&mut self, addr: Ipv4Addr, tcp_port: u16, udp_port: u16) { + pub fn set_ipv4_listening_address( + &mut self, + addr: Ipv4Addr, + tcp_port: u16, + disc_port: u16, + quic_port: u16, + ) { self.listen_addresses = ListenAddress::V4(ListenAddr { addr, - udp_port, + disc_port, + quic_port, tcp_port, }); - self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port); + self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) } /// Sets the listening address to use an ipv6 address. The discv5 ip_mode and table filter is /// adjusted accordingly to ensure addresses that are present in the enr are globally /// reachable. 
- pub fn set_ipv6_listening_address(&mut self, addr: Ipv6Addr, tcp_port: u16, udp_port: u16) { + pub fn set_ipv6_listening_address( + &mut self, + addr: Ipv6Addr, + tcp_port: u16, + disc_port: u16, + quic_port: u16, + ) { self.listen_addresses = ListenAddress::V6(ListenAddr { addr, - udp_port, + disc_port, + quic_port, tcp_port, }); - self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port); + self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) } /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and /// table filter is adjusted accordingly to ensure addresses that are present in the enr are /// globally reachable. + #[allow(clippy::too_many_arguments)] pub fn set_ipv4_ipv6_listening_addresses( &mut self, v4_addr: Ipv4Addr, tcp4_port: u16, - udp4_port: u16, + disc4_port: u16, + quic4_port: u16, v6_addr: Ipv6Addr, tcp6_port: u16, - udp6_port: u16, + disc6_port: u16, + quic6_port: u16, ) { self.listen_addresses = ListenAddress::DualStack( ListenAddr { addr: v4_addr, - udp_port: udp4_port, + disc_port: disc4_port, + quic_port: quic4_port, tcp_port: tcp4_port, }, ListenAddr { addr: v6_addr, - udp_port: udp6_port, + disc_port: disc6_port, + quic_port: quic6_port, tcp_port: tcp6_port, }, ); self.discv5_config.listen_config = discv5::ListenConfig::default() - .with_ipv4(v4_addr, udp4_port) - .with_ipv6(v6_addr, udp6_port); + .with_ipv4(v4_addr, disc4_port) + .with_ipv6(v6_addr, disc6_port); self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) { (None, None) => false, @@ -213,27 +247,32 @@ impl Config { match listen_addr { ListenAddress::V4(ListenAddr { addr, - udp_port, + disc_port, + quic_port, tcp_port, - }) => self.set_ipv4_listening_address(addr, tcp_port, udp_port), + }) => self.set_ipv4_listening_address(addr, tcp_port, disc_port, quic_port), 
ListenAddress::V6(ListenAddr { addr, - udp_port, + disc_port, + quic_port, tcp_port, - }) => self.set_ipv6_listening_address(addr, tcp_port, udp_port), + }) => self.set_ipv6_listening_address(addr, tcp_port, disc_port, quic_port), ListenAddress::DualStack( ListenAddr { addr: ip4addr, - udp_port: udp4_port, + disc_port: disc4_port, + quic_port: quic4_port, tcp_port: tcp4_port, }, ListenAddr { addr: ip6addr, - udp_port: udp6_port, + disc_port: disc6_port, + quic_port: quic6_port, tcp_port: tcp6_port, }, ) => self.set_ipv4_ipv6_listening_addresses( - ip4addr, tcp4_port, udp4_port, ip6addr, tcp6_port, udp6_port, + ip4addr, tcp4_port, disc4_port, quic4_port, ip6addr, tcp6_port, disc6_port, + quic6_port, ), } } @@ -271,9 +310,10 @@ impl Default for Config { .expect("The total rate limit has been specified"), ); let listen_addresses = ListenAddress::V4(ListenAddr { - addr: Ipv4Addr::UNSPECIFIED, - udp_port: 9000, - tcp_port: 9000, + addr: DEFAULT_IPV4_ADDRESS, + disc_port: DEFAULT_DISC_PORT, + quic_port: DEFAULT_QUIC_PORT, + tcp_port: DEFAULT_TCP_PORT, }); let discv5_listen_config = @@ -305,10 +345,11 @@ impl Default for Config { network_dir, listen_addresses, enr_address: (None, None), - enr_udp4_port: None, + enr_quic4_port: None, enr_tcp4_port: None, enr_udp6_port: None, + enr_quic6_port: None, enr_tcp6_port: None, target_peers: 50, gs_config, @@ -320,6 +361,7 @@ impl Default for Config { disable_peer_scoring: false, client_version: lighthouse_version::version_with_platform(), disable_discovery: false, + disable_quic_support: false, upnp_enabled: true, network_load: 3, private: false, @@ -417,7 +459,7 @@ pub fn gossipsub_config( // We use the first 8 bytes of SHA256(topic, data) for content addressing let fast_gossip_message_id = |message: &gossipsub::RawMessage| { let data = [message.topic.as_str().as_bytes(), &message.data].concat(); - gossipsub::FastMessageId::from(&Sha256::digest(data)[..8]) + gossipsub::FastMessageId::from(&Sha256::digest(&data)[..8]) }; fn 
prefix( prefix: [u8; 4], @@ -426,7 +468,7 @@ pub fn gossipsub_config( ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { - ForkName::Altair | ForkName::Merge | ForkName::Capella => { + ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Deneb => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index ef22f816a7..8eacabb4d0 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -17,6 +17,8 @@ use std::path::Path; use std::str::FromStr; use types::{EnrForkId, EthSpec}; +use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; + /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; /// The ENR field specifying the attestation subnet bitfield. @@ -142,7 +144,7 @@ pub fn build_or_load_enr( pub fn create_enr_builder_from_config( config: &NetworkConfig, - enable_tcp: bool, + enable_libp2p: bool, ) -> EnrBuilder { let mut builder = EnrBuilder::new("v4"); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -156,27 +158,58 @@ pub fn create_enr_builder_from_config( } if let Some(udp4_port) = config.enr_udp4_port { - builder.udp4(udp4_port); + builder.udp4(udp4_port.get()); } if let Some(udp6_port) = config.enr_udp6_port { - builder.udp6(udp6_port); + builder.udp6(udp6_port.get()); } - if enable_tcp { - // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. - let tcp4_port = config - .enr_tcp4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port)); - if let Some(tcp4_port) = tcp4_port { - builder.tcp4(tcp4_port); + if enable_libp2p { + // Add QUIC fields to the ENR. 
+ // Since QUIC is used as an alternative transport for the libp2p protocols, + // the related fields should only be added when both QUIC and libp2p are enabled + if !config.disable_quic_support { + // If we are listening on ipv4, add the quic ipv4 port. + if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); + } + + // If we are listening on ipv6, add the quic ipv6 port. + if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); + } } - let tcp6_port = config - .enr_tcp6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port)); + // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. + let tcp4_port = config.enr_tcp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) + }); + if let Some(tcp4_port) = tcp4_port { + builder.tcp4(tcp4_port.get()); + } + + let tcp6_port = config.enr_tcp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) + }); if let Some(tcp6_port) = tcp6_port { - builder.tcp6(tcp6_port); + builder.tcp6(tcp6_port.get()); } } builder @@ -218,6 +251,9 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // tcp ports must match && local_enr.tcp4() == disk_enr.tcp4() && local_enr.tcp6() == disk_enr.tcp6() + // quic ports must match + && local_enr.quic4() == disk_enr.quic4() + && local_enr.quic6() == disk_enr.quic6() // must match on the same fork && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified diff --git a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs 
b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs index 753da6292c..2efaa76ac3 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr_ext.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr_ext.rs @@ -6,12 +6,15 @@ use libp2p::core::multiaddr::Protocol; use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey}; use tiny_keccak::{Hasher, Keccak}; +pub const QUIC_ENR_KEY: &str = "quic"; +pub const QUIC6_ENR_KEY: &str = "quic6"; + /// Extend ENR for libp2p types. pub trait EnrExt { /// The libp2p `PeerId` for the record. fn peer_id(&self) -> PeerId; - /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. + /// Returns a list of multiaddrs if the ENR has an `ip` and one of [`tcp`,`udp`,`quic`] key **or** an `ip6` and one of [`tcp6`,`udp6`,`quic6`]. /// The vector remains empty if these fields are not defined. fn multiaddr(&self) -> Vec; @@ -26,6 +29,15 @@ pub trait EnrExt { /// Returns any multiaddrs that contain the TCP protocol. fn multiaddr_tcp(&self) -> Vec; + + /// Returns any QUIC multiaddrs that are registered in this ENR. + fn multiaddr_quic(&self) -> Vec; + + /// Returns the quic port if one is set. + fn quic4(&self) -> Option; + + /// Returns the quic6 port if one is set. + fn quic6(&self) -> Option; } /// Extend ENR CombinedPublicKey for libp2p types. @@ -49,7 +61,17 @@ impl EnrExt for Enr { self.public_key().as_peer_id() } - /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. + /// Returns the quic port if one is set. + fn quic4(&self) -> Option { + self.get_decodable(QUIC_ENR_KEY).and_then(Result::ok) + } + + /// Returns the quic6 port if one is set. 
+ fn quic6(&self) -> Option { + self.get_decodable(QUIC6_ENR_KEY).and_then(Result::ok) + } + + /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp`, `quic` or `udp` key **or** an `ip6` and either a `tcp6` `quic6` or `udp6`. /// The vector remains empty if these fields are not defined. fn multiaddr(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); @@ -59,6 +81,12 @@ impl EnrExt for Enr { multiaddr.push(Protocol::Udp(udp)); multiaddrs.push(multiaddr); } + if let Some(quic) = self.quic4() { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(quic)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); + } if let Some(tcp) = self.tcp4() { let mut multiaddr: Multiaddr = ip.into(); @@ -73,6 +101,13 @@ impl EnrExt for Enr { multiaddrs.push(multiaddr); } + if let Some(quic6) = self.quic6() { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Udp(quic6)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); + } + if let Some(tcp6) = self.tcp6() { let mut multiaddr: Multiaddr = ip6.into(); multiaddr.push(Protocol::Tcp(tcp6)); @@ -174,8 +209,30 @@ impl EnrExt for Enr { multiaddrs } + /// Returns a list of multiaddrs if the ENR has an `ip` and a `quic` key **or** an `ip6` and a `quic6`. + fn multiaddr_quic(&self) -> Vec { + let mut multiaddrs: Vec = Vec::new(); + if let Some(quic_port) = self.quic4() { + if let Some(ip) = self.ip4() { + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(quic_port)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); + } + } + + if let Some(quic6_port) = self.quic6() { + if let Some(ip6) = self.ip6() { + let mut multiaddr: Multiaddr = ip6.into(); + multiaddr.push(Protocol::Udp(quic6_port)); + multiaddr.push(Protocol::QuicV1); + multiaddrs.push(multiaddr); + } + } + multiaddrs + } + /// Returns a list of multiaddrs if the ENR has an `ip` and either a `tcp` or `udp` key **or** an `ip6` and either a `tcp6` or `udp6`. 
- /// The vector remains empty if these fields are not defined. fn multiaddr_tcp(&self) -> Vec { let mut multiaddrs: Vec = Vec::new(); if let Some(ip) = self.ip4() { diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 82a371d8a2..388790568f 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -25,7 +25,7 @@ use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{ConnectedPoint, Multiaddr}, + core::{transport::ListenerId, ConnectedPoint, Multiaddr}, identity::PeerId, swarm::{ dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, @@ -75,7 +75,20 @@ const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); /// of the peer if it is specified. #[derive(Debug)] pub struct DiscoveredPeers { - pub peers: HashMap>, + pub peers: HashMap>, +} + +/// Specifies which port numbers should be modified after start of the discovery service +#[derive(Debug)] +pub struct UpdatePorts { + /// TCP port associated with IPv4 address (if present) + pub tcp4: bool, + /// TCP port associated with IPv6 address (if present) + pub tcp6: bool, + /// QUIC port associated with IPv4 address (if present) + pub quic4: bool, + /// QUIC port associated with IPv6 address (if present) + pub quic6: bool, } #[derive(Clone, PartialEq)] @@ -178,12 +191,8 @@ pub struct Discovery { /// always false. pub started: bool, - /// This keeps track of whether an external UDP port change should also indicate an internal - /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP - /// port is also our external TCP port. This assumption only holds if the user has not - /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the - /// second indicates tcp6.
- update_tcp_port: (bool, bool), + /// Specifies whether various port numbers should be updated after the discovery service has been started + update_ports: UpdatePorts, /// Logger for the discovery behaviour. log: slog::Logger, @@ -208,7 +217,8 @@ impl Discovery { let local_node_id = local_enr.node_id(); info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6() + "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6(), + "quic4" => ?local_enr.quic4(), "quic6" => ?local_enr.quic6() ); // convert the keypair into an ENR key @@ -230,7 +240,8 @@ impl Discovery { "peer_id" => %bootnode_enr.peer_id(), "ip" => ?bootnode_enr.ip4(), "udp" => ?bootnode_enr.udp4(), - "tcp" => ?bootnode_enr.tcp4() + "tcp" => ?bootnode_enr.tcp4(), + "quic" => ?bootnode_enr.quic4() ); let repr = bootnode_enr.to_string(); let _ = discv5.add_enr(bootnode_enr).map_err(|e| { @@ -281,7 +292,8 @@ impl Discovery { "peer_id" => %enr.peer_id(), "ip" => ?enr.ip4(), "udp" => ?enr.udp4(), - "tcp" => ?enr.tcp4() + "tcp" => ?enr.tcp4(), + "quic" => ?enr.quic4() ); let _ = discv5.add_enr(enr).map_err(|e| { error!( @@ -298,10 +310,12 @@ impl Discovery { } } - let update_tcp_port = ( - config.enr_tcp4_port.is_none(), - config.enr_tcp6_port.is_none(), - ); + let update_ports = UpdatePorts { + tcp4: config.enr_tcp4_port.is_none(), + tcp6: config.enr_tcp6_port.is_none(), + quic4: config.enr_quic4_port.is_none(), + quic6: config.enr_quic6_port.is_none(), + }; Ok(Self { cached_enrs: LruCache::new(50), @@ -312,7 +326,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, - update_tcp_port, + update_ports, log, enr_dir, }) @@ -383,20 +397,6 @@ impl Discovery { self.discv5.table_entries_enr() } - /// Returns the 
ENR of a known peer if it exists. - pub fn enr_of_peer(&mut self, peer_id: &PeerId) -> Option { - // first search the local cache - if let Some(enr) = self.cached_enrs.get(peer_id) { - return Some(enr.clone()); - } - // not in the local cache, look in the routing table - if let Ok(node_id) = enr_ext::peer_id_to_node_id(peer_id) { - self.discv5.find_enr(&node_id) - } else { - None - } - } - /// Updates the local ENR TCP port. /// There currently isn't a case to update the address here. We opt for discovery to /// automatically update the external address. @@ -414,6 +414,23 @@ impl Discovery { Ok(()) } + // TODO: Group these functions here once the ENR is shared across discv5 and lighthouse and + // Lighthouse can modify the ENR directly. + // This currently doesn't support ipv6. All of these functions should be removed and + // addressed properly in the following issue. + // https://github.com/sigp/lighthouse/issues/4706 + pub fn update_enr_quic_port(&mut self, port: u16) -> Result<(), String> { + self.discv5 + .enr_insert("quic", &port) + .map_err(|e| format!("{:?}", e))?; + + // replace the global version + *self.network_globals.local_enr.write() = self.discv5.local_enr(); + // persist modified enr to disk + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + Ok(()) + } + /// Updates the local ENR UDP socket. /// /// This is with caution. Discovery should automatically maintain this. This should only be @@ -550,8 +567,6 @@ impl Discovery { if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. self.discv5.ban_node(&node_id, None); - // Remove the node from the routing table. 
- self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { @@ -647,7 +662,7 @@ impl Discovery { if subnet_queries.len() == MAX_SUBNETS_IN_QUERY || self.queued_queries.is_empty() { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue - let grouped_queries: Vec = subnet_queries.drain(..).collect(); + let grouped_queries: Vec = std::mem::take(&mut subnet_queries); self.start_subnet_query(grouped_queries); processed = true; } @@ -733,23 +748,6 @@ impl Discovery { target_peers: usize, additional_predicate: impl Fn(&Enr) -> bool + Send + 'static, ) { - // Make sure there are subnet queries included - let contains_queries = match &query { - QueryType::Subnet(queries) => !queries.is_empty(), - QueryType::FindPeers => true, - }; - - if !contains_queries { - debug!( - self.log, - "No subnets included in this request. Skipping discovery request." - ); - return; - } - - // Generate a random target node id. - let random_node = NodeId::random(); - let enr_fork_id = match self.local_enr().eth2() { Ok(v) => v, Err(e) => { @@ -773,7 +771,8 @@ impl Discovery { // Build the future let query_future = self .discv5 - .find_node_predicate(random_node, predicate, target_peers) + // Generate a random target node id. 
+ .find_node_predicate(NodeId::random(), predicate, target_peers) .map(|v| QueryResult { query_type: query, result: v, @@ -787,7 +786,7 @@ impl Discovery { fn process_completed_queries( &mut self, query: QueryResult, - ) -> Option>> { + ) -> Option>> { match query.query_type { QueryType::FindPeers => { self.find_peer_active = false; @@ -797,12 +796,14 @@ impl Discovery { } Ok(r) => { debug!(self.log, "Discovery query completed"; "peers_found" => r.len()); - let mut results: HashMap<_, Option> = HashMap::new(); - r.iter().for_each(|enr| { - // cache the found ENR's - self.cached_enrs.put(enr.peer_id(), enr.clone()); - results.insert(enr.peer_id(), None); - }); + let results = r + .into_iter() + .map(|enr| { + // cache the found ENR's + self.cached_enrs.put(enr.peer_id(), enr.clone()); + (enr, None) + }) + .collect(); return Some(results); } Err(e) => { @@ -850,17 +851,17 @@ impl Discovery { let subnet_predicate = subnet_predicate::(vec![query.subnet], &self.log); - r.iter() + r.clone() + .into_iter() .filter(|enr| subnet_predicate(enr)) - .map(|enr| enr.peer_id()) - .for_each(|peer_id| { + .for_each(|enr| { if let Some(v) = metrics::get_int_counter( &metrics::SUBNET_PEERS_FOUND, &[query_str], ) { v.inc(); } - let other_min_ttl = mapped_results.get_mut(&peer_id); + let other_min_ttl = mapped_results.get_mut(&enr); // map peer IDs to the min_ttl furthest in the future match (query.min_ttl, other_min_ttl) { @@ -878,15 +879,11 @@ impl Discovery { } // update the mapping if we have a specified min_ttl (Some(min_ttl), Some(None)) => { - mapped_results.insert(peer_id, Some(min_ttl)); + mapped_results.insert(enr, Some(min_ttl)); } // first seen min_ttl for this enr - (Some(min_ttl), None) => { - mapped_results.insert(peer_id, Some(min_ttl)); - } - // first seen min_ttl for this enr - (None, None) => { - mapped_results.insert(peer_id, None); + (min_ttl, None) => { + mapped_results.insert(enr, min_ttl); } (None, Some(Some(_))) => {} // Don't replace the existing specific 
min_ttl (None, Some(None)) => {} // No-op because this is a duplicate @@ -910,7 +907,7 @@ impl Discovery { } /// Drives the queries returning any results from completed queries. - fn poll_queries(&mut self, cx: &mut Context) -> Option>> { + fn poll_queries(&mut self, cx: &mut Context) -> Option>> { while let Poll::Ready(Some(query_result)) = self.active_queries.poll_next_unpin(cx) { let result = self.process_completed_queries(query_result); if result.is_some() { @@ -957,23 +954,6 @@ impl NetworkBehaviour for Discovery { ) { } - fn handle_pending_outbound_connection( - &mut self, - _connection_id: ConnectionId, - maybe_peer: Option, - _addresses: &[Multiaddr], - _effective_role: libp2p::core::Endpoint, - ) -> Result, libp2p::swarm::ConnectionDenied> { - if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) { - // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP - // port is removed, which is assumed to be associated with the discv5 protocol (and - // therefore irrelevant for other libp2p components). - Ok(enr.multiaddr_tcp()) - } else { - Ok(vec![]) - } - } - // Main execution loop to drive the behaviour fn poll( &mut self, @@ -1036,8 +1016,8 @@ impl NetworkBehaviour for Discovery { // Discv5 will have updated our local ENR. We save the updated version // to disk. - if (self.update_tcp_port.0 && socket_addr.is_ipv4()) - || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + if (self.update_ports.tcp4 && socket_addr.is_ipv4()) + || (self.update_ports.tcp6 && socket_addr.is_ipv6()) { // Update the TCP port in the ENR self.discv5.update_local_enr_socket(socket_addr, true); @@ -1047,25 +1027,8 @@ impl NetworkBehaviour for Discovery { // update network globals *self.network_globals.local_enr.write() = enr; // A new UDP socket has been detected. 
- // Build a multiaddr to report to libp2p - let addr = match socket_addr.ip() { - IpAddr::V4(v4_addr) => { - self.network_globals.listen_port_tcp4().map(|tcp4_port| { - Multiaddr::from(v4_addr).with(Protocol::Tcp(tcp4_port)) - }) - } - IpAddr::V6(v6_addr) => { - self.network_globals.listen_port_tcp6().map(|tcp6_port| { - Multiaddr::from(v6_addr).with(Protocol::Tcp(tcp6_port)) - }) - } - }; - - if let Some(address) = addr { - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address)); - } + // NOTE: We assume libp2p itself can keep track of IP changes and we do + // not inform it about IP changes found via discovery. } Discv5Event::EnrAdded { .. } | Discv5Event::TalkRequest(_) @@ -1083,12 +1046,79 @@ impl NetworkBehaviour for Discovery { FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => { self.on_dial_failure(peer_id, error) } + FromSwarm::NewListenAddr(ev) => { + let addr = ev.addr; + let listener_id = ev.listener_id; + + trace!(self.log, "Received NewListenAddr event from swarm"; "listener_id" => ?listener_id, "addr" => ?addr); + + let mut addr_iter = addr.iter(); + + let attempt_enr_update = match addr_iter.next() { + Some(Protocol::Ip4(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + Some(Protocol::Ip6(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { 
+ if !self.update_ports.tcp6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (no IP)"; "addr" => ?addr); + return; + } + }; + + let local_enr: Enr = self.discv5.local_enr(); + + match attempt_enr_update { + Ok(_) => { + info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) + } + Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), + } + } FromSwarm::ConnectionEstablished(_) | FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) @@ -1152,8 +1182,6 @@ mod tests { let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, - Some(9000), - None, MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), @@ -1261,6 +1289,6 @@ mod tests { assert_eq!(results.len(), 2); // when a peer belongs to multiple subnet ids, we use the highest ttl. 
- assert_eq!(results.get(&enr1.peer_id()).unwrap(), &instant1); + assert_eq!(results.get(&enr1).unwrap(), &instant1); } } diff --git a/beacon_node/lighthouse_network/src/listen_addr.rs b/beacon_node/lighthouse_network/src/listen_addr.rs index 20d87d403c..53f7d9daca 100644 --- a/beacon_node/lighthouse_network/src/listen_addr.rs +++ b/beacon_node/lighthouse_network/src/listen_addr.rs @@ -6,14 +6,23 @@ use serde::{Deserialize, Serialize}; /// A listening address composed by an Ip, an UDP port and a TCP port. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ListenAddr { + /// The IP address we will listen on. pub addr: Ip, - pub udp_port: u16, + /// The UDP port that discovery will listen on. + pub disc_port: u16, + /// The UDP port that QUIC will listen on. + pub quic_port: u16, + /// The TCP port that libp2p will listen on. pub tcp_port: u16, } impl + Clone> ListenAddr { - pub fn udp_socket_addr(&self) -> SocketAddr { - (self.addr.clone().into(), self.udp_port).into() + pub fn discovery_socket_addr(&self) -> SocketAddr { + (self.addr.clone().into(), self.disc_port).into() + } + + pub fn quic_socket_addr(&self) -> SocketAddr { + (self.addr.clone().into(), self.quic_port).into() } pub fn tcp_socket_addr(&self) -> SocketAddr { @@ -46,22 +55,41 @@ impl ListenAddress { } } - /// Returns the TCP addresses. - pub fn tcp_addresses(&self) -> impl Iterator + '_ { - let v4_multiaddr = self + /// Returns the addresses the Swarm will listen on, given the setup. 
+ pub fn libp2p_addresses(&self) -> impl Iterator { + let v4_tcp_multiaddr = self .v4() .map(|v4_addr| Multiaddr::from(v4_addr.addr).with(Protocol::Tcp(v4_addr.tcp_port))); - let v6_multiaddr = self + + let v4_quic_multiaddr = self.v4().map(|v4_addr| { + Multiaddr::from(v4_addr.addr) + .with(Protocol::Udp(v4_addr.quic_port)) + .with(Protocol::QuicV1) + }); + + let v6_quic_multiaddr = self.v6().map(|v6_addr| { + Multiaddr::from(v6_addr.addr) + .with(Protocol::Udp(v6_addr.quic_port)) + .with(Protocol::QuicV1) + }); + + let v6_tcp_multiaddr = self .v6() .map(|v6_addr| Multiaddr::from(v6_addr.addr).with(Protocol::Tcp(v6_addr.tcp_port))); - v4_multiaddr.into_iter().chain(v6_multiaddr) + + v4_tcp_multiaddr + .into_iter() + .chain(v4_quic_multiaddr) + .chain(v6_quic_multiaddr) + .chain(v6_tcp_multiaddr) } #[cfg(test)] pub fn unused_v4_ports() -> Self { ListenAddress::V4(ListenAddr { addr: Ipv4Addr::UNSPECIFIED, - udp_port: unused_port::unused_udp4_port().unwrap(), + disc_port: unused_port::unused_udp4_port().unwrap(), + quic_port: unused_port::unused_udp4_port().unwrap(), tcp_port: unused_port::unused_tcp4_port().unwrap(), }) } @@ -70,7 +98,8 @@ impl ListenAddress { pub fn unused_v6_ports() -> Self { ListenAddress::V6(ListenAddr { addr: Ipv6Addr::UNSPECIFIED, - udp_port: unused_port::unused_udp6_port().unwrap(), + disc_port: unused_port::unused_udp6_port().unwrap(), + quic_port: unused_port::unused_udp6_port().unwrap(), tcp_port: unused_port::unused_tcp6_port().unwrap(), }) } @@ -84,12 +113,14 @@ impl slog::KV for ListenAddress { ) -> slog::Result { if let Some(v4_addr) = self.v4() { serializer.emit_arguments("ip4_address", &format_args!("{}", v4_addr.addr))?; - serializer.emit_u16("udp4_port", v4_addr.udp_port)?; + serializer.emit_u16("disc4_port", v4_addr.disc_port)?; + serializer.emit_u16("quic4_port", v4_addr.quic_port)?; serializer.emit_u16("tcp4_port", v4_addr.tcp_port)?; } if let Some(v6_addr) = self.v6() { serializer.emit_arguments("ip6_address", 
&format_args!("{}", v6_addr.addr))?; - serializer.emit_u16("udp6_port", v6_addr.udp_port)?; + serializer.emit_u16("disc6_port", v6_addr.disc_port)?; + serializer.emit_u16("quic6_port", v6_addr.quic_port)?; serializer.emit_u16("tcp6_port", v6_addr.tcp_port)?; } slog::Result::Ok(()) diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 58cc992012..4650553d89 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,3 +1,6 @@ +use libp2p::bandwidth::BandwidthSinks; +use std::sync::Arc; + pub use lighthouse_metrics::*; lazy_static! { @@ -14,6 +17,16 @@ lazy_static! { "Count of libp2p peers currently connected" ); + pub static ref TCP_PEERS_CONNECTED: Result = try_create_int_gauge( + "libp2p_tcp_peers", + "Count of libp2p peers currently connected via TCP" + ); + + pub static ref QUIC_PEERS_CONNECTED: Result = try_create_int_gauge( + "libp2p_quic_peers", + "Count of libp2p peers currently connected via QUIC" + ); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" @@ -174,3 +187,46 @@ pub fn scrape_discovery_metrics() { set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); } + +/// Aggregated `BandwidthSinks` of tcp and quic transports +/// used in libp2p. +pub struct AggregatedBandwidthSinks { + tcp_sinks: Arc, + quic_sinks: Option>, +} + +impl AggregatedBandwidthSinks { + /// Create a new `AggregatedBandwidthSinks`. + pub fn new(tcp_sinks: Arc, quic_sinks: Option>) -> Self { + AggregatedBandwidthSinks { + tcp_sinks, + quic_sinks, + } + } + + /// Total QUIC inbound bandwidth. + pub fn total_quic_inbound(&self) -> u64 { + self.quic_sinks + .as_ref() + .map(|q| q.total_inbound()) + .unwrap_or_default() + } + + /// Total TCP inbound bandwidth. 
+ pub fn total_tcp_inbound(&self) -> u64 { + self.tcp_sinks.total_inbound() + } + + /// Total QUIC outbound bandwidth. + pub fn total_quic_outbound(&self) -> u64 { + self.quic_sinks + .as_ref() + .map(|q| q.total_outbound()) + .unwrap_or_default() + } + + /// Total TCP outbound bandwidth. + pub fn total_tcp_outbound(&self) -> u64 { + self.tcp_sinks.total_outbound() + } +} diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4f3454f403..3459548ec3 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,5 +1,6 @@ //! Implementation of Lighthouse's peer management system. +use crate::discovery::enr_ext::EnrExt; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub}; @@ -13,7 +14,6 @@ use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; use smallvec::SmallVec; -use std::collections::BTreeMap; use std::{ sync::Arc, time::{Duration, Instant}, @@ -78,7 +78,7 @@ pub struct PeerManager { /// The target number of peers we would like to connect to. target_peers: usize, /// Peers queued to be dialed. - peers_to_dial: BTreeMap>, + peers_to_dial: Vec, /// The number of temporarily banned peers. This is used to prevent instantaneous /// reconnection. // NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected. A @@ -312,16 +312,12 @@ impl PeerManager { /// Peers that have been returned by discovery requests that are suitable for dialing are /// returned here. /// - /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated - /// with a new `PeerId` which involves a discovery routing table lookup. 
We could dial the - /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup - /// proves resource constraining, we should switch to multiaddr dialling here. + /// This function decides whether or not to dial these peers. #[allow(clippy::mutable_key_type)] - pub fn peers_discovered(&mut self, results: HashMap>) -> Vec { - let mut to_dial_peers = Vec::with_capacity(4); - + pub fn peers_discovered(&mut self, results: HashMap>) { + let mut to_dial_peers = 0; let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); - for (peer_id, min_ttl) in results { + for (enr, min_ttl) in results { // There are two conditions in deciding whether to dial this peer. // 1. If we are less than our max connections. Discovery queries are executed to reach // our target peers, so its fine to dial up to our max peers (which will get pruned @@ -330,10 +326,8 @@ impl PeerManager { // considered a priority. We have pre-allocated some extra priority slots for these // peers as specified by PRIORITY_PEER_EXCESS. Therefore we dial these peers, even // if we are already at our max_peer limit. - if (min_ttl.is_some() - && connected_or_dialing + to_dial_peers.len() < self.max_priority_peers() - || connected_or_dialing + to_dial_peers.len() < self.max_peers()) - && self.network_globals.peers.read().should_dial(&peer_id) + if min_ttl.is_some() && connected_or_dialing + to_dial_peers < self.max_priority_peers() + || connected_or_dialing + to_dial_peers < self.max_peers() { // This should be updated with the peer dialing. 
In fact created once the peer is // dialed @@ -341,16 +335,16 @@ impl PeerManager { self.network_globals .peers .write() - .update_min_ttl(&peer_id, min_ttl); + .update_min_ttl(&enr.peer_id(), min_ttl); } - to_dial_peers.push(peer_id); + debug!(self.log, "Dialing discovered peer"; "peer_id" => %enr.peer_id()); + self.dial_peer(enr); + to_dial_peers += 1; } } // Queue another discovery if we need to - self.maintain_peer_count(to_dial_peers.len()); - - to_dial_peers + self.maintain_peer_count(to_dial_peers); } /// A STATUS message has been received from a peer. This resets the status timer. @@ -406,15 +400,22 @@ impl PeerManager { /* Notifications from the Swarm */ - // A peer is being dialed. - pub fn dial_peer(&mut self, peer_id: &PeerId, enr: Option) { - self.peers_to_dial.insert(*peer_id, enr); + /// A peer is being dialed. + pub fn dial_peer(&mut self, peer: Enr) { + if self + .network_globals + .peers + .read() + .should_dial(&peer.peer_id()) + { + self.peers_to_dial.push(peer); + } } /// Reports if a peer is banned or not. /// /// This is used to determine if we should accept incoming connections. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { + pub fn ban_status(&self, peer_id: &PeerId) -> Option { self.network_globals.peers.read().ban_status(peer_id) } @@ -516,6 +517,11 @@ impl PeerManager { RPCError::ErrorResponse(code, _) => match code { RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, RPCResponseErrorCode::ResourceUnavailable => { + // Don't ban on this because we want to retry with a block by root request. + if matches!(protocol, Protocol::BlobsByRoot) { + return; + } + // NOTE: This error only makes sense for the `BlocksByRange` and `BlocksByRoot` // protocols. 
// @@ -544,11 +550,14 @@ impl PeerManager { Protocol::Ping => PeerAction::MidToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => PeerAction::LowToleranceError, + Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, }, + RPCResponseErrorCode::BlobsNotFoundForBlock => PeerAction::LowToleranceError, }, RPCError::SSZDecodeError(_) => PeerAction::Fatal, RPCError::UnsupportedProtocol => { @@ -560,6 +569,8 @@ impl PeerManager { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, Protocol::BlocksByRoot => return, + Protocol::BlobsByRange => return, + Protocol::BlobsByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::MetaData => PeerAction::Fatal, @@ -576,6 +587,8 @@ impl PeerManager { Protocol::Ping => PeerAction::LowToleranceError, Protocol::BlocksByRange => PeerAction::MidToleranceError, Protocol::BlocksByRoot => PeerAction::MidToleranceError, + Protocol::BlobsByRange => PeerAction::MidToleranceError, + Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::Goodbye => return, Protocol::MetaData => return, @@ -802,7 +815,7 @@ impl PeerManager { ) -> bool { { let mut peerdb = self.network_globals.peers.write(); - if !matches!(peerdb.ban_status(peer_id), BanResult::NotBanned) { + if peerdb.ban_status(peer_id).is_some() { // don't connect if the peer is banned error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } @@ -969,6 +982,7 @@ impl PeerManager { macro_rules! 
prune_peers { ($filter: expr) => { + let filter = $filter; for (peer_id, info) in self .network_globals .peers @@ -976,7 +990,7 @@ impl PeerManager { .worst_connected_peers() .iter() .filter(|(_, info)| { - !info.has_future_duty() && !info.is_trusted() && $filter(*info) + !info.has_future_duty() && !info.is_trusted() && filter(*info) }) { if peers_to_prune.len() @@ -1041,7 +1055,7 @@ impl PeerManager { Subnet::Attestation(_) => { subnet_to_peer .entry(subnet) - .or_insert_with(Vec::new) + .or_default() .push((*peer_id, info.clone())); } Subnet::SyncCommittee(id) => { @@ -2195,7 +2209,7 @@ mod tests { } impl Arbitrary for PeerCondition { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let attestation_net_bitfield = { let len = ::SubnetBitfieldLength::to_usize(); let mut bitfield = Vec::with_capacity(len); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 70f421681a..0617c8fa37 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,23 +1,25 @@ //! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. 
+use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; -use libp2p::core::ConnectedPoint; +use libp2p::core::{multiaddr, ConnectedPoint}; use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; -use slog::{debug, error}; +use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; +use slog::{debug, error, trace}; use types::EthSpec; +use crate::discovery::enr_ext::EnrExt; +use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; -use super::peerdb::BanResult; -use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; @@ -95,11 +97,23 @@ impl NetworkBehaviour for PeerManager { self.events.shrink_to_fit(); } - if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { - self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); + if let Some(enr) = self.peers_to_dial.pop() { + let peer_id = enr.peer_id(); + self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone())); + let quic_multiaddrs = enr.multiaddr_quic(); + if !quic_multiaddrs.is_empty() { + debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs); + } + + // Prioritize Quic connections over Tcp ones. 
+ let multiaddrs = quic_multiaddrs + .into_iter() + .chain(enr.multiaddr_tcp()) + .collect(); return Poll::Ready(ToSwarm::Dial { opts: DialOpts::peer_id(peer_id) .condition(PeerCondition::Disconnected) + .addresses(multiaddrs) .build(), }); } @@ -124,9 +138,11 @@ impl NetworkBehaviour for PeerManager { } FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, + endpoint, + remaining_established, .. - }) => self.on_connection_closed(peer_id, remaining_established), + }) => self.on_connection_closed(peer_id, endpoint, remaining_established), FromSwarm::DialFailure(DialFailure { peer_id, error, @@ -154,26 +170,64 @@ impl NetworkBehaviour for PeerManager { } } + fn handle_pending_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result<(), ConnectionDenied> { + // get the IP address to verify it's not banned. + let ip = match remote_addr.iter().next() { + Some(libp2p::multiaddr::Protocol::Ip6(ip)) => IpAddr::V6(ip), + Some(libp2p::multiaddr::Protocol::Ip4(ip)) => IpAddr::V4(ip), + _ => { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: invalid multiaddr: {remote_addr}" + ))) + } + }; + + if self.network_globals.peers.read().is_ip_banned(&ip) { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: peer {ip} is banned" + ))); + } + + Ok(()) + } + fn handle_established_inbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, + peer_id: PeerId, _local_addr: &libp2p::Multiaddr, - _remote_addr: &libp2p::Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. + remote_addr: &libp2p::Multiaddr, + ) -> Result, ConnectionDenied> { + trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); + // We already checked if the peer was banned on `handle_pending_inbound_connection`. 
+ if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + return Err(ConnectionDenied::new( + "Connection to peer rejected: peer has a bad score", + )); + } Ok(ConnectionHandler) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, - _addr: &libp2p::Multiaddr, + peer_id: PeerId, + addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. - Ok(ConnectionHandler) + trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); + match self.ban_status(&peer_id) { + Some(cause) => { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + Err(ConnectionDenied::new(cause)) + } + None => Ok(ConnectionHandler), + } } } @@ -184,7 +238,11 @@ impl PeerManager { endpoint: &ConnectedPoint, other_established: usize, ) { - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint()); + debug!(self.log, "Connection established"; "peer_id" => %peer_id, + "multiaddr" => %endpoint.get_remote_address(), + "connection" => ?endpoint.to_endpoint() + ); + if other_established == 0 { self.events.push(PeerManagerEvent::MetaData(peer_id)); } @@ -194,26 +252,29 @@ impl PeerManager { metrics::check_nat(); } - // Check to make sure the peer is not supposed to be banned - match self.ban_status(&peer_id) { - // TODO: directly emit the ban event? - BanResult::BadScore => { - // This is a faulty state - error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Disconnect the peer. - self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); - // Re-ban the peer to prevent repeated errors. 
- self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); - return; - } - BanResult::BannedIp(ip_addr) => { - // A good peer has connected to us via a banned IP address. We ban the peer and - // prevent future connections. - debug!(self.log, "Peer connected via banned IP. Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr); - self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); - return; - } - BanResult::NotBanned => {} + // increment prometheus metrics + if self.metrics_enabled { + let remote_addr = endpoint.get_remote_address(); + match remote_addr.iter().find(|proto| { + matches!( + proto, + multiaddr::Protocol::QuicV1 | multiaddr::Protocol::Tcp(_) + ) + }) { + Some(multiaddr::Protocol::QuicV1) => { + metrics::inc_gauge(&metrics::QUIC_PEERS_CONNECTED); + } + Some(multiaddr::Protocol::Tcp(_)) => { + metrics::inc_gauge(&metrics::TCP_PEERS_CONNECTED); + } + Some(_) => unreachable!(), + None => { + error!(self.log, "Connection established via unknown transport"; "addr" => %remote_addr) + } + }; + + self.update_connected_peer_metrics(); + metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } // Count dialing peers in the limit if the peer dialed us. @@ -245,14 +306,15 @@ impl PeerManager { self.events .push(PeerManagerEvent::PeerConnectedOutgoing(peer_id)); } - } - - // increment prometheus metrics - self.update_connected_peer_metrics(); - metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); + }; } - fn on_connection_closed(&mut self, peer_id: PeerId, remaining_established: usize) { + fn on_connection_closed( + &mut self, + peer_id: PeerId, + endpoint: &ConnectedPoint, + remaining_established: usize, + ) { if remaining_established > 0 { return; } @@ -278,9 +340,27 @@ impl PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(&peer_id); + let remote_addr = endpoint.get_remote_address(); // Update the prometheus metrics - self.update_connected_peer_metrics(); - metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + if self.metrics_enabled { + match remote_addr.iter().find(|proto| { + matches!( + proto, + multiaddr::Protocol::QuicV1 | multiaddr::Protocol::Tcp(_) + ) + }) { + Some(multiaddr::Protocol::QuicV1) => { + metrics::dec_gauge(&metrics::QUIC_PEERS_CONNECTED); + } + Some(multiaddr::Protocol::Tcp(_)) => { + metrics::dec_gauge(&metrics::TCP_PEERS_CONNECTED); + } + // If it's an unknown protocol we already logged when connection was established. + _ => {} + }; + self.update_connected_peer_metrics(); + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + } } /// A dial attempt has failed. diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 52f0bbd9df..7157a62721 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,17 +1,15 @@ -use crate::{ - metrics, - multiaddr::{Multiaddr, Protocol}, - types::Subnet, - Enr, Gossipsub, PeerId, -}; +use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, Gossipsub, PeerId}; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; -use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::time::Instant; +use std::{cmp::Ordering, fmt::Display}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Formatter, +}; use sync_status::SyncStatus; use types::EthSpec; @@ -141,26 +139,18 @@ impl PeerDB { } } - /// Returns the current [`BanResult`] of the peer. 
This doesn't check the connection state, rather the + /// Returns the current [`BanResult`] of the peer if banned. This doesn't check the connection state, rather the /// underlying score of the peer. A peer may be banned but still in the connected state /// temporarily. /// /// This is used to determine if we should accept incoming connections or not. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { - if let Some(peer) = self.peers.get(peer_id) { - match peer.score_state() { - ScoreState::Banned => BanResult::BadScore, - _ => { - if let Some(ip) = self.ip_is_banned(peer) { - BanResult::BannedIp(ip) - } else { - BanResult::NotBanned - } - } - } - } else { - BanResult::NotBanned - } + pub fn ban_status(&self, peer_id: &PeerId) -> Option { + self.peers + .get(peer_id) + .and_then(|peer| match peer.score_state() { + ScoreState::Banned => Some(BanResult::BadScore), + _ => self.ip_is_banned(peer).map(BanResult::BannedIp), + }) } /// Checks if the peer's known addresses are currently banned. @@ -764,28 +754,10 @@ impl PeerDB { | PeerConnectionStatus::Dialing { .. 
} => {} } - // Add the seen ip address and port to the peer's info - let socket_addr = match seen_address.iter().fold( - (None, None), - |(found_ip, found_port), protocol| match protocol { - Protocol::Ip4(ip) => (Some(ip.into()), found_port), - Protocol::Ip6(ip) => (Some(ip.into()), found_port), - Protocol::Tcp(port) => (found_ip, Some(port)), - _ => (found_ip, found_port), - }, - ) { - (Some(ip), Some(port)) => Some(SocketAddr::new(ip, port)), - (Some(_ip), None) => { - crit!(self.log, "Connected peer has an IP but no TCP port"; "peer_id" => %peer_id); - None - } - _ => None, - }; - // Update the connection state match direction { - ConnectionDirection::Incoming => info.connect_ingoing(socket_addr), - ConnectionDirection::Outgoing => info.connect_outgoing(socket_addr), + ConnectionDirection::Incoming => info.connect_ingoing(Some(seen_address)), + ConnectionDirection::Outgoing => info.connect_outgoing(Some(seen_address)), } } @@ -1206,23 +1178,25 @@ pub enum BanOperation { } /// When checking if a peer is banned, it can be banned for multiple reasons. +#[derive(Copy, Clone, Debug)] pub enum BanResult { /// The peer's score is too low causing it to be banned. BadScore, /// The peer should be banned because it is connecting from a banned IP address. BannedIp(IpAddr), - /// The peer is not banned. - NotBanned, } -// Helper function for unit tests -#[cfg(test)] -impl BanResult { - pub fn is_banned(&self) -> bool { - !matches!(self, BanResult::NotBanned) +impl Display for BanResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BanResult::BadScore => write!(f, "Peer has a bad score"), + BanResult::BannedIp(addr) => write!(f, "Peer address: {} is banned", addr), + } } } +impl std::error::Error for BanResult {} + #[derive(Default)] pub struct BannedPeersCount { /// The number of banned peers in the database. 
@@ -1274,6 +1248,7 @@ impl BannedPeersCount { #[cfg(test)] mod tests { use super::*; + use libp2p::core::multiaddr::Protocol; use libp2p::core::Multiaddr; use slog::{o, Drain}; use std::net::{Ipv4Addr, Ipv6Addr}; @@ -1874,11 +1849,11 @@ mod tests { } //check that ip1 and ip2 are banned but ip3-5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); //ban also the last peer in peers let _ = pdb.report_peer( @@ -1890,11 +1865,11 @@ mod tests { pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[0] gets unbanned reset_score(&mut pdb, &peers[0]); @@ -1902,11 +1877,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //nothing changed - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[1] gets unbanned reset_score(&mut pdb, 
&peers[1]); @@ -1914,11 +1889,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //all ips are unbanned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); } #[test] @@ -1943,8 +1918,8 @@ mod tests { } // check ip is banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_none()); // unban a peer reset_score(&mut pdb, &peers[0]); @@ -1952,8 +1927,8 @@ mod tests { let _ = pdb.shrink_to_fit(); // check not banned anymore - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // unban all peers for p in &peers { @@ -1972,8 +1947,8 @@ mod tests { } // both IP's are now banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); // unban all peers for p in &peers { @@ -1989,16 +1964,16 @@ mod tests { } // nothing is banned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // reban last peer let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); //Ip's are banned again - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); } #[test] diff --git 
a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 555266d0e2..44c54511dd 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -2,15 +2,15 @@ use super::client::Client; use super::score::{PeerAction, Score, ScoreState}; use super::sync_status::SyncStatus; use crate::discovery::Eth2Enr; -use crate::Multiaddr; use crate::{rpc::MetaData, types::Subnet}; use discv5::Enr; +use libp2p::core::multiaddr::{Multiaddr, Protocol}; use serde::{ ser::{SerializeStruct, Serializer}, Serialize, }; use std::collections::HashSet; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::time::Instant; use strum::AsRefStr; use types::EthSpec; @@ -29,9 +29,9 @@ pub struct PeerInfo { /// The known listening addresses of this peer. This is given by identify and can be arbitrary /// (including local IPs). listening_addresses: Vec, - /// This is addresses we have physically seen and this is what we use for banning/un-banning + /// These are the multiaddrs we have physically seen and is what we use for banning/un-banning /// peers. - seen_addresses: HashSet, + seen_multiaddrs: HashSet, /// The current syncing state of the peer. The state may be determined after it's initial /// connection. sync_status: SyncStatus, @@ -60,7 +60,7 @@ impl Default for PeerInfo { client: Client::default(), connection_status: Default::default(), listening_addresses: Vec::new(), - seen_addresses: HashSet::new(), + seen_multiaddrs: HashSet::new(), subnets: HashSet::new(), sync_status: SyncStatus::Unknown, meta_data: None, @@ -227,15 +227,21 @@ impl PeerInfo { } /// Returns the seen addresses of the peer. 
- pub fn seen_addresses(&self) -> impl Iterator + '_ { - self.seen_addresses.iter() + pub fn seen_multiaddrs(&self) -> impl Iterator + '_ { + self.seen_multiaddrs.iter() } /// Returns a list of seen IP addresses for the peer. pub fn seen_ip_addresses(&self) -> impl Iterator + '_ { - self.seen_addresses - .iter() - .map(|socket_addr| socket_addr.ip()) + self.seen_multiaddrs.iter().filter_map(|multiaddr| { + multiaddr.iter().find_map(|protocol| { + match protocol { + Protocol::Ip4(ip) => Some(ip.into()), + Protocol::Ip6(ip) => Some(ip.into()), + _ => None, // Only care for IP addresses + } + }) + }) } /// Returns the connection status of the peer. @@ -415,7 +421,7 @@ impl PeerInfo { /// Modifies the status to Connected and increases the number of ingoing /// connections by one - pub(super) fn connect_ingoing(&mut self, seen_address: Option) { + pub(super) fn connect_ingoing(&mut self, seen_multiaddr: Option) { match &mut self.connection_status { Connected { n_in, .. } => *n_in += 1, Disconnected { .. } @@ -428,14 +434,14 @@ impl PeerInfo { } } - if let Some(socket_addr) = seen_address { - self.seen_addresses.insert(socket_addr); + if let Some(multiaddr) = seen_multiaddr { + self.seen_multiaddrs.insert(multiaddr); } } /// Modifies the status to Connected and increases the number of outgoing /// connections by one - pub(super) fn connect_outgoing(&mut self, seen_address: Option) { + pub(super) fn connect_outgoing(&mut self, seen_multiaddr: Option) { match &mut self.connection_status { Connected { n_out, .. } => *n_out += 1, Disconnected { .. 
} @@ -447,8 +453,8 @@ impl PeerInfo { self.connection_direction = Some(ConnectionDirection::Outgoing); } } - if let Some(ip_addr) = seen_address { - self.seen_addresses.insert(ip_addr); + if let Some(multiaddr) = seen_multiaddr { + self.seen_multiaddrs.insert(multiaddr); } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index bafa355d68..877d725812 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -330,13 +330,15 @@ impl Eq for Score {} impl PartialOrd for Score { fn partial_cmp(&self, other: &Score) -> Option { - self.score().partial_cmp(&other.score()) + Some(self.cmp(other)) } } impl Ord for Score { fn cmp(&self, other: &Score) -> std::cmp::Ordering { - self.partial_cmp(other).unwrap_or(std::cmp::Ordering::Equal) + self.score() + .partial_cmp(&other.score()) + .unwrap_or(std::cmp::Ordering::Equal) } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index 943d4a3bce..6d622fcc8a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -194,16 +194,19 @@ mod tests { let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => 
capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f1d94da7ec..f1b76674ae 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -15,10 +15,11 @@ use std::io::{Read, Write}; use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::light_client_bootstrap::LightClientBootstrap; +use types::{light_client_bootstrap::LightClientBootstrap, BlobSidecar}; use types::{ EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockMerge, + SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, + SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -71,6 +72,8 @@ impl Encoder> for SSZSnappyInboundCodec< RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), + RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), RPCResponse::MetaData(res) => @@ -132,9 +135,8 @@ impl Decoder for SSZSnappyInboundCodec { if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 { return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2()))); } - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? 
else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -222,6 +224,8 @@ impl Encoder> for SSZSnappyOutboundCodec< BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(), BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(), }, + OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), + OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -272,9 +276,8 @@ impl Decoder for SSZSnappyOutboundCodec { return Ok(None); } } - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -284,8 +287,8 @@ impl Decoder for SSZSnappyOutboundCodec { .rpc_response_limits::(&self.fork_context); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData(format!( - "RPC response length is out of bounds, length {}", - length + "RPC response length is out of bounds, length {}, max {}, min {}", + length, ssz_limits.max, ssz_limits.min ))); } // Calculate worst case compression length for given uncompressed length @@ -319,9 +322,8 @@ impl OutboundCodec> for SSZSnappyOutbound &mut self, src: &mut BytesMut, ) -> Result, RPCError> { - let length = match handle_length(&mut self.inner, &mut self.len, src)? { - Some(len) => len, - None => return Ok(None), + let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? 
else { + return Ok(None); }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -396,22 +398,24 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Deneb { .. } => { + fork_context.to_context_bytes(ForkName::Deneb) + } SignedBeaconBlock::Capella { .. } => { - // Capella context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Capella) } SignedBeaconBlock::Merge { .. } => { - // Merge context being `None` implies that "merge never happened". fork_context.to_context_bytes(ForkName::Merge) } SignedBeaconBlock::Altair { .. } => { - // Altair context being `None` implies that "altair never happened". - // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. fork_context.to_context_bytes(ForkName::Altair) } SignedBeaconBlock::Base { .. 
} => Some(fork_context.genesis_context_bytes()), }; } + if let RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) = rpc_variant { + return fork_context.to_context_bytes(ForkName::Deneb); + } } } None @@ -472,6 +476,14 @@ fn handle_rpc_request( block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, }), ))), + SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), + SupportedProtocol::BlobsByRootV1 => { + Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest { + blob_ids: VariableList::from_ssz_bytes(decoded_buffer)?, + }))) + } SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -526,6 +538,38 @@ fn handle_rpc_response( SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + SupportedProtocol::BlobsByRangeV1 => match fork_name { + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRange(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))), + Some(_) => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for blobs by range".to_string(), + )), + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::BlobsByRootV1 => match fork_name { + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlobsByRoot(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))), + Some(_) => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for blobs by root".to_string(), + )), + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::PingV1 => 
Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -555,6 +599,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, format!( @@ -578,6 +625,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, format!( @@ -598,9 +648,13 @@ fn context_bytes_to_fork_name( .from_context_bytes(context_bytes) .cloned() .ok_or_else(|| { + let encoded = hex::encode(context_bytes); RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, - "Context bytes does not correspond to a valid fork".to_string(), + format!( + "Context bytes {} do not correspond to a valid fork", + encoded + ), ) }) } @@ -615,8 +669,9 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, - Epoch, ForkContext, FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, + blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockMerge, ChainSpec, EmptyBlock, Epoch, ForkContext, FullPayload, Hash256, + Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -630,16 +685,19 @@ mod tests { let altair_fork_epoch = Epoch::new(1); let merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let 
current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -657,6 +715,10 @@ mod tests { SignedBeaconBlock::from_block(full_block, Signature::empty()) } + fn empty_blob_sidecar() -> Arc> { + Arc::new(BlobSidecar::empty()) + } + /// Merge block with length < max_rpc_size. fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockMerge<_, FullPayload> = @@ -705,6 +767,13 @@ mod tests { OldBlocksByRangeRequest::new(0, 10, 1) } + fn blbrange_request() -> BlobsByRangeRequest { + BlobsByRangeRequest { + start_slot: 0, + count: 10, + } + } + fn bbroot_request_v1() -> BlocksByRootRequest { BlocksByRootRequest::new_v1(vec![Hash256::zero()].into()) } @@ -713,6 +782,15 @@ mod tests { BlocksByRootRequest::new(vec![Hash256::zero()].into()) } + fn blbroot_request() -> BlobsByRootRequest { + BlobsByRootRequest { + blob_ids: VariableList::from(vec![BlobIdentifier { + block_root: Hash256::zero(), + index: 0, + }]), + } + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -846,6 +924,12 @@ mod tests { OutboundRequest::BlocksByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) } + OutboundRequest::BlobsByRange(blbrange) => { + assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange)) + } + OutboundRequest::BlobsByRoot(bbroot) => { + assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } @@ -952,6 +1036,26 @@ mod tests { ), Ok(Some(RPCResponse::MetaData(metadata()))), ); + + assert_eq!( + encode_then_decode_response( + 
SupportedProtocol::BlobsByRangeV1, + RPCCodedResponse::Success(RPCResponse::BlobsByRange(empty_blob_sidecar())), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::BlobsByRange(empty_blob_sidecar()))), + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRootV1, + RPCCodedResponse::Success(RPCResponse::BlobsByRoot(empty_blob_sidecar())), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), + ); } // Test RPCResponse encoding/decoding for V1 messages @@ -1297,6 +1401,8 @@ mod tests { OutboundRequest::BlocksByRoot(bbroot_request_v1()), OutboundRequest::BlocksByRoot(bbroot_request_v2()), OutboundRequest::MetaData(MetadataRequest::new_v1()), + OutboundRequest::BlobsByRange(blbrange_request()), + OutboundRequest::BlobsByRoot(blbroot_request()), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index a0f3acaf76..ad96731141 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -6,7 +6,7 @@ use std::{ use super::{methods, rate_limiter::Quota, Protocol}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Auxiliary struct to aid on configuration parsing. 
/// @@ -89,6 +89,8 @@ pub struct RateLimiterConfig { pub(super) goodbye_quota: Quota, pub(super) blocks_by_range_quota: Quota, pub(super) blocks_by_root_quota: Quota, + pub(super) blobs_by_range_quota: Quota, + pub(super) blobs_by_root_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, } @@ -100,6 +102,9 @@ impl RateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(methods::MAX_REQUEST_BLOCKS, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = + Quota::n_every(methods::MAX_REQUEST_BLOB_SIDECARS, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); } @@ -112,6 +117,8 @@ impl Default for RateLimiterConfig { goodbye_quota: Self::DEFAULT_GOODBYE_QUOTA, blocks_by_range_quota: Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA, blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, + blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, + blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, } } @@ -136,6 +143,8 @@ impl Debug for RateLimiterConfig { .field("goodbye", fmt_q!(&self.goodbye_quota)) .field("blocks_by_range", fmt_q!(&self.blocks_by_range_quota)) .field("blocks_by_root", fmt_q!(&self.blocks_by_root_quota)) + .field("blobs_by_range", fmt_q!(&self.blobs_by_range_quota)) + .field("blobs_by_root", fmt_q!(&self.blobs_by_root_quota)) .finish() } } @@ -154,6 +163,8 @@ impl FromStr for RateLimiterConfig { let mut goodbye_quota = None; let mut blocks_by_range_quota = None; let mut blocks_by_root_quota = None; + let mut blobs_by_range_quota = None; + let mut blobs_by_root_quota = None; let mut light_client_bootstrap_quota = None; for proto_def in s.split(';') { @@ -164,6 +175,8 @@ impl FromStr for RateLimiterConfig { Protocol::Goodbye => goodbye_quota = goodbye_quota.or(quota), 
Protocol::BlocksByRange => blocks_by_range_quota = blocks_by_range_quota.or(quota), Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), + Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), + Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -180,6 +193,9 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_BLOCKS_BY_RANGE_QUOTA), blocks_by_root_quota: blocks_by_root_quota .unwrap_or(Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA), + blobs_by_range_quota: blobs_by_range_quota + .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), + blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), }) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 36a5abc086..8d1563fafa 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -1,7 +1,7 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination}; +use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend, ReqId}; @@ -42,6 +42,12 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); +impl SubstreamId { + pub fn new(id: usize) -> Self { + Self(id) + } +} + type InboundSubstream = InboundFramed; /// Events the handler emits to the behaviour. 
@@ -280,9 +286,7 @@ where // wrong state a response will fail silently. fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse) { // check if the stream matching the response still exists - let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) { - info - } else { + let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { if !matches!(response, RPCCodedResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(self.log, "Inbound stream has expired. Response not sent"; @@ -290,7 +294,6 @@ where } return; }; - // If the response we are sending is an error, report back for handling if let RPCCodedResponse::Error(ref code, ref reason) = response { self.events_out.push(Err(HandlerErr::Inbound { @@ -593,6 +596,9 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!(self.log, "BlocksByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); } + if matches!(info.protocol, Protocol::BlobsByRange) { + debug!(self.log, "BlobsByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); + } // There is nothing more to process on this substream as it has // been closed. Move on to the next one. @@ -616,6 +622,9 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!(self.log, "BlocksByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); } + if matches!(info.protocol, Protocol::BlobsByRange) { + debug!(self.log, "BlobsByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); + } break; } // The sending future has not completed. Leave the state as busy and @@ -777,13 +786,8 @@ where // continue sending responses beyond what we would expect. 
Here // we simply terminate the stream and report a stream // termination to the application - let termination = match protocol { - Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange), - Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), - _ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream. - }; - if let Some(termination) = termination { + if let Some(termination) = protocol.terminator() { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( RPCReceived::EndOfStream(request_id, termination), ))); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index af0ba2510b..438c2c7544 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -6,7 +6,7 @@ use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::{ - typenum::{U1024, U256}, + typenum::{U1024, U128, U256, U768}, VariableList, }; use std::marker::PhantomData; @@ -14,8 +14,11 @@ use std::ops::Deref; use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; +use types::blob_sidecar::BlobIdentifier; +use types::consts::deneb::MAX_BLOBS_PER_BLOCK; use types::{ - light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + blob_sidecar::BlobSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, + Hash256, SignedBeaconBlock, Slot, }; /// Maximum number of blocks in a single request. 
@@ -26,6 +29,12 @@ pub const MAX_REQUEST_BLOCKS: u64 = 1024; pub type MaxErrorLen = U256; pub const MAX_ERROR_LEN: u64 = 256; +pub type MaxRequestBlocksDeneb = U128; +pub const MAX_REQUEST_BLOCKS_DENEB: u64 = 128; + +pub type MaxRequestBlobSidecars = U768; +pub const MAX_REQUEST_BLOB_SIDECARS: u64 = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK; + /// Wrapper over SSZ List to represent error message in rpc responses. #[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); @@ -278,6 +287,22 @@ impl BlocksByRangeRequest { } } +/// Request a number of beacon blobs from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct BlobsByRangeRequest { + /// The starting slot to request blobs. + pub start_slot: u64, + + /// The number of slots from the start slot. + pub count: u64, +} + +impl BlobsByRangeRequest { + pub fn max_blobs_requested(&self) -> u64 { + self.count.saturating_mul(E::max_blobs_per_block() as u64) + } +} + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -319,7 +344,10 @@ impl OldBlocksByRangeRequest { } /// Request a number of beacon block bodies from a peer. -#[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))] +#[superstruct( + variants(V1, V2), + variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq)) +)] #[derive(Clone, Debug, PartialEq)] pub struct BlocksByRootRequest { /// The list of beacon block bodies being requested. @@ -336,6 +364,13 @@ impl BlocksByRootRequest { } } +/// Request a number of beacon blocks and blobs from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct BlobsByRootRequest { + /// The list of beacon block roots being requested. + pub blob_ids: VariableList, +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -351,9 +386,15 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_ROOT request. 
BlocksByRoot(Arc>), + /// A response to a get BLOBS_BY_RANGE request + BlobsByRange(Arc>), + /// A response to a get LIGHTCLIENT_BOOTSTRAP request. LightClientBootstrap(LightClientBootstrap), + /// A response to a get BLOBS_BY_ROOT request. + BlobsByRoot(Arc>), + /// A PONG response to a PING request. Pong(Ping), @@ -369,6 +410,12 @@ pub enum ResponseTermination { /// Blocks by root stream termination. BlocksByRoot, + + /// Blobs by range stream termination. + BlobsByRange, + + /// Blobs by root stream termination. + BlobsByRoot, } /// The structured response containing a result/code indicating success or failure @@ -395,6 +442,7 @@ pub struct LightClientBootstrapRequest { #[strum(serialize_all = "snake_case")] pub enum RPCResponseErrorCode { RateLimited, + BlobsNotFoundForBlock, InvalidRequest, ServerError, /// Error spec'd to indicate that a peer does not have blocks on a requested range. @@ -424,6 +472,7 @@ impl RPCCodedResponse { 2 => RPCResponseErrorCode::ServerError, 3 => RPCResponseErrorCode::ResourceUnavailable, 139 => RPCResponseErrorCode::RateLimited, + 140 => RPCResponseErrorCode::BlobsNotFoundForBlock, _ => RPCResponseErrorCode::Unknown, }; RPCCodedResponse::Error(code, err) @@ -436,6 +485,8 @@ impl RPCCodedResponse { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, + RPCResponse::BlobsByRange(_) => true, + RPCResponse::BlobsByRoot(_) => true, RPCResponse::Pong(_) => false, RPCResponse::MetaData(_) => false, RPCResponse::LightClientBootstrap(_) => false, @@ -460,6 +511,7 @@ impl RPCResponseErrorCode { RPCResponseErrorCode::ResourceUnavailable => 3, RPCResponseErrorCode::Unknown => 255, RPCResponseErrorCode::RateLimited => 139, + RPCResponseErrorCode::BlobsNotFoundForBlock => 140, } } } @@ -471,6 +523,8 @@ impl RPCResponse { RPCResponse::Status(_) => Protocol::Status, RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + 
RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, + RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -486,6 +540,7 @@ impl std::fmt::Display for RPCResponseErrorCode { RPCResponseErrorCode::ServerError => "Server error occurred", RPCResponseErrorCode::Unknown => "Unknown error occurred", RPCResponseErrorCode::RateLimited => "Rate limited", + RPCResponseErrorCode::BlobsNotFoundForBlock => "No blobs for the given root", }; f.write_str(repr) } @@ -507,6 +562,12 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlocksByRoot(block) => { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } + RPCResponse::BlobsByRange(blob) => { + write!(f, "BlobsByRange: Blob slot: {}", blob.slot) + } + RPCResponse::BlobsByRoot(sidecar) => { + write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { @@ -565,6 +626,26 @@ impl std::fmt::Display for OldBlocksByRangeRequest { } } +impl std::fmt::Display for BlobsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: BlobsByRoot: Number of Requested Roots: {}", + self.blob_ids.len() + ) + } +} + +impl std::fmt::Display for BlobsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: BlobsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} + impl slog::KV for StatusMessage { fn serialize( &self, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 14f77e4ba2..ab87a533d6 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -324,8 +324,10 @@ where Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols let protocol = req.versioned_protocol().protocol(); - if matches!(protocol, Protocol::BlocksByRange) { - debug!(self.log, "Blocks by range request will never be processed"; "request" => %req); + if matches!(protocol, Protocol::BlocksByRange) + || matches!(protocol, Protocol::BlobsByRange) + { + debug!(self.log, "By range request will never be processed"; "request" => %req, "protocol" => %protocol); } else { crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); } @@ -416,6 +418,8 @@ where match end { ResponseTermination::BlocksByRange => Protocol::BlocksByRange, ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::BlobsByRange => Protocol::BlobsByRange, + ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, }, ), }, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index d12f366861..713e9e0ec9 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -35,6 +35,8 @@ pub enum OutboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + BlobsByRange(BlobsByRangeRequest), + BlobsByRoot(BlobsByRootRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -70,6 +72,14 @@ impl OutboundRequest { ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy), ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy), ], + OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRangeV1, + Encoding::SSZSnappy, + )], + OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::BlobsByRootV1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( 
SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -89,6 +99,8 @@ impl OutboundRequest { OutboundRequest::Goodbye(_) => 0, OutboundRequest::BlocksByRange(req) => *req.count(), OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, + OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), + OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, } @@ -107,6 +119,8 @@ impl OutboundRequest { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, + OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, OutboundRequest::Ping(_) => SupportedProtocol::PingV1, OutboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -123,6 +137,8 @@ impl OutboundRequest { // variants that have `multiple_responses()` can have values. 
OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, + OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -178,6 +194,8 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index f2a39470b9..95fdc20838 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,7 +22,7 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, - EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + BlobSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; @@ -83,6 +83,12 @@ lazy_static! 
{ + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field + pub static ref SIGNED_BEACON_BLOCK_DENEB_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + + (::ssz_fixed_len() * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET; // Length offset for the blob commitments field. + pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -95,6 +101,20 @@ lazy_static! { ]) .as_ssz_bytes() .len(); + + pub static ref BLOBS_BY_ROOT_REQUEST_MIN: usize = + VariableList::::from(Vec::::new()) + .as_ssz_bytes() + .len(); + pub static ref BLOBS_BY_ROOT_REQUEST_MAX: usize = + VariableList::::from(vec![ + Hash256::zero(); + MAX_REQUEST_BLOB_SIDECARS + as usize + ]) + .as_ssz_bytes() + .len(); + pub static ref ERROR_TYPE_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -121,6 +141,7 @@ pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize ForkName::Altair | ForkName::Base => max_chunk_size / 10, ForkName::Merge => max_chunk_size, ForkName::Capella => max_chunk_size, + ForkName::Deneb => max_chunk_size, } } @@ -145,6 +166,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks ), + ForkName::Deneb => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_DENEB_MAX, // EIP 4844 block is larger than all prior fork blocks + ), } } @@ -162,6 +187,12 
@@ pub enum Protocol { /// The `BlocksByRoot` protocol name. #[strum(serialize = "beacon_blocks_by_root")] BlocksByRoot, + /// The `BlobsByRange` protocol name. + #[strum(serialize = "blob_sidecars_by_range")] + BlobsByRange, + /// The `BlobsByRoot` protocol name. + #[strum(serialize = "blob_sidecars_by_root")] + BlobsByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. @@ -172,6 +203,22 @@ pub enum Protocol { LightClientBootstrap, } +impl Protocol { + pub(crate) fn terminator(self) -> Option { + match self { + Protocol::Status => None, + Protocol::Goodbye => None, + Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange), + Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), + Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange), + Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), + Protocol::Ping => None, + Protocol::MetaData => None, + Protocol::LightClientBootstrap => None, + } + } +} + /// RPC Encondings supported. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Encoding { @@ -187,6 +234,8 @@ pub enum SupportedProtocol { BlocksByRangeV2, BlocksByRootV1, BlocksByRootV2, + BlobsByRangeV1, + BlobsByRootV1, PingV1, MetaDataV1, MetaDataV2, @@ -202,6 +251,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRangeV2 => "2", SupportedProtocol::BlocksByRootV1 => "1", SupportedProtocol::BlocksByRootV2 => "2", + SupportedProtocol::BlobsByRangeV1 => "1", + SupportedProtocol::BlobsByRootV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -217,6 +268,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange, SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot, SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, + SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, + SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -224,8 +277,8 @@ impl SupportedProtocol { } } - fn currently_supported() -> Vec { - vec![ + fn currently_supported(fork_context: &ForkContext) -> Vec { + let mut supported = vec![ ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy), ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy), // V2 variants have higher preference then V1 @@ -236,7 +289,14 @@ impl SupportedProtocol { ProtocolId::new(Self::PingV1, Encoding::SSZSnappy), ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy), ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy), - ] + ]; + if fork_context.fork_exists(ForkName::Deneb) { + supported.extend_from_slice(&[ + ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy), + ]); + } + supported } } @@ -264,7 +324,7 @@ impl UpgradeInfo for RPCProtocol { /// The list of supported RPC 
protocols for Lighthouse. fn protocol_info(&self) -> Self::InfoIter { - let mut supported_protocols = SupportedProtocol::currently_supported(); + let mut supported_protocols = SupportedProtocol::currently_supported(&self.fork_context); if self.enable_light_client_server { supported_protocols.push(ProtocolId::new( SupportedProtocol::LightClientBootstrapV1, @@ -333,6 +393,13 @@ impl ProtocolId { Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } + Protocol::BlobsByRange => RpcLimits::new( + ::ssz_fixed_len(), + ::ssz_fixed_len(), + ), + Protocol::BlobsByRoot => { + RpcLimits::new(*BLOBS_BY_ROOT_REQUEST_MIN, *BLOBS_BY_ROOT_REQUEST_MAX) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -355,6 +422,8 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), + Protocol::BlobsByRange => rpc_blob_limits::(), + Protocol::BlobsByRoot => rpc_blob_limits::(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -376,6 +445,8 @@ impl ProtocolId { match self.versioned_protocol { SupportedProtocol::BlocksByRangeV2 | SupportedProtocol::BlocksByRootV2 + | SupportedProtocol::BlobsByRangeV1 + | SupportedProtocol::BlobsByRootV1 | SupportedProtocol::LightClientBootstrapV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 @@ -407,6 +478,13 @@ impl ProtocolId { } } +pub fn rpc_blob_limits() -> RpcLimits { + RpcLimits::new( + BlobSidecar::::empty().as_ssz_bytes().len(), + BlobSidecar::::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -478,6 +556,8 @@ pub enum InboundRequest { Goodbye(GoodbyeReason), BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + 
BlobsByRange(BlobsByRangeRequest), + BlobsByRoot(BlobsByRootRequest), LightClientBootstrap(LightClientBootstrapRequest), Ping(Ping), MetaData(MetadataRequest), @@ -494,6 +574,8 @@ impl InboundRequest { InboundRequest::Goodbye(_) => 0, InboundRequest::BlocksByRange(req) => *req.count(), InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, + InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), + InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -513,6 +595,8 @@ impl InboundRequest { BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1, BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2, }, + InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, + InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, InboundRequest::Ping(_) => SupportedProtocol::PingV1, InboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -530,6 +614,8 @@ impl InboundRequest { // variants that have `multiple_responses()` can have values. 
InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, + InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, + InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -636,6 +722,8 @@ impl std::fmt::Display for InboundRequest { InboundRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), + InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index e1634d711b..0b57374e8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -2,7 +2,7 @@ use super::config::RateLimiterConfig; use crate::rpc::Protocol; use fnv::FnvHashMap; use libp2p::PeerId; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::future::Future; use std::hash::Hash; @@ -94,6 +94,10 @@ pub struct RPCRateLimiter { bbrange_rl: Limiter, /// BlocksByRoot rate limiter. bbroots_rl: Limiter, + /// BlobsByRange rate limiter. + blbrange_rl: Limiter, + /// BlobsByRoot rate limiter. + blbroot_rl: Limiter, /// LightClientBootstrap rate limiter. 
lcbootstrap_rl: Limiter, } @@ -122,6 +126,10 @@ pub struct RPCRateLimiterBuilder { bbrange_quota: Option, /// Quota for the BlocksByRoot protocol. bbroots_quota: Option, + /// Quota for the BlobsByRange protocol. + blbrange_quota: Option, + /// Quota for the BlobsByRoot protocol. + blbroot_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, } @@ -137,6 +145,8 @@ impl RPCRateLimiterBuilder { Protocol::Goodbye => self.goodbye_quota = q, Protocol::BlocksByRange => self.bbrange_quota = q, Protocol::BlocksByRoot => self.bbroots_quota = q, + Protocol::BlobsByRange => self.blbrange_quota = q, + Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, } self @@ -158,6 +168,14 @@ impl RPCRateLimiterBuilder { .lcbootstrap_quota .ok_or("LightClientBootstrap quota not specified")?; + let blbrange_quota = self + .blbrange_quota + .ok_or("BlobsByRange quota not specified")?; + + let blbroots_quota = self + .blbroot_quota + .ok_or("BlobsByRoot quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -165,6 +183,8 @@ impl RPCRateLimiterBuilder { let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let bbroots_rl = Limiter::from_quota(bbroots_quota)?; let bbrange_rl = Limiter::from_quota(bbrange_quota)?; + let blbrange_rl = Limiter::from_quota(blbrange_quota)?; + let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?; // check for peers to prune every 30 seconds, starting in 30 seconds @@ -179,6 +199,8 @@ impl RPCRateLimiterBuilder { goodbye_rl, bbroots_rl, bbrange_rl, + blbrange_rl, + blbroot_rl, lcbootstrap_rl, init_time: Instant::now(), }) @@ -219,6 +241,8 @@ impl RPCRateLimiter { goodbye_quota, blocks_by_range_quota, blocks_by_root_quota, + blobs_by_range_quota, + blobs_by_root_quota, light_client_bootstrap_quota, } = 
config; @@ -229,6 +253,8 @@ impl RPCRateLimiter { .set_quota(Protocol::Goodbye, goodbye_quota) .set_quota(Protocol::BlocksByRange, blocks_by_range_quota) .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) + .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) + .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .build() } @@ -255,6 +281,8 @@ impl RPCRateLimiter { Protocol::Goodbye => &mut self.goodbye_rl, Protocol::BlocksByRange => &mut self.bbrange_rl, Protocol::BlocksByRoot => &mut self.bbroots_rl, + Protocol::BlobsByRange => &mut self.blbrange_rl, + Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl, }; check(limiter) @@ -268,6 +296,8 @@ impl RPCRateLimiter { self.goodbye_rl.prune(time_since_start); self.bbrange_rl.prune(time_since_start); self.bbroots_rl.prune(time_since_start); + self.blbrange_rl.prune(time_since_start); + self.blbroot_rl.prune(time_since_start); } } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 187c0ab1b1..6ede0666a3 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::light_client_bootstrap::LightClientBootstrap; -use types::{EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -34,10 +35,14 @@ pub enum Request { Status(StatusMessage), /// A blocks by range request. BlocksByRange(BlocksByRangeRequest), + /// A blobs by range request. + BlobsByRange(BlobsByRangeRequest), /// A request blocks root request. 
BlocksByRoot(BlocksByRootRequest), // light client bootstrap request LightClientBootstrap(LightClientBootstrapRequest), + /// A request blobs root request. + BlobsByRoot(BlobsByRootRequest), } impl std::convert::From for OutboundRequest { @@ -63,6 +68,8 @@ impl std::convert::From for OutboundRequest { Request::LightClientBootstrap(_) => { unreachable!("Lighthouse never makes an outbound light client request") } + Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), + Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -80,8 +87,12 @@ pub enum Response { Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. BlocksByRange(Option>>), + /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. + BlobsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), + /// A response to a get BLOBS_BY_ROOT request. + BlobsByRoot(Option>>), /// A response to a LightClientUpdate request. 
LightClientBootstrap(LightClientBootstrap), } @@ -97,6 +108,14 @@ impl std::convert::From> for RPCCodedResponse RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), }, + Response::BlobsByRoot(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot), + }, + Response::BlobsByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 6c52a07c14..8dd750429c 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -20,8 +20,6 @@ where AppReqId: ReqId, TSpec: EthSpec, { - /// Peers banned. - pub banned_peers: libp2p::allow_block_list::Behaviour, /// Keep track of active and pending connections to enforce hard limits. pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 2865d5b3f6..5dc0d29ff5 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -20,6 +20,8 @@ pub struct GossipCache { topic_msgs: HashMap, Key>>, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blobs. + blob_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. 
@@ -47,6 +49,8 @@ pub struct GossipCacheBuilder { default_timeout: Option, /// Timeout for blocks. beacon_block: Option, + /// Timeout for blob sidecars. + blob_sidecar: Option, /// Timeout for aggregate attestations. aggregates: Option, /// Timeout for attestations. @@ -147,6 +151,7 @@ impl GossipCacheBuilder { let GossipCacheBuilder { default_timeout, beacon_block, + blob_sidecar, aggregates, attestation, voluntary_exit, @@ -162,6 +167,7 @@ impl GossipCacheBuilder { expirations: DelayQueue::default(), topic_msgs: HashMap::default(), beacon_block: beacon_block.or(default_timeout), + blob_sidecar: blob_sidecar.or(default_timeout), aggregates: aggregates.or(default_timeout), attestation: attestation.or(default_timeout), voluntary_exit: voluntary_exit.or(default_timeout), @@ -187,6 +193,7 @@ impl GossipCache { pub fn insert(&mut self, topic: GossipTopic, data: Vec) { let expire_timeout = match topic.kind() { GossipKind::BeaconBlock => self.beacon_block, + GossipKind::BlobSidecar(_) => self.blob_sidecar, GossipKind::BeaconAggregateAndProof => self.aggregates, GossipKind::Attestation(_) => self.attestation, GossipKind::VoluntaryExit => self.voluntary_exit, @@ -198,9 +205,8 @@ impl GossipCache { GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; - let expire_timeout = match expire_timeout { - Some(expire_timeout) => expire_timeout, - None => return, + let Some(expire_timeout) = expire_timeout else { + return; }; match self .topic_msgs diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 63e5bcbff6..a38b7b2f2e 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -4,6 +4,7 @@ use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, 
FIND_NODE_QUERY_CLOSEST_PEERS, }; +use crate::metrics::AggregatedBandwidthSinks; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -15,7 +16,8 @@ use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, - SnappyTransform, Subnet, SubnetDiscovery, + SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, + CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -23,14 +25,14 @@ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; -use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::{ self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, + TopicScoreParams, }; -use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; -use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::PeerId; +use libp2p::{identify, SwarmBuilder}; use slog::{crit, debug, info, o, trace, warn}; use std::path::PathBuf; use std::pin::Pin; @@ -40,7 +42,8 @@ use std::{ }; use types::ForkName; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, consts::deneb::BLOB_SIDECAR_SUBNET_COUNT, + EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; @@ -69,6 +72,8 @@ pub enum NetworkEvent { id: AppReqId, /// The peer to which this request was sent. 
peer_id: PeerId, + /// The error of the failed request. + error: RPCError, }, RequestReceived { /// The peer that sent the request. @@ -123,7 +128,7 @@ pub struct Network { update_gossipsub_scores: tokio::time::Interval, gossip_cache: GossipCache, /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, + pub bandwidth: AggregatedBandwidthSinks, /// This node's PeerId. pub local_peer_id: PeerId, /// Logger for behaviour actions. @@ -156,8 +161,6 @@ impl Network { let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( enr, - config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port), - config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port), meta_data, config .trusted_peers @@ -221,15 +224,27 @@ impl Network { // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); + let max_topics = ctx.chain_spec.attestation_subnet_count as usize + + SYNC_COMMITTEE_SUBNET_COUNT as usize + + BLOB_SIDECAR_SUBNET_COUNT as usize + + BASE_CORE_TOPICS.len() + + ALTAIR_CORE_TOPICS.len() + + CAPELLA_CORE_TOPICS.len() + + DENEB_CORE_TOPICS.len() + + LIGHT_CLIENT_GOSSIP_TOPICS.len(); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, + BLOB_SIDECAR_SUBNET_COUNT, ), - max_subscribed_topics: 200, - max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + // during a fork we subscribe to both the old and new topics + max_subscribed_topics: max_topics * 4, + // 162 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics) * 2 + max_subscriptions_per_request: max_topics * 2, }; let gossipsub_config_params = GossipsubConfigParams { @@ -339,11 +354,8 @@ impl Network { 
libp2p::connection_limits::Behaviour::new(limits) }; - let banned_peers = libp2p::allow_block_list::Behaviour::default(); - let behaviour = { Behaviour { - banned_peers, gossipsub, eth2_rpc, discovery, @@ -353,34 +365,34 @@ impl Network { } }; - let (swarm, bandwidth) = { - // Set up the transport - tcp/ws with noise and mplex - let (transport, bandwidth) = build_transport(local_keypair.clone()) + // Set up the transport - tcp/quic with noise and mplex + let (transport, bandwidth) = + build_transport(local_keypair.clone(), !config.disable_quic_support) .map_err(|e| format!("Failed to build transport: {:?}", e))?; - // use the executor for libp2p - struct Executor(task_executor::TaskExecutor); - impl libp2p::swarm::Executor for Executor { - fn exec(&self, f: Pin + Send>>) { - self.0.spawn(f, "libp2p"); - } + // use the executor for libp2p + struct Executor(task_executor::TaskExecutor); + impl libp2p::swarm::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f, "libp2p"); } + } - // sets up the libp2p connection limits - - ( - SwarmBuilder::with_executor( - transport, - behaviour, - local_peer_id, - Executor(executor), - ) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .per_connection_event_buffer_size(4) - .build(), - bandwidth, - ) - }; + // sets up the libp2p swarm. 
+ let swarm = SwarmBuilder::with_existing_identity(local_keypair) + .with_tokio() + .with_other_transport(|_key| transport) + .expect("infalible") + .with_behaviour(|_| behaviour) + .expect("infalible") + .with_swarm_config(|_| { + libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size( + std::num::NonZeroUsize::new(7).expect("Not zero"), + ) + .with_per_connection_event_buffer_size(4) + }) + .build(); let mut network = Network { swarm, @@ -411,9 +423,16 @@ impl Network { async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery); + debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery, "quic_enabled" => !config.disable_quic_support); + + for listen_multiaddr in config.listen_addrs().libp2p_addresses() { + // If QUIC is disabled, ignore listening on QUIC ports + if config.disable_quic_support + && listen_multiaddr.iter().any(|v| v == MProtocol::QuicV1) + { + continue; + } - for listen_multiaddr in config.listen_addrs().tcp_addresses() { match self.swarm.listen_on(listen_multiaddr.clone()) { Ok(_) => { let mut log_address = listen_multiaddr; @@ -454,6 +473,20 @@ impl Network { boot_nodes.dedup(); for bootnode_enr in boot_nodes { + // If QUIC is enabled, attempt QUIC connections first + if !config.disable_quic_support { + for quic_multiaddr in &bootnode_enr.multiaddr_quic() { + if !self + .network_globals + .peers + .read() + .is_connected_or_dialing(&bootnode_enr.peer_id()) + { + dial(quic_multiaddr.clone()); + } + } + } + for multiaddr in &bootnode_enr.multiaddr() { // ignore udp multiaddr if it 
exists let components = multiaddr.iter().collect::>(); @@ -583,7 +616,7 @@ impl Network { } // Subscribe to core topics for the new fork - for kind in fork_core_topics(&new_fork) { + for kind in fork_core_topics::(&new_fork) { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } @@ -601,6 +634,38 @@ impl Network { } } + /// Remove topic weight from all topics that don't have the given fork digest. + pub fn remove_topic_weight_except(&mut self, except: [u8; 4]) { + let new_param = TopicScoreParams { + topic_weight: 0.0, + ..Default::default() + }; + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + { + let libp2p_topic: Topic = topic.clone().into(); + match self + .gossipsub_mut() + .set_topic_params(libp2p_topic, new_param.clone()) + { + Ok(_) => debug!(self.log, "Removed topic weight"; "topic" => %topic), + Err(e) => { + warn!(self.log, "Failed to remove topic weight"; "topic" => %topic, "error" => e) + } + } + } + } + + /// Returns the scoring parameters for a topic if set. + pub fn get_topic_params(&self, topic: GossipTopic) -> Option<&TopicScoreParams> { + self.swarm + .behaviour() + .gossipsub + .get_topic_params(&topic.into()) + } + /// Subscribes to a gossipsub topic. /// /// Returns `true` if the subscription was successful and `false` otherwise. @@ -784,7 +849,7 @@ impl Network { } /// Inform the peer that their request produced an error. - pub fn send_error_reponse( + pub fn send_error_response( &mut self, peer_id: PeerId, id: PeerRequestId, @@ -1028,6 +1093,12 @@ impl Network { Request::BlocksByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } + Request::BlobsByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]) + } + Request::BlobsByRoot { .. 
} => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1036,30 +1107,27 @@ impl Network { } } - /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't + /// Dial cached Enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { let predicate = subnet_predicate::(vec![subnet], &self.log); - let peers_to_dial: Vec = self + let peers_to_dial: Vec = self .discovery() .cached_enrs() - .filter_map(|(peer_id, enr)| { - let peers = self.network_globals.peers.read(); - if predicate(enr) && peers.should_dial(peer_id) { - Some(*peer_id) + .filter_map(|(_peer_id, enr)| { + if predicate(enr) { + Some(enr.clone()) } else { None } }) .collect(); - for peer_id in peers_to_dial { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); - // Remove the ENR from the cache to prevent continual re-dialing on disconnects - self.discovery_mut().remove_cached_enr(&peer_id); - // For any dial event, inform the peer manager - let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager_mut().dial_peer(&peer_id, enr); + // Remove the ENR from the cache to prevent continual re-dialing on disconnects + for enr in peers_to_dial { + debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %enr.peer_id()); + self.discovery_mut().remove_cached_enr(&enr.peer_id()); + self.peer_manager_mut().dial_peer(enr); } } @@ -1209,9 +1277,9 @@ impl Network { &error, ConnectionDirection::Outgoing, ); - // inform failures of requests comming outside the behaviour + // inform failures of requests coming outside the behaviour if let RequestId::Application(id) = id { - Some(NetworkEvent::RPCFailed { peer_id, id }) + Some(NetworkEvent::RPCFailed { peer_id, id, error }) } else { None } @@ -1294,6 +1362,19 @@ impl Network { ); Some(event) } + 
InboundRequest::BlobsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlobsByRange(req), + ); + Some(event) + } + InboundRequest::BlobsByRoot(req) => { + let event = + self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1326,9 +1407,15 @@ impl Network { RPCResponse::BlocksByRange(resp) => { self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } + RPCResponse::BlobsByRange(resp) => { + self.build_response(id, peer_id, Response::BlobsByRange(Some(resp))) + } RPCResponse::BlocksByRoot(resp) => { self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } + RPCResponse::BlobsByRoot(resp) => { + self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) + } // Should never be reached RPCResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1339,28 +1426,14 @@ impl Network { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), + ResponseTermination::BlobsByRange => Response::BlobsByRange(None), + ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), }; self.build_response(id, peer_id, response) } } } - /// Handle a discovery event. - fn inject_discovery_event( - &mut self, - event: DiscoveredPeers, - ) -> Option> { - let DiscoveredPeers { peers } = event; - let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); - for peer_id in to_dial_peers { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); - // For any dial event, inform the peer manager - let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager_mut().dial_peer(&peer_id, enr); - } - None - } - /// Handle an identify event. 
fn inject_identify_event( &mut self, @@ -1401,15 +1474,10 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm - .behaviour_mut() - .banned_peers - .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); None } @@ -1458,10 +1526,16 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. - BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), - BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + // Inform the peer manager about discovered peers. + // + // The peer manager will subsequently decide which peers need to be dialed and then dial + // them. + BehaviourEvent::Discovery(DiscoveredPeers { peers }) => { + self.peer_manager_mut().peers_discovered(peers); + None + } BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), @@ -1493,7 +1567,7 @@ impl Network { format!("Dialing local peer id {endpoint:?}") } libp2p::swarm::ListenError::Denied { cause } => { - format!("Connection was denied with cause {cause}") + format!("Connection was denied with cause: {cause:?}") } libp2p::swarm::ListenError::Transport(t) => match t { libp2p::TransportError::MultiaddrNotSupported(m) => { @@ -1527,7 +1601,14 @@ impl Network { SwarmEvent::ListenerClosed { addresses, reason, .. 
} => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + match reason { + Ok(_) => { + debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) + } + Err(reason) => { + crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + } + }; if Swarm::listeners(&self.swarm).count() == 0 { Some(NetworkEvent::ZeroListeners) } else { @@ -1543,13 +1624,7 @@ impl Network { None } } - SwarmEvent::Dialing { - peer_id, - connection_id: _, - } => { - debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id); - None - } + SwarmEvent::Dialing { .. } => None, }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 21fd09b6b0..ab6b3a771b 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,14 +1,16 @@ +use crate::metrics::AggregatedBandwidthSinks; use crate::multiaddr::Protocol; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; use crate::types::{ error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, }; use crate::{GossipTopic, NetworkConfig}; -use libp2p::bandwidth::BandwidthSinks; +use futures::future::Either; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::gossipsub; use libp2p::identity::{secp256k1, Keypair}; use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; +use libp2p_quic; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -37,31 +39,57 @@ pub struct Context<'a> { type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; -/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and -/// mplex as the multiplexing layer. 
+/// The implementation supports TCP/IP, QUIC (experimental) over UDP, noise as the encryption layer, and +/// mplex/yamux as the multiplexing layer (when using TCP). pub fn build_transport( local_private_key: Keypair, -) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)); - let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; - #[cfg(feature = "libp2p-websocket")] - let transport = { - let trans_clone = transport.clone(); - transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) - }; + quic_support: bool, +) -> std::io::Result<(BoxedTransport, AggregatedBandwidthSinks)> { + // mplex config + let mut mplex_config = libp2p_mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); // yamux config let mut yamux_config = yamux::Config::default(); yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); - let (transport, bandwidth) = transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(yamux_config) - .timeout(Duration::from_secs(10)) - .boxed() - .with_bandwidth_logging(); - // Authentication + // Creates the TCP transport layer + let (tcp, tcp_bandwidth) = + libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)) + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) + .timeout(Duration::from_secs(10)) + .with_bandwidth_logging(); + + let (transport, bandwidth) = if quic_support { + // Enables Quic + // The default quic configuration suits us for now. 
+ let quic_config = libp2p_quic::Config::new(&local_private_key); + let (quic, quic_bandwidth) = + libp2p_quic::tokio::Transport::new(quic_config).with_bandwidth_logging(); + let transport = tcp + .or_transport(quic) + .map(|either_output, _| match either_output { + Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + }) + .boxed(); + ( + transport, + AggregatedBandwidthSinks::new(tcp_bandwidth, Some(quic_bandwidth)), + ) + } else { + (tcp, AggregatedBandwidthSinks::new(tcp_bandwidth, None)) + }; + + // Enables DNS over the transport. + let transport = libp2p::dns::tokio::Transport::system(transport)?.boxed(); + Ok((transport, bandwidth)) } @@ -214,6 +242,7 @@ pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, sync_committee_subnet_count: u64, + blob_sidecar_subnet_count: u64, ) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -239,6 +268,9 @@ pub(crate) fn create_whitelist_filter( for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } + for id in 0..blob_sidecar_subnet_count { + add(BlobSidecar(id)); + } } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 97eaaa0051..b2b605e8ae 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -16,10 +16,6 @@ pub struct NetworkGlobals { pub peer_id: RwLock, /// Listening multiaddrs. pub listen_multiaddrs: RwLock>, - /// The TCP port that the libp2p service is listening on over Ipv4. - listen_port_tcp4: Option, - /// The TCP port that the libp2p service is listening on over Ipv6. - listen_port_tcp6: Option, /// The collection of known peers. 
pub peers: RwLock>, // The local meta data of our node. @@ -35,8 +31,6 @@ pub struct NetworkGlobals { impl NetworkGlobals { pub fn new( enr: Enr, - listen_port_tcp4: Option, - listen_port_tcp6: Option, local_metadata: MetaData, trusted_peers: Vec, disable_peer_scoring: bool, @@ -46,8 +40,6 @@ impl NetworkGlobals { local_enr: RwLock::new(enr.clone()), peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), - listen_port_tcp4, - listen_port_tcp6, local_metadata: RwLock::new(local_metadata), peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), gossipsub_subscriptions: RwLock::new(HashSet::new()), @@ -72,16 +64,6 @@ impl NetworkGlobals { self.listen_multiaddrs.read().clone() } - /// Returns the libp2p TCP port that this node has been configured to listen on. - pub fn listen_port_tcp4(&self) -> Option { - self.listen_port_tcp4 - } - - /// Returns the UDP discovery port that this node has been configured to listen on. - pub fn listen_port_tcp6(&self) -> Option { - self.listen_port_tcp6 - } - /// Returns the number of libp2p connected peers. 
pub fn connected_peers(&self) -> usize { self.peers.read().connected_peer_ids().count() @@ -139,8 +121,6 @@ impl NetworkGlobals { let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); NetworkGlobals::new( enr, - Some(9000), - None, MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index e7457f25da..af9e9ef45d 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -18,5 +18,6 @@ pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, - GossipTopic, LIGHT_CLIENT_GOSSIP_TOPICS, + GossipTopic, ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, + LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 06732ac99f..9efe44f75d 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -12,14 +12,16 @@ use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockMerge, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockDeneb, SignedBeaconBlockMerge, SignedBlobSidecar, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. 
BeaconBlock(Arc>), + /// Gossipsub message providing notification of a [`SignedBlobSidecar`] along with the subnet id where it was received. + BlobSidecar(Box<(u64, SignedBlobSidecar)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -113,6 +115,9 @@ impl PubsubMessage { pub fn kind(&self) -> GossipKind { match self { PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::BlobSidecar(blob_sidecar_data) => { + GossipKind::BlobSidecar(blob_sidecar_data.0) + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -183,6 +188,10 @@ impl PubsubMessage { SignedBeaconBlockCapella::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Deneb) => SignedBeaconBlock::::Deneb( + SignedBeaconBlockDeneb::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", @@ -192,6 +201,28 @@ impl PubsubMessage { }; Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } + GossipKind::BlobSidecar(blob_index) => { + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Deneb) => { + let blob_sidecar = SignedBlobSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::BlobSidecar(Box::new(( + *blob_index, + blob_sidecar, + )))) + } + Some( + ForkName::Base + | ForkName::Altair + | ForkName::Merge + | ForkName::Capella, + ) + | None => Err(format!( + "beacon_blobs_and_sidecar topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )), + } + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -260,6 +291,7 @@ impl 
PubsubMessage { // messages for us. match &self { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), + PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -283,6 +315,11 @@ impl std::fmt::Display for PubsubMessage { block.slot(), block.message().proposer_index() ), + PubsubMessage::BlobSidecar(data) => write!( + f, + "BlobSidecar: slot: {}, blob index: {}", + data.1.message.slot, data.1.message.index, + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 0e4aefbb5c..e7e771e1ad 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,7 +1,8 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::AsRefStr; -use types::{ForkName, SubnetId, SyncSubnetId}; +use types::consts::deneb::BLOB_SIDECAR_SUBNET_COUNT; +use types::{EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; @@ -13,6 +14,7 @@ pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; +pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -39,22 +41,34 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ 
GossipKind::LightClientOptimisticUpdate, ]; +pub const DENEB_CORE_TOPICS: [GossipKind; 0] = []; + /// Returns the core topics associated with each fork that are new to the previous fork -pub fn fork_core_topics(fork_name: &ForkName) -> Vec { +pub fn fork_core_topics(fork_name: &ForkName) -> Vec { match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), ForkName::Merge => vec![], ForkName::Capella => CAPELLA_CORE_TOPICS.to_vec(), + ForkName::Deneb => { + // All of deneb blob topics are core topics + let mut deneb_blob_topics = Vec::new(); + for i in 0..BLOB_SIDECAR_SUBNET_COUNT { + deneb_blob_topics.push(GossipKind::BlobSidecar(i)); + } + let mut deneb_topics = DENEB_CORE_TOPICS.to_vec(); + deneb_topics.append(&mut deneb_blob_topics); + deneb_topics + } } } /// Returns all the topics that we need to subscribe to for a given fork /// including topics from older forks and new topics for the current fork. -pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec { - let mut topics = fork_core_topics(¤t_fork); +pub fn core_topics_to_subscribe(mut current_fork: ForkName) -> Vec { + let mut topics = fork_core_topics::(¤t_fork); while let Some(previous_fork) = current_fork.previous_fork() { - let previous_fork_topics = fork_core_topics(&previous_fork); + let previous_fork_topics = fork_core_topics::(&previous_fork); topics.extend(previous_fork_topics); current_fork = previous_fork; } @@ -82,6 +96,8 @@ pub enum GossipKind { BeaconBlock, /// Topic for publishing aggregate attestations and proofs. BeaconAggregateAndProof, + /// Topic for publishing BlobSidecars. + BlobSidecar(u64), /// Topic for publishing raw attestations on a particular subnet. 
#[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -111,6 +127,9 @@ impl std::fmt::Display for GossipKind { GossipKind::SyncCommitteeMessage(subnet_id) => { write!(f, "sync_committee_{}", **subnet_id) } + GossipKind::BlobSidecar(blob_index) => { + write!(f, "{}{}", BLOB_SIDECAR_PREFIX, blob_index) + } x => f.write_str(x.as_ref()), } } @@ -178,11 +197,8 @@ impl GossipTopic { BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, - topic => match committee_topic_index(topic) { - Some(subnet) => match subnet { - Subnet::Attestation(s) => GossipKind::Attestation(s), - Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), - }, + topic => match subnet_topic_index(topic) { + Some(kind) => kind, None => return Err(format!("Unknown topic: {}", topic)), }, }; @@ -236,6 +252,9 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::BlobSidecar(blob_index) => { + format!("{}{}", BLOB_SIDECAR_PREFIX, blob_index) + } GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), @@ -267,28 +286,26 @@ pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option { GossipTopic::decode(topic_hash.as_str()).ok()?.subnet_id() } -// Determines if a string is an attestation or sync committee topic. -fn committee_topic_index(topic: &str) -> Option { - if topic.starts_with(BEACON_ATTESTATION_PREFIX) { - return Some(Subnet::Attestation(SubnetId::new( - topic - .trim_start_matches(BEACON_ATTESTATION_PREFIX) - .parse::() - .ok()?, +// Determines if the topic name is of an indexed topic. 
+fn subnet_topic_index(topic: &str) -> Option { + if let Some(index) = topic.strip_prefix(BEACON_ATTESTATION_PREFIX) { + return Some(GossipKind::Attestation(SubnetId::new( + index.parse::().ok()?, ))); - } else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) { - return Some(Subnet::SyncCommittee(SyncSubnetId::new( - topic - .trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC) - .parse::() - .ok()?, + } else if let Some(index) = topic.strip_prefix(SYNC_COMMITTEE_PREFIX_TOPIC) { + return Some(GossipKind::SyncCommitteeMessage(SyncSubnetId::new( + index.parse::().ok()?, ))); + } else if let Some(index) = topic.strip_prefix(BLOB_SIDECAR_PREFIX) { + return Some(GossipKind::BlobSidecar(index.parse::().ok()?)); } None } #[cfg(test)] mod tests { + use types::MainnetEthSpec; + use super::GossipKind::*; use super::*; @@ -417,12 +434,15 @@ mod tests { #[test] fn test_core_topics_to_subscribe() { + type E = MainnetEthSpec; let mut all_topics = Vec::new(); + let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb); + all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); all_topics.extend(ALTAIR_CORE_TOPICS); all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - assert_eq!(core_topics_to_subscribe(latest_fork), all_topics); + assert_eq!(core_topics_to_subscribe::(latest_fork), all_topics); } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 36a2e52385..dc77e3efe2 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -13,7 +13,6 @@ use tokio::runtime::Runtime; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, }; -use unused_port::unused_tcp4_port; type E = MinimalEthSpec; type ReqId = usize; @@ -26,16 +25,19 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let altair_fork_epoch = Epoch::new(1); let 
merge_fork_epoch = Epoch::new(2); let capella_fork_epoch = Epoch::new(3); + let deneb_fork_epoch = Epoch::new(4); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); + chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -68,15 +70,19 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } -pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { +pub fn build_config(mut boot_nodes: Vec) -> NetworkConfig { let mut config = NetworkConfig::default(); + + // Find unused ports by using the 0 port. 
+ let port = 0; + + let random_path: u16 = rand::random(); let path = TempBuilder::new() - .prefix(&format!("libp2p_test{}", port)) + .prefix(&format!("libp2p_test_{}", random_path)) .tempdir() .unwrap(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port); - config.enr_udp4_port = Some(port); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port, port); config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); config.network_dir = path.into_path(); @@ -96,8 +102,7 @@ pub async fn build_libp2p_instance( fork_name: ForkName, spec: &ChainSpec, ) -> Libp2pInstance { - let port = unused_tcp4_port().unwrap(); - let config = build_config(port, boot_nodes); + let config = build_config(boot_nodes); // launch libp2p service let (signal, exit) = exit_future::signal(); @@ -124,6 +129,12 @@ pub fn get_enr(node: &LibP2PService) -> Enr { node.local_enr() } +// Protocol for the node pair connection. +pub enum Protocol { + Tcp, + Quic, +} + // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. 
#[allow(dead_code)] @@ -132,6 +143,7 @@ pub async fn build_node_pair( log: &slog::Logger, fork_name: ForkName, spec: &ChainSpec, + protocol: Protocol, ) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); @@ -139,33 +151,57 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await; - let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); - // let the two nodes set up listeners let sender_fut = async { loop { - if let NetworkEvent::NewListenAddr(_) = sender.next_event().await { - return; + if let NetworkEvent::NewListenAddr(addr) = sender.next_event().await { + // Only end once we've listened on the protocol we care about + match protocol { + Protocol::Tcp => { + if addr.iter().any(|multiaddr_proto| { + matches!(multiaddr_proto, libp2p::multiaddr::Protocol::Tcp(_)) + }) { + return addr; + } + } + Protocol::Quic => { + if addr.iter().any(|multiaddr_proto| { + matches!(multiaddr_proto, libp2p::multiaddr::Protocol::QuicV1) + }) { + return addr; + } + } + } } } }; let receiver_fut = async { loop { - if let NetworkEvent::NewListenAddr(_) = receiver.next_event().await { - return; + if let NetworkEvent::NewListenAddr(addr) = receiver.next_event().await { + match protocol { + Protocol::Tcp => { + if addr.iter().any(|multiaddr_proto| { + matches!(multiaddr_proto, libp2p::multiaddr::Protocol::Tcp(_)) + }) { + return addr; + } + } + Protocol::Quic => { + if addr.iter().any(|multiaddr_proto| { + matches!(multiaddr_proto, libp2p::multiaddr::Protocol::QuicV1) + }) { + return addr; + } + } + } } } }; let joined = futures::future::join(sender_fut, receiver_fut); - // wait for either both nodes to listen or a timeout - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(500)) => {} - _ = joined => {} - } + let receiver_multiaddr = joined.await.1; - // sender.dial_peer(peer_id); match sender.testing_dial(receiver_multiaddr.clone()) { Ok(()) => { debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 05fa5ab854..176f783c99 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,4 +1,8 @@ #![cfg(test)] + +mod common; + +use common::Protocol; use lighthouse_network::rpc::methods::*; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; @@ -9,13 +13,11 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, ChainSpec, EmptyBlock, - Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, - Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobSidecar, ChainSpec, + EmptyBlock, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, + SignedBeaconBlock, Slot, }; -mod common; - type E = MinimalEthSpec; /// Merge block with length < max_rpc_size. @@ -49,7 +51,7 @@ fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBloc // Tests the STATUS RPC message #[test] #[allow(clippy::single_match)] -fn test_status_rpc() { +fn test_tcp_status_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -62,8 +64,14 @@ fn test_status_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Base, + &spec, + Protocol::Tcp, + ) + .await; // Dummy STATUS RPC message let rpc_request = Request::Status(StatusMessage { @@ -141,7 +149,7 @@ fn test_status_rpc() { // Tests a streamed BlocksByRange RPC Message #[test] #[allow(clippy::single_match)] -fn test_blocks_by_range_chunked_rpc() { +fn test_tcp_blocks_by_range_chunked_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -156,8 +164,14 @@ fn test_blocks_by_range_chunked_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Merge, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -264,10 +278,122 @@ fn test_blocks_by_range_chunked_rpc() { }) } +// Tests a streamed BlobsByRange RPC Message +#[test] +#[allow(clippy::single_match)] +fn test_blobs_by_range_chunked_rpc() { + // set up the logging. 
The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = false; + + let slot_count = 32; + let messages_to_send = 34; + + let log = common::build_log(log_level, enable_logging); + + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let spec = E::default_spec(); + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Deneb, + &spec, + Protocol::Tcp, + ) + .await; + + // BlobsByRange Request + let rpc_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: 0, + count: slot_count, + }); + + // BlobsByRange Response + let blob = BlobSidecar::::empty(); + + let rpc_response = Response::BlobsByRange(Some(Arc::new(blob))); + + // keep count of the number of messages received + let mut messages_received = 0; + let request_id = messages_to_send as usize; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + // Send a BlobsByRange request + debug!(log, "Sending RPC"); + sender.send_request(peer_id, request_id, rpc_request.clone()); + } + NetworkEvent::ResponseReceived { + peer_id: _, + id: _, + response, + } => { + warn!(log, "Sender received a response"); + match response { + Response::BlobsByRange(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); + } + Response::BlobsByRange(None) => { + // should be exactly `messages_to_send` messages before terminating + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => panic!("Invalid RPC received"), + } + } + _ => {} // Ignore other behaviour events + } + } + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + id, + request, + } => { + if request == rpc_request { + // send the response + 
warn!(log, "Receiver got request"); + for _ in 0..messages_to_send { + // Send the same blob response for every chunk; + // the sender asserts each one matches `rpc_response`. + receiver.send_response(peer_id, id, rpc_response.clone()); + } + // send the stream termination + receiver.send_response(peer_id, id, Response::BlobsByRange(None)); + } + } + _ => {} // Ignore other events + } + } + }; + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + // Tests rejection of blocks over `MAX_RPC_SIZE`. #[test] #[allow(clippy::single_match)] -fn test_blocks_by_range_over_limit() { +fn test_tcp_blocks_by_range_over_limit() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -282,8 +408,14 @@ rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Merge, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -350,7 +482,7 @@ // Tests that a streamed BlocksByRange RPC Message terminates when all expected chunks were received #[test] -fn test_blocks_by_range_chunked_rpc_terminates_correctly() { +fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -366,8 +498,14 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Base, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send)); @@ -476,7 +614,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { // Tests an empty response to a BlocksByRange RPC Message #[test] #[allow(clippy::single_match)] -fn test_blocks_by_range_single_empty_rpc() { +fn test_tcp_blocks_by_range_single_empty_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -488,8 +626,14 @@ fn test_blocks_by_range_single_empty_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Base, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10)); @@ -571,12 +715,12 @@ fn test_blocks_by_range_single_empty_rpc() { } // Tests a streamed, chunked BlocksByRoot RPC Message -// The size of the reponse is a full `BeaconBlock` +// The size of the response is a full `BeaconBlock` // which is greater than the Snappy frame size. Hence, this test // serves to test the snappy framing format as well. #[test] #[allow(clippy::single_match)] -fn test_blocks_by_root_chunked_rpc() { +fn test_tcp_blocks_by_root_chunked_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -589,8 +733,14 @@ fn test_blocks_by_root_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Merge, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRoot Request let rpc_request = @@ -702,7 +852,7 @@ fn test_blocks_by_root_chunked_rpc() { // Tests a streamed, chunked BlocksByRoot RPC Message terminates when all expected reponses have been received #[test] -fn test_blocks_by_root_chunked_rpc_terminates_correctly() { +fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -716,8 +866,14 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { - let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Base, + &spec, + Protocol::Tcp, + ) + .await; // BlocksByRoot Request let rpc_request = @@ -833,14 +989,9 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { }) } -// Tests a Goodbye RPC message -#[test] -#[allow(clippy::single_match)] -fn test_goodbye_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Trace; - let enable_logging = false; - +/// Establishes a pair of nodes and disconnects the pair based on the selected protocol via an RPC +/// Goodbye message. 
+fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); @@ -850,7 +1001,8 @@ fn test_goodbye_rpc() { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await; + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec, protocol) + .await; // build the sender future let sender_future = async { @@ -876,12 +1028,9 @@ fn test_goodbye_rpc() { // build the receiver future let receiver_future = async { loop { - match receiver.next_event().await { - NetworkEvent::PeerDisconnected(_) => { - // Should receive sent RPC request - return; - } - _ => {} // Ignore other events + if let NetworkEvent::PeerDisconnected(_) = receiver.next_event().await { + // Should receive sent RPC request + return; } } }; @@ -896,3 +1045,23 @@ fn test_goodbye_rpc() { } }) } + +// Tests a Goodbye RPC message +#[test] +#[allow(clippy::single_match)] +fn tcp_test_goodbye_rpc() { + // set up the logging. The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = true; + goodbye_test(log_level, enable_logging, Protocol::Tcp); +} + +// Tests a Goodbye RPC message +#[test] +#[allow(clippy::single_match)] +fn quic_test_goodbye_rpc() { + // set up the logging. 
The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = true; + goodbye_test(log_level, enable_logging, Protocol::Quic); +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index c37b0fa45d..bbd2af2174 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -2,54 +2,58 @@ name = "network" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -sloggers = { version = "2.1.1", features = ["json"] } -genesis = { path = "../genesis" } +sloggers = { workspace = true } +genesis = { workspace = true } matches = "0.1.8" -exit-future = "0.2.0" -slog-term = "2.6.0" -slog-async = "2.5.0" +exit-future = { workspace = true } +slog-term = { workspace = true } +slog-async = { workspace = true } +eth2 = { workspace = true } [dependencies] -beacon_chain = { path = "../beacon_chain" } -store = { path = "../store" } -lighthouse_network = { path = "../lighthouse_network" } -types = { path = "../../consensus/types" } -slot_clock = { path = "../../common/slot_clock" } -slog = { version = "2.5.2", features = ["max_level_trace"] } -hex = "0.4.2" -ethereum_ssz = "0.5.0" -ssz_types = "0.5.3" -futures = "0.3.7" -error-chain = "0.12.4" -tokio = { version = "1.14.0", features = ["full"] } -tokio-stream = "0.1.3" -smallvec = "1.6.1" -rand = "0.8.5" -fnv = "1.0.7" +beacon_chain = { workspace = true } +store = { workspace = true } +lighthouse_network = { workspace = true } +types = { workspace = true } +slot_clock = { workspace = true } +slog = { workspace = true } +hex = { workspace = true } +ethereum_ssz = { workspace = true } +ssz_types = { workspace = true } +futures = { workspace = true } +error-chain = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +smallvec = { workspace = true } +rand = { workspace = true } +fnv = { workspace = true } rlp = "0.5.0" -lazy_static = "1.4.0" -lighthouse_metrics = { path 
= "../../common/lighthouse_metrics" } -logging = { path = "../../common/logging" } -task_executor = { path = "../../common/task_executor" } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +logging = { workspace = true } +task_executor = { workspace = true } igd = "0.12.1" -itertools = "0.10.0" -num_cpus = "1.13.0" -lru_cache = { path = "../../common/lru_cache" } +itertools = { workspace = true } +num_cpus = { workspace = true } +lru_cache = { workspace = true } if-addrs = "0.6.4" -strum = "0.24.0" -tokio-util = { version = "0.6.3", features = ["time"] } -derivative = "2.2.0" -delay_map = "0.3.0" -ethereum-types = { version = "0.14.1", optional = true } -operation_pool = { path = "../operation_pool" } -execution_layer = { path = "../execution_layer" } -beacon_processor = { path = "../beacon_processor" } -parking_lot = "0.12.0" -environment = { path = "../../lighthouse/environment" } +lru = { workspace = true } +strum = { workspace = true } +tokio-util = { workspace = true } +derivative = { workspace = true } +delay_map = { workspace = true } +ethereum-types = { workspace = true } +operation_pool = { workspace = true } +execution_layer = { workspace = true } +beacon_processor = { workspace = true } +parking_lot = { workspace = true } +environment = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill disable-backfill = [] +fork_from_env = ["beacon_chain/fork_from_env"] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 0144824861..799953538a 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -7,8 +7,8 @@ use beacon_chain::{ use fnv::FnvHashMap; pub use lighthouse_metrics::*; use lighthouse_network::{ - peer_manager::peerdb::client::ClientKind, types::GossipKind, BandwidthSinks, GossipTopic, - Gossipsub, NetworkGlobals, + 
metrics::AggregatedBandwidthSinks, peer_manager::peerdb::client::ClientKind, types::GossipKind, + GossipTopic, Gossipsub, NetworkGlobals, }; use std::sync::Arc; use strum::IntoEnumIterator; @@ -67,6 +67,10 @@ lazy_static! { "beacon_processor_gossip_block_early_seconds", "Whenever a gossip block is received early this metrics is set to how early that block was." ); + pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_gossip_blob_verified_total", + "Total number of gossip blob verified for propagation." + ); // Gossip Exits. pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result = try_create_int_counter( "beacon_processor_exit_verified_total", @@ -103,6 +107,10 @@ lazy_static! { "beacon_processor_bls_to_execution_change_imported_total", "Total number of address changes imported to the op pool." ); +} + +// Need to split up this `lazy_static!` due to recursion limits. +lazy_static! { // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_rpc_block_imported_total", @@ -218,18 +226,8 @@ lazy_static! { /* * Bandwidth metrics */ - pub static ref INBOUND_LIBP2P_BYTES: Result = - try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); - - pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( - "libp2p_outbound_bytes", - "The outbound bandwidth over libp2p" - ); - pub static ref TOTAL_LIBP2P_BANDWIDTH: Result = try_create_int_gauge( - "libp2p_total_bandwidth", - "The total inbound/outbound bandwidth over libp2p" - ); - + pub static ref LIBP2P_BYTES: Result = + try_create_int_counter_vec("libp2p_inbound_bytes", "The bandwidth over libp2p", &["direction", "transport"]); /* * Sync related metrics @@ -282,6 +280,44 @@ lazy_static! 
{ "Count of times when a gossip block arrived from the network later than the attestation deadline.", ); + /* + * Blob Delay Metrics + */ + pub static ref BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_gossip_propagation_verification_delay_time", + "Duration between when the blob is received over gossip and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3,-1) + ); + pub static ref BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_gossip_slot_start_delay_time", + "Duration between when the blob is received over gossip and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + ); + pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( + "beacon_blob_rpc_slot_start_delay_time", + "Duration between when a blob is received over rpc and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. 
+ // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + //decimal_buckets(-1,2) + + ); + pub static ref BEACON_BLOB_LAST_DELAY: Result = try_create_int_gauge( + "beacon_blob_last_delay", + "Keeps track of the last blob's delay from the start of the slot" + ); + + pub static ref BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( + "beacon_blob_gossip_arrived_late_total", + "Count of times when a gossip blob arrived from the network later than the attestation deadline.", + ); + /* * Light client update reprocessing queue metrics. */ @@ -291,13 +327,23 @@ lazy_static! { ); } -pub fn update_bandwidth_metrics(bandwidth: Arc) { - set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); - set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); - set_gauge( - &TOTAL_LIBP2P_BANDWIDTH, - (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, - ); +pub fn update_bandwidth_metrics(bandwidth: &AggregatedBandwidthSinks) { + if let Some(tcp_in_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["inbound", "tcp"]) { + tcp_in_bandwidth.reset(); + tcp_in_bandwidth.inc_by(bandwidth.total_tcp_inbound()); + } + if let Some(tcp_out_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["outbound", "tcp"]) { + tcp_out_bandwidth.reset(); + tcp_out_bandwidth.inc_by(bandwidth.total_tcp_outbound()); + } + if let Some(quic_in_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["inbound", "quic"]) { + quic_in_bandwidth.reset(); + quic_in_bandwidth.inc_by(bandwidth.total_quic_inbound()); + } + if let Some(quic_out_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["outbound", "quic"]) { + quic_out_bandwidth.reset(); + quic_out_bandwidth.inc_by(bandwidth.total_quic_outbound()); + } } pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index 9bf123e8de..d011ac42e8 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -12,20 +12,49 @@ use 
types::EthSpec; /// Configuration required to construct the UPnP port mappings. pub struct UPnPConfig { - /// The local tcp port. + /// The local TCP port. tcp_port: u16, - /// The local udp port. - udp_port: u16, + /// The local UDP discovery port. + disc_port: u16, + /// The local UDP quic port. + quic_port: u16, /// Whether discovery is enabled or not. disable_discovery: bool, + /// Whether quic is enabled or not. + disable_quic_support: bool, +} + +/// Contains mappings that managed to be established. +#[derive(Default, Debug)] +pub struct EstablishedUPnPMappings { + /// A TCP port mapping for libp2p. + pub tcp_port: Option, + /// A UDP port for the QUIC libp2p transport. + pub udp_quic_port: Option, + /// A UDP port for discv5. + pub udp_disc_port: Option, +} + +impl EstablishedUPnPMappings { + /// Returns true if at least one value is set. + pub fn is_some(&self) -> bool { + self.tcp_port.is_some() || self.udp_quic_port.is_some() || self.udp_disc_port.is_some() + } + + // Iterator over the UDP ports + pub fn udp_ports(&self) -> impl Iterator { + self.udp_quic_port.iter().chain(self.udp_disc_port.iter()) + } } impl UPnPConfig { pub fn from_config(config: &NetworkConfig) -> Option { config.listen_addrs().v4().map(|v4_addr| UPnPConfig { tcp_port: v4_addr.tcp_port, - udp_port: v4_addr.udp_port, + disc_port: v4_addr.disc_port, + quic_port: v4_addr.quic_port, disable_discovery: config.disable_discovery, + disable_quic_support: config.disable_quic_support, }) } } @@ -68,6 +97,8 @@ pub fn construct_upnp_mappings( debug!(log, "UPnP Local IP Discovered"; "ip" => ?local_ip); + let mut mappings = EstablishedUPnPMappings::default(); + match local_ip { IpAddr::V4(address) => { let libp2p_socket = SocketAddrV4::new(address, config.tcp_port); @@ -76,39 +107,46 @@ pub fn construct_upnp_mappings( // one. // I've found this to be more reliable. If multiple users are behind a single // router, they should ideally try to set different port numbers. 
- let tcp_socket = add_port_mapping( + mappings.tcp_port = add_port_mapping( &gateway, igd::PortMappingProtocol::TCP, libp2p_socket, "tcp", &log, - ).and_then(|_| { + ).map(|_| { let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new((*ip).into(), config.tcp_port)).map_err(|_| ()); info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); - external_socket + config.tcp_port }).ok(); - let udp_socket = if !config.disable_discovery { - let discovery_socket = SocketAddrV4::new(address, config.udp_port); + let set_udp_mapping = |udp_port| { + let udp_socket = SocketAddrV4::new(address, udp_port); add_port_mapping( &gateway, igd::PortMappingProtocol::UDP, - discovery_socket, + udp_socket, "udp", &log, - ).and_then(|_| { - let external_socket = external_ip - .map(|ip| SocketAddr::new(ip.into(), config.udp_port)).map_err(|_| ()); - info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.udp_port)); - external_socket - }).ok() - } else { - None + ).map(|_| { + info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), udp_port)); + }) }; + // Set the discovery UDP port mapping + if !config.disable_discovery && set_udp_mapping(config.disc_port).is_ok() { + mappings.udp_disc_port = Some(config.disc_port); + } + + // Set the quic UDP port mapping + if !config.disable_quic_support && set_udp_mapping(config.quic_port).is_ok() { + mappings.udp_quic_port = Some(config.quic_port); + } + // report any updates to the network service. 
- network_send.send(NetworkMessage::UPnPMappingEstablished{ tcp_socket, udp_socket }) - .unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e)); + if mappings.is_some() { + network_send.send(NetworkMessage::UPnPMappingEstablished{ mappings }) + .unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e)); + } } _ => debug!(log, "UPnP no routes constructed. IPv6 not supported"), } @@ -161,12 +199,12 @@ fn add_port_mapping( } /// Removes the specified TCP and UDP port mappings. -pub fn remove_mappings(tcp_port: Option, udp_port: Option, log: &slog::Logger) { - if tcp_port.is_some() || udp_port.is_some() { +pub fn remove_mappings(mappings: &EstablishedUPnPMappings, log: &slog::Logger) { + if mappings.is_some() { debug!(log, "Removing UPnP port mappings"); match igd::search_gateway(Default::default()) { Ok(gateway) => { - if let Some(tcp_port) = tcp_port { + if let Some(tcp_port) = mappings.tcp_port { match gateway.remove_port(igd::PortMappingProtocol::TCP, tcp_port) { Ok(()) => debug!(log, "UPnP Removed TCP port mapping"; "port" => tcp_port), Err(e) => { @@ -174,8 +212,8 @@ pub fn remove_mappings(tcp_port: Option, udp_port: Option, log: &slog: } } } - if let Some(udp_port) = udp_port { - match gateway.remove_port(igd::PortMappingProtocol::UDP, udp_port) { + for udp_port in mappings.udp_ports() { + match gateway.remove_port(igd::PortMappingProtocol::UDP, *udp_port) { Ok(()) => debug!(log, "UPnP Removed UDP port mapping"; "port" => udp_port), Err(e) => { debug!(log, "UPnP Failed to remove UDP port mapping"; "port" => udp_port, "error" => %e) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index cb4d6f9c2b..82daf74efe 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,17 +4,21 @@ use 
crate::{ service::NetworkMessage, sync::SyncMessage, }; +use std::collections::HashSet; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; +use beacon_chain::block_verification_types::AsBlock; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, + data_availability_checker::AvailabilityCheckErrorCategory, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, - validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, - NotifyExecutionLayer, + validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, + AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, + GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; @@ -31,8 +35,8 @@ use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use beacon_processor::{ @@ -596,6 +600,223 @@ impl NetworkBeaconProcessor { } } + #[allow(clippy::too_many_arguments)] + pub async fn process_gossip_blob( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + _peer_client: Client, + blob_index: u64, + signed_blob: 
SignedBlobSidecar, + seen_duration: Duration, + ) { + let slot = signed_blob.message.slot; + let root = signed_blob.message.block_root; + let index = signed_blob.message.index; + let commitment = signed_blob.message.kzg_commitment; + let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); + // Log metrics to track delay from other nodes on the network. + metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay); + metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64); + match self + .chain + .verify_blob_sidecar_for_gossip(signed_blob, blob_index) + { + Ok(gossip_verified_blob) => { + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); + + if delay >= self.chain.slot_clock.unagg_attestation_production_delay() { + metrics::inc_counter(&metrics::BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL); + debug!( + self.log, + "Gossip blob arrived late"; + "block_root" => ?gossip_verified_blob.block_root(), + "proposer_index" => gossip_verified_blob.proposer_index(), + "slot" => gossip_verified_blob.slot(), + "delay" => ?delay, + "commitment" => %gossip_verified_blob.kzg_commitment(), + ); + } + + debug!( + self.log, + "Successfully verified gossip blob"; + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %gossip_verified_blob.kzg_commitment(), + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Log metrics to keep track of propagation delay times. 
+ if let Some(duration) = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .and_then(|now| now.checked_sub(seen_duration)) + { + metrics::observe_duration( + &metrics::BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME, + duration, + ); + } + self.process_gossip_verified_blob(peer_id, gossip_verified_blob, seen_duration) + .await + } + Err(err) => { + match err { + GossipBlobError::BlobParentUnknown(blob) => { + debug!( + self.log, + "Unknown parent hash for blob"; + "action" => "requesting parent", + "block_root" => %blob.block_root, + "parent_root" => %blob.block_parent_root, + "commitment" => %commitment, + ); + self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); + } + GossipBlobError::ProposerSignatureInvalid + | GossipBlobError::UnknownValidator(_) + | GossipBlobError::ProposerIndexMismatch { .. } + | GossipBlobError::BlobIsNotLaterThanParent { .. } + | GossipBlobError::InvalidSubnet { .. } => { + warn!( + self.log, + "Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; + "error" => ?err, + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %commitment, + ); + // Prevent recurring behaviour by penalizing the peer slightly. + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_blob_low", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipBlobError::FutureSlot { .. } + | GossipBlobError::BeaconChainError(_) + | GossipBlobError::RepeatBlob { .. } + | GossipBlobError::PastFinalizedSlot { .. } => { + warn!( + self.log, + "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; + "error" => ?err, + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %commitment, + ); + // Prevent recurring behaviour by penalizing the peer slightly. 
+ self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_blob_high", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + } + } + } + + pub async fn process_gossip_verified_blob( + self: &Arc, + peer_id: PeerId, + verified_blob: GossipVerifiedBlob, + // This value is not used presently, but it might come in handy for debugging. + _seen_duration: Duration, + ) { + let block_root = verified_blob.block_root(); + let blob_slot = verified_blob.slot(); + let blob_index = verified_blob.id().index; + + let delay_lookup = self + .chain + .data_availability_checker + .should_delay_lookup(blob_slot); + + match self.chain.process_gossip_blob(verified_blob).await { + Ok(AvailabilityProcessingStatus::Imported(block_root)) => { + // Note: Reusing block imported metric here + metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + info!( + self.log, + "Gossipsub blob processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(_slot, block_root)) => { + if delay_lookup { + self.cache_peer(peer_id, &block_root); + trace!( + self.log, + "Processed blob, delaying lookup for other components"; + "slot" => %blob_slot, + "blob_index" => %blob_index, + "block_root" => %block_root, + ); + } else { + trace!( + self.log, + "Missing block components for gossip verified blob"; + "slot" => %blob_slot, + "blob_index" => %blob_index, + "block_root" => %block_root, + ); + self.send_sync_message(SyncMessage::MissingGossipBlockComponents( + vec![peer_id], + block_root, + )); + } + } + Err(err) => { + debug!( + self.log, + "Invalid gossip blob"; + "outcome" => ?err, + "block root" => ?block_root, + "block slot" => blob_slot, + "blob index" => blob_index, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_blob_ssz", + ); + 
trace!( + self.log, + "Invalid gossip blob ssz"; + ); + } + } + } + + /// Cache the peer id for the given block root. + fn cache_peer(self: &Arc, peer_id: PeerId, block_root: &Hash256) { + let mut guard = self.delayed_lookup_peers.lock(); + if let Some(peers) = guard.get_mut(block_root) { + peers.insert(peer_id); + } else { + let mut peers = HashSet::new(); + peers.insert(peer_id); + guard.push(*block_root, peers); + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -751,7 +972,7 @@ impl NetworkBeaconProcessor { "Unknown parent for gossip block"; "root" => ?block_root ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); + self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)); return None; } Err(e @ BlockError::BeaconChainError(_)) => { @@ -763,9 +984,17 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Gossip block is already known"; + "block_root" => %block_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) - | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); @@ -809,6 +1038,15 @@ impl NetworkBeaconProcessor { ); return None; } + // Note: This error variant cannot be reached when doing gossip validation + // as we do not do availability checks here. + Err(e @ BlockError::AvailabilityCheck(_)) => { + crit!(self.log, "Internal block gossip validation error. 
Availability check during + gossip validation"; + "error" => %e + ); + return None; + } }; metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL); @@ -916,21 +1154,21 @@ impl NetworkBeaconProcessor { // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { - let block: Arc<_> = verified_block.block.clone(); + let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; + let delay_lookup = self + .chain + .data_availability_checker + .should_delay_lookup(verified_block.block.slot()); + let result = self .chain - .process_block( - block_root, - verified_block, - NotifyExecutionLayer::Yes, - || Ok(()), - ) + .process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes) .await; match &result { - Ok(block_root) => { + Ok(AvailabilityProcessingStatus::Imported(block_root)) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx @@ -957,7 +1195,29 @@ impl NetworkBeaconProcessor { self.chain.recompute_head_at_current_slot().await; } - Err(BlockError::ParentUnknown { .. }) => { + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + if delay_lookup { + self.cache_peer(peer_id, block_root); + trace!( + self.log, + "Processed block, delaying lookup for other components"; + "slot" => slot, + "block_root" => %block_root, + ); + } else { + trace!( + self.log, + "Missing block components for gossip verified block"; + "slot" => slot, + "block_root" => %block_root, + ); + self.send_sync_message(SyncMessage::MissingGossipBlockComponents( + vec![peer_id], + *block_root, + )); + } + } + Err(BlockError::ParentUnknown(block)) => { // Inform the sync manager to find parents for this block // This should not occur. 
It should be checked by `should_forward_block` error!( @@ -965,7 +1225,7 @@ impl NetworkBeaconProcessor { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock( + self.send_sync_message(SyncMessage::UnknownParentBlock( peer_id, block.clone(), block_root, @@ -978,6 +1238,31 @@ impl NetworkBeaconProcessor { "error" => %e ); } + Err(BlockError::AvailabilityCheck(err)) => { + match err.category() { + AvailabilityCheckErrorCategory::Internal => { + warn!( + self.log, + "Internal availability check error"; + "error" => ?err, + ); + } + AvailabilityCheckErrorCategory::Malicious => { + // Note: we cannot penalize the peer that sent us the block + // over gossip here because these errors imply either an issue + // with: + // 1. Blobs we have received over non-gossip sources + // (from potentially other peers) + // 2. The proposer being malicious and sending inconsistent + // blocks and blobs. + warn!( + self.log, + "Received invalid blob or malicious proposer"; + "error" => ?err + ); + } + } + } other => { debug!( self.log, @@ -1809,7 +2094,10 @@ impl NetworkBeaconProcessor { // We don't know the block, get the sync manager to handle the block lookup, and // send the attestation to be scheduled for re-processing. self.sync_tx - .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root)) + .send(SyncMessage::UnknownBlockHashFromAttestation( + peer_id, + *beacon_block_root, + )) .unwrap_or_else(|_| { warn!( self.log, @@ -2062,7 +2350,7 @@ impl NetworkBeaconProcessor { ); } AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( - HotColdDBError::AttestationStateIsFinalized { .. }, + HotColdDBError::FinalizedStateNotInHotDatabase { .. 
}, ))) => { debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f8c4e37ffe..8094d4677c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,6 +2,7 @@ use crate::{ service::NetworkMessage, sync::{manager::BlockProcessType, SyncMessage}, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, test_utils::BeaconChainHarness, BeaconChain, }; @@ -12,12 +13,16 @@ use beacon_processor::{ WorkEvent as BeaconWorkEvent, }; use environment::null_logger; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, }; -use slog::{debug, Logger}; -use slot_clock::ManualSlotClock; +use lru::LruCache; +use parking_lot::Mutex; +use slog::{crit, debug, error, trace, Logger}; +use slot_clock::{ManualSlotClock, SlotClock}; +use std::collections::HashSet; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -25,9 +30,11 @@ use store::MemoryStore; use task_executor::test_utils::TestRuntime; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, error::TrySendError}; +use tokio::time::{interval_at, Instant}; use types::*; pub use sync_methods::ChainSegmentProcessId; +use types::blob_sidecar::FixedBlobSidecarList; pub type Error = TrySendError>; @@ -37,6 +44,7 @@ mod sync_methods; mod tests; pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; +pub const DELAYED_PEER_CACHE_SIZE: usize = 16; /// Defines if and where we will store the SSZ files of invalid blocks. 
#[derive(Clone)] @@ -57,6 +65,7 @@ pub struct NetworkBeaconProcessor { pub reprocess_tx: mpsc::Sender, pub network_globals: Arc>, pub invalid_block_storage: InvalidBlockStorage, + pub delayed_lookup_peers: Mutex>>, pub executor: TaskExecutor, pub log: Logger, } @@ -196,6 +205,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some blob sidecar. + pub fn send_gossip_blob_sidecar( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + blob_index: u64, + blob: SignedBlobSidecar, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_blob( + message_id, + peer_id, + peer_client, + blob_index, + blob, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipSignedBlobSidecar(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -376,7 +415,7 @@ impl NetworkBeaconProcessor { pub fn send_rpc_beacon_block( self: &Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), Error> { @@ -392,11 +431,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some blobs, where the result from computation (if any) is + /// sent to the other side of `result_tx`. 
+ pub fn send_rpc_blobs( + self: &Arc, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let blob_count = blobs.iter().filter(|b| b.is_some()).count(); + if blob_count == 0 { + return Ok(()); + } + let process_fn = self.clone().generate_rpc_blobs_process_fn( + block_root, + blobs, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcBlobs { process_fn }, + }) + } + /// Create a new work event to import `blocks` as a beacon chain segment. pub fn send_chain_segment( self: &Arc, process_id: ChainSegmentProcessId, - blocks: Vec>>, + blocks: Vec>, ) -> Result<(), Error> { let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); let processor = self.clone(); @@ -496,6 +560,40 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `BlobsByRangeRequest`s from the RPC network. + pub fn send_blobs_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_blobs_by_range_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlobsByRangeRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `BlobsByRootRequest`s from the RPC network. 
+ pub fn send_blobs_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_blobs_by_root_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::BlobsByRootsRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. pub fn send_lightclient_bootstrap_request( self: &Arc, @@ -532,6 +630,68 @@ impl NetworkBeaconProcessor { "error" => %e) }); } + + /// This service is responsible for collecting lookup messages and sending them back to sync + /// for processing after a short delay. + /// + /// We want to delay lookups triggered from gossip for the following reasons: + /// + /// - We only want to make one request for components we are unlikely to see on gossip. This means + /// we don't have to repeatedly update our RPC request's state as we receive gossip components. + /// + /// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request. + /// + /// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs + /// over gossip and RPC. + /// + /// - We would prefer to request peers based on whether we've seen them attest, because this gives + /// us an idea about whether they *should* have the block/blobs we're missing. This is because a + /// node should not attest to a block unless it has all the blobs for that block. This gives us a + /// stronger basis for peer scoring. 
+ pub fn spawn_delayed_lookup_service(self: &Arc) { + let processor_clone = self.clone(); + let executor = self.executor.clone(); + let log = self.log.clone(); + let beacon_chain = self.chain.clone(); + executor.spawn( + async move { + let slot_duration = beacon_chain.slot_clock.slot_duration(); + let delay = beacon_chain.slot_clock.single_lookup_delay(); + let interval_start = match ( + beacon_chain.slot_clock.duration_to_next_slot(), + beacon_chain.slot_clock.seconds_from_current_slot_start(), + ) { + (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => { + let duration_until_start = if seconds_from_current_slot_start > delay { + duration_to_next_slot + delay + } else { + delay - seconds_from_current_slot_start + }; + Instant::now() + duration_until_start + } + _ => { + crit!(log, + "Failed to read slot clock, delayed lookup service timing will be inaccurate. \ + This may degrade performance" + ); + Instant::now() + } + }; + + let mut interval = interval_at(interval_start, slot_duration); + loop { + interval.tick().await; + let Some(slot) = beacon_chain.slot_clock.now_or_genesis() else { + error!(log, "Skipping delayed lookup poll, unable to read slot clock"); + continue + }; + trace!(log, "Polling delayed lookups for slot: {slot}"); + processor_clone.poll_delayed_lookups(slot) + } + }, + "delayed_lookups", + ); + } } type TestBeaconChainType = @@ -574,6 +734,7 @@ impl NetworkBeaconProcessor> { reprocess_tx: work_reprocessing_tx, network_globals, invalid_block_storage: InvalidBlockStorage::Disabled, + delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)), executor: runtime.task_executor.clone(), log, }; diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 19b0a60a43..0e6c76e222 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -5,15 
+5,22 @@ use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use beacon_processor::SendOnDrop; use itertools::process_results; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS, MAX_REQUEST_BLOCKS_DENEB, +}; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use task_executor::TaskExecutor; use tokio_stream::StreamExt; -use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot}; +use types::blob_sidecar::BlobIdentifier; +use types::{ + light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, ForkName, Hash256, Slot, +}; impl NetworkBeaconProcessor { /* Auxiliary functions */ @@ -118,7 +125,10 @@ impl NetworkBeaconProcessor { }; self.send_sync_message(SyncMessage::AddPeer(peer_id, info)); } - Err(e) => error!(self.log, "Could not process status message"; "error" => ?e), + Err(e) => error!(self.log, "Could not process status message"; + "peer" => %peer_id, + "error" => ?e + ), } } @@ -207,6 +217,84 @@ impl NetworkBeaconProcessor { "load_blocks_by_root_blocks", ) } + /// Handle a `BlobsByRoot` request from the peer. + pub fn handle_blobs_by_root_request( + self: Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: BlobsByRootRequest, + ) { + let Some(requested_root) = request.blob_ids.get(0).map(|id| id.block_root) else { + // No blob ids requested. + return; + }; + let requested_indices = request + .blob_ids + .iter() + .map(|id| id.index) + .collect::>(); + let mut send_blob_count = 0; + let send_response = true; + + let mut blob_list_results = HashMap::new(); + for id in request.blob_ids.into_iter() { + // First attempt to get the blobs from the RPC cache. 
+ if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(&id) { + self.send_response(peer_id, Response::BlobsByRoot(Some(blob)), request_id); + send_blob_count += 1; + } else { + let BlobIdentifier { + block_root: root, + index, + } = id; + + let blob_list_result = match blob_list_results.entry(root) { + Entry::Vacant(entry) => { + entry.insert(self.chain.get_blobs_checking_early_attester_cache(&root)) + } + Entry::Occupied(entry) => entry.into_mut(), + }; + + match blob_list_result.as_ref() { + Ok(blobs_sidecar_list) => { + 'inner: for blob_sidecar in blobs_sidecar_list.iter() { + if blob_sidecar.index == index { + self.send_response( + peer_id, + Response::BlobsByRoot(Some(blob_sidecar.clone())), + request_id, + ); + send_blob_count += 1; + break 'inner; + } + } + } + Err(e) => { + debug!( + self.log, + "Error fetching blob for peer"; + "peer" => %peer_id, + "request_root" => ?root, + "error" => ?e, + ); + } + } + } + } + debug!( + self.log, + "Received BlobsByRoot Request"; + "peer" => %peer_id, + "request_root" => %requested_root, + "request_indices" => ?requested_indices, + "returned" => send_blob_count + ); + + // send stream termination + if send_response { + self.send_response(peer_id, Response::BlobsByRoot(None), request_id); + } + } /// Handle a `BlocksByRoot` request from the peer. 
pub fn handle_light_client_bootstrap( @@ -223,7 +311,7 @@ impl NetworkBeaconProcessor { self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), + "Bootstrap not available".into(), request_id, ); return; @@ -233,7 +321,7 @@ impl NetworkBeaconProcessor { self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), + "Bootstrap not available".into(), request_id, ); return; @@ -246,7 +334,7 @@ impl NetworkBeaconProcessor { self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), + "Bootstrap not available".into(), request_id, ); return; @@ -256,23 +344,20 @@ impl NetworkBeaconProcessor { self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), + "Bootstrap not available".into(), request_id, ); return; } }; - let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) { - Ok(bootstrap) => bootstrap, - Err(_) => { - self.send_error_response( - peer_id, - RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not avaiable".into(), - request_id, - ); - return; - } + let Ok(bootstrap) = LightClientBootstrap::from_beacon_state(&mut beacon_state) else { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Bootstrap not available".into(), + request_id, + ); + return; }; self.send_response( peer_id, @@ -288,7 +373,7 @@ impl NetworkBeaconProcessor { send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, - mut req: BlocksByRangeRequest, + req: BlocksByRangeRequest, ) { debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, @@ -297,8 +382,21 @@ impl NetworkBeaconProcessor { ); // Should not send more than max request blocks - if *req.count() > MAX_REQUEST_BLOCKS { - *req.count_mut() = MAX_REQUEST_BLOCKS; + let max_request_size = self.chain.epoch().map_or(MAX_REQUEST_BLOCKS, 
|epoch| { + match self.chain.spec.fork_name_at_epoch(epoch) { + ForkName::Deneb => MAX_REQUEST_BLOCKS_DENEB, + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + MAX_REQUEST_BLOCKS + } + } + }); + if *req.count() > max_request_size { + return self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + format!("Request exceeded max size {max_request_size}"), + request_id, + ); } let forwards_block_root_iter = match self @@ -312,7 +410,10 @@ impl NetworkBeaconProcessor { oldest_block_slot, }, )) => { - debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot); + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); return self.send_error_response( peer_id, RPCResponseErrorCode::ResourceUnavailable, @@ -320,7 +421,19 @@ impl NetworkBeaconProcessor { request_id, ); } - Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), + Err(e) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database error".into(), + request_id, + ); + return error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + } }; // Pick out the required blocks, ignoring skip-slots. 
@@ -344,7 +457,13 @@ impl NetworkBeaconProcessor { let block_roots = match maybe_block_roots { Ok(block_roots) => block_roots, - Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e), + Err(e) => { + return error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ) + } }; // remove all skip slots @@ -381,8 +500,17 @@ impl NetworkBeaconProcessor { error!( self.log, "Block in the chain is not in the store"; + "request" => ?req, + "peer" => %peer_id, "request_root" => ?root ); + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database inconsistency".into(), + request_id, + ); + send_response = false; break; } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { @@ -479,4 +607,210 @@ impl NetworkBeaconProcessor { "load_blocks_by_range_blocks", ); } + + /// Handle a `BlobsByRange` request from the peer. + pub fn handle_blobs_by_range_request( + self: Arc, + peer_id: PeerId, + request_id: PeerRequestId, + req: BlobsByRangeRequest, + ) { + debug!(self.log, "Received BlobsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + + // Should not send more than max request blocks + if req.max_blobs_requested::() > MAX_REQUEST_BLOB_SIDECARS { + return self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`".into(), + request_id, + ); + } + + let request_start_slot = Slot::from(req.start_slot); + + let data_availability_boundary_slot = match self.chain.data_availability_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!(self.log, "Deneb fork is disabled"); + self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Deneb fork is disabled".into(), + request_id, + ); + return; + } + }; + + let oldest_blob_slot = self + .chain + .store + .get_blob_info() 
+ .oldest_blob_slot + .unwrap_or(data_availability_boundary_slot); + if request_start_slot < oldest_blob_slot { + debug!( + self.log, + "Range request start slot is older than data availability boundary."; + "requested_slot" => request_start_slot, + "oldest_blob_slot" => oldest_blob_slot, + "data_availability_boundary" => data_availability_boundary_slot + ); + + return if data_availability_boundary_slot < oldest_blob_slot { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "blobs pruned within boundary".into(), + request_id, + ) + } else { + self.send_error_response( + peer_id, + RPCResponseErrorCode::InvalidRequest, + "Req outside availability period".into(), + request_id, + ) + }; + } + + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(request_start_slot) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot, + oldest_block_slot, + }, + )) => { + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); + return self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Backfilling".into(), + request_id, + ); + } + Err(e) => { + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Database error".into(), + request_id, + ); + return error!(self.log, "Unable to obtain root iter"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + } + }; + + // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to + // `request_start_slot` in order to check whether the `request_start_slot` is a skip. + let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { + self.chain + .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) + .ok() + .flatten() + }); + + // Pick out the required blocks, ignoring skip-slots. 
+ let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + return error!(self.log, "Error during iteration over blocks"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ) + } + }; + + // remove all skip slots + let block_roots = block_roots.into_iter().flatten(); + + let mut blobs_sent = 0; + let mut send_response = true; + + for root in block_roots { + match self.chain.get_blobs(&root) { + Ok(blob_sidecar_list) => { + for blob_sidecar in blob_sidecar_list.iter() { + blobs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlobsByRange(Some(blob_sidecar.clone())), + id: request_id, + }); + } + } + Err(e) => { + error!( + self.log, + "Error fetching blobs block root"; + "request" => ?req, + "peer" => %peer_id, + "block_root" => ?root, + "error" => ?e + ); + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "No blobs and failed fetching corresponding block".into(), + request_id, + ); + send_response = false; + break; + } + } + } + + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + + debug!( + self.log, + "BlobsByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blobs_sent + ); + + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlobsByRange(None), + id: request_id, + }); + } + } } diff --git 
a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index c33e2acf54..7d6bde6344 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use crate::metrics; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; @@ -7,22 +5,30 @@ use crate::sync::{ manager::{BlockProcessType, SyncMessage}, ChainId, }; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::data_availability_checker::AvailabilityCheckError; +use beacon_chain::data_availability_checker::MaybeAvailableBlock; use beacon_chain::{ - observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, - NotifyExecutionLayer, + observed_block_producers::Error as ObserveError, + validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, + AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, + ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, }; use beacon_processor::{ work_reprocessing_queue::{QueuedRpcBlock, ReprocessQueueMessage}, AsyncFn, BlockingFn, DuplicateCache, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, warn}; +use slog::{debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; +use store::KzgCommitment; use tokio::sync::mpsc; -use types::{Epoch, Hash256, SignedBeaconBlock}; +use types::beacon_block_body::format_kzg_commitments; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{Epoch, Hash256, Slot}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
#[derive(Clone, Debug, PartialEq)] @@ -44,14 +50,14 @@ struct ChainSegmentFailed { } impl NetworkBeaconProcessor { - /// Returns an async closure which processes a beacon block recieved via RPC. + /// Returns an async closure which processes a beacon block received via RPC. /// /// This separate function was required to prevent a cycle during compiler /// type checking. pub fn generate_rpc_beacon_block_process_fn( self: Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { @@ -75,7 +81,7 @@ impl NetworkBeaconProcessor { pub fn generate_rpc_beacon_block_fns( self: Arc, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> (AsyncFn, BlockingFn) { @@ -89,9 +95,9 @@ impl NetworkBeaconProcessor { // A closure which will ignore the block. let ignore_fn = move || { // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: crate::sync::manager::BlockProcessResult::Ignored, + result: crate::sync::manager::BlockProcessingResult::Ignored, }); }; (process_fn, Box::new(ignore_fn)) @@ -102,41 +108,38 @@ impl NetworkBeaconProcessor { pub async fn process_rpc_block( self: Arc>, block_root: Hash256, - block: Arc>, + block: RpcBlock, seen_timestamp: Duration, process_type: BlockProcessType, reprocess_tx: mpsc::Sender, duplicate_cache: DuplicateCache, ) { // Check if the block is already being imported through another source - let handle = match duplicate_cache.check_and_insert(block_root) { - Some(handle) => handle, - None => { - debug!( - self.log, - "Gossip block is being processed"; - "action" => "sending rpc block to reprocessing queue", - "block_root" => %block_root, - ); + let Some(handle) = duplicate_cache.check_and_insert(block_root) else { + debug!( + self.log, + "Gossip block is being processed"; + "action" => "sending 
rpc block to reprocessing queue", + "block_root" => %block_root, + ); - // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( - block_root, - block, - seen_timestamp, - process_type, - ); - let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { - beacon_block_root: block_root, - process_fn, - ignore_fn, - }); + // Send message to work reprocess queue to retry the block + let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( + block_root, + block, + seen_timestamp, + process_type, + ); + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + beacon_block_root: block_root, + process_fn, + ignore_fn, + }); - if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) - }; - return; - } + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) + }; + return; }; // Returns `true` if the time now is after the 4s attestation deadline. 
@@ -208,15 +211,26 @@ impl NetworkBeaconProcessor { let slot = block.slot(); let parent_root = block.message().parent_root(); + let commitments_formatted = block.as_block().commitments_formatted(); + + debug!( + self.log, + "Processing RPC block"; + "block_root" => ?block_root, + "proposer" => block.message().proposer_index(), + "slot" => block.slot(), + "commitments" => commitments_formatted, + ); + let result = self .chain - .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(())) + .process_block_with_early_caching(block_root, block, NotifyExecutionLayer::Yes) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); // RPC block imported, regardless of process type - if let &Ok(hash) = &result { + if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); // Trigger processing for work referencing this block. @@ -240,7 +254,7 @@ impl NetworkBeaconProcessor { } } // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, result: result.into(), }); @@ -249,12 +263,143 @@ impl NetworkBeaconProcessor { drop(handle); } + /// Returns an async closure which processes a list of blobs received via RPC. + /// + /// This separate function was required to prevent a cycle during compiler + /// type checking. + pub fn generate_rpc_blobs_process_fn( + self: Arc, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.clone() + .process_rpc_blobs(block_root, blobs, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + + /// Attempt to process a list of blobs received from a direct RPC request. 
+ pub async fn process_rpc_blobs( + self: Arc>, + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let Some(slot) = blobs + .iter() + .find_map(|blob| blob.as_ref().map(|blob| blob.slot)) + else { + return; + }; + + let (indices, commitments): (Vec, Vec) = blobs + .iter() + .filter_map(|blob_opt| { + blob_opt + .as_ref() + .map(|blob| (blob.index, blob.kzg_commitment)) + }) + .unzip(); + let commitments = format_kzg_commitments(&commitments); + + debug!( + self.log, + "RPC blobs received"; + "indices" => ?indices, + "block_root" => %block_root, + "slot" => %slot, + "commitments" => commitments, + ); + + if let Ok(current_slot) = self.chain.slot() { + if current_slot == slot { + // Note: this metric is useful to gauge how long it takes to receive blobs requested + // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()` + // we can use that as a baseline to measure against. 
+ let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + + metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay); + } + } + + let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + + match &result { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and blobs", + "slot" => %slot, + "block_hash" => %hash, + ); + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + warn!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + "slot" => %slot, + ); + } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Blobs have already been imported"; + "block_hash" => %block_root, + "slot" => %slot, + ); + } + Err(e) => { + warn!( + self.log, + "Error when importing rpc blobs"; + "error" => ?e, + "block_hash" => %block_root, + "slot" => %slot, + ); + } + } + + // Sync handles these results + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } + + /// Poll the beacon chain for any delayed lookups that are now available. + pub fn poll_delayed_lookups(&self, slot: Slot) { + let block_roots = self + .chain + .data_availability_checker + .incomplete_processing_components(slot); + if block_roots.is_empty() { + trace!(self.log, "No delayed lookups found on poll"); + } else { + debug!(self.log, "Found delayed lookups on poll"; "lookup_count" => block_roots.len()); + } + for block_root in block_roots { + if let Some(peer_ids) = self.delayed_lookup_peers.lock().pop(&block_root) { + self.send_sync_message(SyncMessage::MissingGossipBlockComponents( + peer_ids.into_iter().collect(), + block_root, + )); + } + } + } + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. 
pub async fn process_chain_segment( &self, sync_type: ChainSegmentProcessId, - downloaded_blocks: Vec>>, + downloaded_blocks: Vec>, notify_execution_layer: NotifyExecutionLayer, ) { let result = match sync_type { @@ -304,6 +449,10 @@ impl NetworkBeaconProcessor { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); + let n_blobs = downloaded_blocks + .iter() + .map(|wrapped| wrapped.n_blobs()) + .sum::(); match self.process_backfill_blocks(downloaded_blocks) { (_, Ok(_)) => { @@ -312,6 +461,7 @@ impl NetworkBeaconProcessor { "first_block_slot" => start_slot, "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, + "processed_blobs" => n_blobs, "service"=> "sync"); BatchProcessResult::Success { was_non_empty: sent_blocks > 0, @@ -322,6 +472,7 @@ impl NetworkBeaconProcessor { "batch_epoch" => epoch, "first_block_slot" => start_slot, "last_block_slot" => end_slot, + "processed_blobs" => n_blobs, "error" => %e.message, "service" => "sync"); match e.peer_action { @@ -373,10 +524,10 @@ impl NetworkBeaconProcessor { /// Helper function to process blocks batches which only consumes the chain and blocks to process. async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>>, + downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blocks: Vec> = downloaded_blocks.cloned().collect(); + let blocks: Vec<_> = downloaded_blocks.cloned().collect(); match self .chain .process_chain_segment(blocks, notify_execution_layer) @@ -406,19 +557,67 @@ impl NetworkBeaconProcessor { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. 
fn process_backfill_blocks( &self, - blocks: Vec>>, + downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks - .iter() - .map(|full_block| full_block.clone_as_blinded()) - .map(Arc::new) - .collect(); - match self.chain.import_historical_block_batch(blinded_blocks) { + let total_blocks = downloaded_blocks.len(); + let available_blocks = match downloaded_blocks + .into_iter() + .map(|block| { + self.chain + .data_availability_checker + .check_rpc_block_availability(block) + }) + .collect::, _>>() + { + Ok(blocks) => blocks + .into_iter() + .filter_map(|maybe_available| match maybe_available { + MaybeAvailableBlock::Available(block) => Some(block), + MaybeAvailableBlock::AvailabilityPending { .. } => None, + }) + .collect::>(), + Err(e) => match e { + AvailabilityCheckError::StoreError(_) + | AvailabilityCheckError::KzgNotInitialized => { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: None, + message: "Failed to check block availability".into(), + }), + ); + } + e => { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: Some(PeerAction::LowToleranceError), + message: format!("Failed to check block availability : {:?}", e), + }), + ) + } + }, + }; + + if available_blocks.len() != total_blocks { + return ( + 0, + Err(ChainSegmentFailed { + peer_action: Some(PeerAction::LowToleranceError), + message: format!( + "{} out of {} blocks were unavailable", + (total_blocks - available_blocks.len()), + total_blocks + ), + }), + ); + } + + match self.chain.import_historical_block_batch(available_blocks) { Ok(imported_blocks) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL, ); - (imported_blocks, Ok(())) } Err(error) => { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index ce5b914117..0945aa7431 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ 
b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(test)] +use crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE; use crate::{ network_beacon_processor::{ ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor, @@ -8,26 +9,33 @@ use crate::{ service::NetworkMessage, sync::{manager::BlockProcessType, SyncMessage}, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; -use beacon_chain::BeaconChain; +use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use lighthouse_network::discovery::ConnectionId; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::SubstreamId; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, - Client, MessageId, NetworkGlobals, PeerId, + Client, MessageId, NetworkGlobals, PeerId, Response, }; +use lru::LruCache; +use parking_lot::Mutex; use slot_clock::SlotClock; -use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; +use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecarList, SignedVoluntaryExit, Slot, + SubnetId, }; type E = MainnetEthSpec; @@ -38,7 +46,6 @@ const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; const SMALL_CHAIN: u64 = 2; 
const LONG_CHAIN: u64 = SLOTS_PER_EPOCH * 2; -const TCP_PORT: u16 = 42; const SEQ_NUMBER: u64 = 0; /// The default time to wait for `BeaconProcessor` events. @@ -48,6 +55,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); struct TestRig { chain: Arc>, next_block: Arc>, + next_blobs: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -84,13 +92,14 @@ impl TestRig { pub async fn new_parametric(chain_length: u64, enable_backfill_rate_limiting: bool) -> Self { // This allows for testing voluntary exits without building out a massive chain. - let mut spec = E::default_spec(); + let mut spec = test_spec::(); spec.shard_committee_period = 2; let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .mock_execution_layer() .chain_config(<_>::default()) .build(); @@ -116,7 +125,7 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = harness + let (next_block_tuple, next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; @@ -142,9 +151,9 @@ impl TestRig { .get_unaggregated_attestations( &AttestationStrategy::AllValidators, &next_state, - next_block.state_root(), - next_block.canonical_root(), - next_block.slot(), + next_block_tuple.0.state_root(), + next_block_tuple.0.canonical_root(), + next_block_tuple.0.slot(), ) .into_iter() .flatten() @@ -154,9 +163,9 @@ impl TestRig { .make_attestations( &harness.get_all_validators(), &next_state, - next_block.state_root(), - next_block.canonical_root().into(), - next_block.slot(), + next_block_tuple.0.state_root(), + next_block_tuple.0.canonical_root().into(), + next_block_tuple.0.slot(), ) .into_iter() .filter_map(|(_, aggregate_opt)| aggregate_opt) @@ -196,15 +205,7 @@ impl TestRig { }); let enr_key = CombinedKey::generate_secp256k1(); let enr = 
EnrBuilder::new("v4").build(&enr_key).unwrap(); - let network_globals = Arc::new(NetworkGlobals::new( - enr, - Some(TCP_PORT), - None, - meta_data, - vec![], - false, - &log, - )); + let network_globals = Arc::new(NetworkGlobals::new(enr, meta_data, vec![], false, &log)); let executor = harness.runtime.task_executor.clone(); @@ -220,6 +221,7 @@ impl TestRig { reprocess_tx: work_reprocessing_tx.clone(), network_globals: network_globals.clone(), invalid_block_storage: InvalidBlockStorage::Disabled, + delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)), executor: executor.clone(), log: log.clone(), }; @@ -228,7 +230,6 @@ impl TestRig { let beacon_processor = BeaconProcessor { network_globals, executor, - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: log.clone(), @@ -246,7 +247,8 @@ impl TestRig { Self { chain, - next_block: Arc::new(next_block), + next_block: Arc::new(next_block_tuple.0), + next_blobs: next_block_tuple.1, attestations, next_block_attestations, next_block_aggregate_attestations, @@ -283,11 +285,28 @@ impl TestRig { .unwrap(); } + pub fn enqueue_gossip_blob(&self, blob_index: usize) { + if let Some(blobs) = self.next_blobs.as_ref() { + let blob = blobs.get(blob_index).unwrap(); + self.network_beacon_processor + .send_gossip_blob_sidecar( + junk_message_id(), + junk_peer_id(), + Client::default(), + blob.message.index, + blob.clone(), + Duration::from_secs(0), + ) + .unwrap(); + } + } + pub fn enqueue_rpc_block(&self) { + let block_root = self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), + block_root, + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -297,15 +316,47 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_block(&self) { + let block_root = 
self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( - self.next_block.canonical_root(), - self.next_block.clone(), + block_root, + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) .unwrap(); } + pub fn enqueue_single_lookup_rpc_blobs(&self) { + if let Some(blobs) = self.next_blobs.clone() { + let blobs = FixedBlobSidecarList::from( + blobs + .into_iter() + .map(|b| Some(b.message)) + .collect::>(), + ); + self.network_beacon_processor + .send_rpc_blobs( + self.next_block.canonical_root(), + blobs, + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ) + .unwrap(); + } + } + + pub fn enqueue_blobs_by_range_request(&self, count: u64) { + self.network_beacon_processor + .send_blobs_by_range_request( + PeerId::random(), + (ConnectionId::new_unchecked(42), SubstreamId::new(24)), + BlobsByRangeRequest { + start_slot: 0, + count, + }, + ) + .unwrap(); + } pub fn enqueue_backfill_batch(&self) { self.network_beacon_processor @@ -531,6 +582,13 @@ async fn import_gossip_block_acceptably_early() { rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for // processing. @@ -539,6 +597,7 @@ async fn import_gossip_block_acceptably_early() { // processing, instead of just ADDITIONAL_QUEUED_BLOCK_DELAY. Speak to @paulhauner if this test // starts failing. 
rig.chain.slot_clock.set_slot(rig.next_block.slot().into()); + assert!( rig.head_root() != rig.next_block.canonical_root(), "block not yet imported" @@ -606,6 +665,19 @@ async fn import_gossip_block_at_current_slot() { rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + + rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + assert_eq!( rig.head_root(), rig.next_block.canonical_root(), @@ -658,20 +730,34 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - - let block_event = match import_method { + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + let mut events = vec![]; + match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - GOSSIP_BLOCK + events.push(GOSSIP_BLOCK); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + events.push(GOSSIP_BLOBS_SIDECAR); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - RPC_BLOCK + events.push(RPC_BLOCK); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); + events.push(RPC_BLOBS); + } } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) - .await; + events.push(UNKNOWN_BLOCK_ATTESTATION); + + rig.assert_event_journal_contains_ordered(&events).await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. @@ -727,20 +813,34 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. 
- - let block_event = match import_method { + let num_blobs = rig + .next_blobs + .as_ref() + .map(|blobs| blobs.len()) + .unwrap_or(0); + let mut events = vec![]; + match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - GOSSIP_BLOCK + events.push(GOSSIP_BLOCK); + for i in 0..num_blobs { + rig.enqueue_gossip_blob(i); + events.push(GOSSIP_BLOBS_SIDECAR); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - RPC_BLOCK + events.push(RPC_BLOCK); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); + events.push(RPC_BLOBS); + } } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) - .await; + events.push(UNKNOWN_BLOCK_AGGREGATE); + + rig.assert_event_journal_contains_ordered(&events).await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. @@ -908,9 +1008,15 @@ async fn test_rpc_block_reprocessing() { // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); rig.enqueue_single_lookup_rpc_block(); - rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) .await; + + rig.enqueue_single_lookup_rpc_blobs(); + if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { + rig.assert_event_journal(&[RPC_BLOBS, WORKER_FREED, NOTHING_TO_DO]) + .await; + } + // next_block shouldn't be processed since it couldn't get the // duplicate cache handle assert_ne!(next_block_root, rig.head_root()); @@ -971,3 +1077,42 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { ) .await; } + +#[tokio::test] +async fn test_blobs_by_range() { + if test_spec::().deneb_fork_epoch.is_none() { + return; + }; + let mut rig = TestRig::new(64).await; + let slot_count = 32; + rig.enqueue_blobs_by_range_request(slot_count); + + let mut blob_count = 0; + for slot in 0..slot_count { + let root = rig + .chain + 
.block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap(); + blob_count += root + .map(|root| rig.chain.get_blobs(&root).unwrap_or_default().len()) + .unwrap_or(0); + } + let mut actual_count = 0; + while let Some(next) = rig._network_rx.recv().await { + if let NetworkMessage::SendResponse { + peer_id: _, + response: Response::BlobsByRange(blob), + id: _, + } = next + { + if blob.is_some() { + actual_count += 1; + } else { + break; + } + } else { + panic!("unexpected message {:?}", next); + } + } + assert_eq!(blob_count, actual_count); +} diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index c8332705cf..4df940a3b7 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -21,13 +21,15 @@ use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; use logging::TimeLatch; -use slog::{debug, o, trace}; +use lru::LruCache; +use parking_lot::Mutex; +use slog::{crit, debug, o, trace}; use slog::{error, warn}; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -68,6 +70,7 @@ pub enum RouterMessage { RPCFailed { peer_id: PeerId, request_id: RequestId, + error: RPCError, }, /// A gossip message has been received. 
The fields are: message id, the peer that sent us this /// message, the message itself and a bool which indicates if the message should be processed @@ -108,10 +111,14 @@ impl Router { reprocess_tx: beacon_processor_reprocess_tx, network_globals: network_globals.clone(), invalid_block_storage, + delayed_lookup_peers: Mutex::new(LruCache::new( + crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE, + )), executor: executor.clone(), log: log.clone(), }; let network_beacon_processor = Arc::new(network_beacon_processor); + network_beacon_processor.spawn_delayed_lookup_service(); // spawn the sync thread crate::sync::manager::spawn( @@ -177,8 +184,9 @@ impl Router { RouterMessage::RPCFailed { peer_id, request_id, + error, } => { - self.on_rpc_error(peer_id, request_id); + self.on_rpc_error(peer_id, request_id, error); } RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); @@ -206,6 +214,14 @@ impl Router { self.network_beacon_processor .send_blocks_by_roots_request(peer_id, request_id, request), ), + Request::BlobsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blobs_by_range_request(peer_id, request_id, request), + ), + Request::BlobsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_blobs_by_roots_request(peer_id, request_id, request), + ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor .send_lightclient_bootstrap_request(peer_id, request_id, request), @@ -234,6 +250,12 @@ impl Router { Response::BlocksByRoot(beacon_block) => { self.on_blocks_by_root_response(peer_id, request_id, beacon_block); } + Response::BlobsByRange(blob) => { + self.on_blobs_by_range_response(peer_id, request_id, blob); + } + Response::BlobsByRoot(blob) => { + self.on_blobs_by_root_response(peer_id, request_id, blob); + } 
Response::LightClientBootstrap(_) => unreachable!(), } } @@ -279,6 +301,19 @@ impl Router { timestamp_now(), ), ), + PubsubMessage::BlobSidecar(data) => { + let (blob_index, signed_blob) = *data; + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_blob_sidecar( + message_id, + peer_id, + self.network_globals.client(&peer_id), + blob_index, + signed_blob, + timestamp_now(), + ), + ) + } PubsubMessage::VoluntaryExit(exit) => { debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); self.handle_beacon_processor_send_result( @@ -408,12 +443,13 @@ impl Router { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { // Check if the failed RPC belongs to sync if let RequestId::Sync(request_id) = request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, request_id, + error, }); } } @@ -452,12 +488,22 @@ impl Router { ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { - SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { - unreachable!("Block lookups do not request BBRange requests") + SyncId::SingleBlock { .. } + | SyncId::SingleBlob { .. } + | SyncId::ParentLookup { .. } + | SyncId::ParentLookupBlob { .. } => { + crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); + return; } - id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id, + id @ (SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::BackFillBlockAndBlobs { .. } + | SyncId::RangeBlockAndBlobs { .. 
}) => id, }, - RequestId::Router => unreachable!("All BBRange requests belong to sync"), + RequestId::Router => { + crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); + return; + } }; trace!( @@ -474,6 +520,33 @@ impl Router { }); } + pub fn on_blobs_by_range_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blob_sidecar: Option>>, + ) { + trace!( + self.log, + "Received BlobsByRange Response"; + "peer" => %peer_id, + ); + + if let RequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcBlob { + peer_id, + request_id: id, + blob_sidecar, + seen_timestamp: timestamp_now(), + }); + } else { + crit!( + self.log, + "All blobs by range responses should belong to sync" + ); + } + } + /// Handle a `BlocksByRoot` response from the peer. pub fn on_blocks_by_root_response( &mut self, @@ -484,11 +557,22 @@ impl Router { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, - SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => { - unreachable!("Batch syncing do not request BBRoot requests") + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. } => { + crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); + return; + } + SyncId::SingleBlob { .. } | SyncId::ParentLookupBlob { .. } => { + crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); + return; } }, - RequestId::Router => unreachable!("All BBRoot requests belong to sync"), + RequestId::Router => { + crit!(self.log, "All BBRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } }; trace!( @@ -504,6 +588,47 @@ impl Router { }); } + /// Handle a `BlobsByRoot` response from the peer. 
+ pub fn on_blobs_by_root_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blob_sidecar: Option>>, + ) { + let request_id = match request_id { + RequestId::Sync(sync_id) => match sync_id { + id @ (SyncId::SingleBlob { .. } | SyncId::ParentLookupBlob { .. }) => id, + SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => { + crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); + return; + } + SyncId::BackFillBlocks { .. } + | SyncId::RangeBlocks { .. } + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. } => { + crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); + return; + } + }, + RequestId::Router => { + crit!(self.log, "All BlobsByRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } + }; + + trace!( + self.log, + "Received BlobsByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcBlob { + request_id, + peer_id, + blob_sidecar, + seen_timestamp: timestamp_now(), + }); + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c355c671e8..03715dd99f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,5 @@ use super::sync::manager::RequestId as SyncId; +use crate::nat::EstablishedUPnPMappings; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; @@ -26,7 +27,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; -use std::{collections::HashSet, net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; +use std::{collections::HashSet, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; use 
task_executor::ShutdownReason; @@ -93,12 +94,10 @@ pub enum NetworkMessage { /// The result of the validation validation_result: MessageAcceptance, }, - /// Called if a known external TCP socket address has been updated. + /// Called if UPnP managed to establish an external port mapping. UPnPMappingEstablished { - /// The external TCP address has been updated. - tcp_socket: Option, - /// The external UDP address has been updated. - udp_socket: Option, + /// The mappings that were established. + mappings: EstablishedUPnPMappings, }, /// Reports a peer to the peer manager for performing an action. ReportPeer { @@ -190,11 +189,8 @@ pub struct NetworkService { /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, /// Stores potentially created UPnP mappings to be removed on shutdown. (TCP port and UDP - /// port). - upnp_mappings: (Option, Option), - /// Keeps track of if discovery is auto-updating or not. This is used to inform us if we should - /// update the UDP socket of discovery if the UPnP mappings get established. - discovery_auto_update: bool, + /// ports). + upnp_mappings: EstablishedUPnPMappings, /// A delay that expires when a new fork takes place. next_fork_update: Pin>>, /// A delay that expires when we need to subscribe to a new fork's topics. 
@@ -219,18 +215,21 @@ pub struct NetworkService { } impl NetworkService { - #[allow(clippy::type_complexity)] - pub async fn start( + async fn build( beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<(Arc>, NetworkSenders)> { + ) -> error::Result<( + NetworkService, + Arc>, + NetworkSenders, + )> { let network_log = executor.log().clone(); // build the channels for external comms - let (network_senders, network_recievers) = NetworkSenders::new(); + let (network_senders, network_receivers) = NetworkSenders::new(); #[cfg(feature = "disable-backfill")] warn!( @@ -345,7 +344,7 @@ impl NetworkService { let NetworkReceivers { network_recv, validator_subscription_recv, - } = network_recievers; + } = network_receivers; // create the network service and spawn the task let network_log = network_log.new(o!("service" => "network")); @@ -359,8 +358,7 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - upnp_mappings: (None, None), - discovery_auto_update: config.discv5_config.enr_update, + upnp_mappings: EstablishedUPnPMappings::default(), next_fork_update, next_fork_subscriptions, next_unsubscribe, @@ -374,6 +372,28 @@ impl NetworkService { enable_light_client_server: config.enable_light_client_server, }; + Ok((network_service, network_globals, network_senders)) + } + + #[allow(clippy::type_complexity)] + pub async fn start( + beacon_chain: Arc>, + config: &NetworkConfig, + executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, + ) -> error::Result<(Arc>, NetworkSenders)> { + let (network_service, network_globals, network_senders) = Self::build( + beacon_chain, + config, + executor.clone(), + gossipsub_registry, + 
beacon_processor_send, + beacon_processor_reprocess_tx, + ) + .await?; + network_service.spawn_service(executor); Ok((network_globals, network_senders)) @@ -477,7 +497,7 @@ impl NetworkService { } } } - metrics::update_bandwidth_metrics(self.libp2p.bandwidth.clone()); + metrics::update_bandwidth_metrics(&self.libp2p.bandwidth); } }; executor.spawn(service_fut, "network"); @@ -521,10 +541,11 @@ impl NetworkService { response, }); } - NetworkEvent::RPCFailed { id, peer_id } => { + NetworkEvent::RPCFailed { id, peer_id, error } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, request_id: id, + error, }); } NetworkEvent::StatusPeer(peer_id) => { @@ -614,34 +635,20 @@ impl NetworkService { id, reason, } => { - self.libp2p.send_error_reponse(peer_id, id, error, reason); + self.libp2p.send_error_response(peer_id, id, error, reason); } - NetworkMessage::UPnPMappingEstablished { - tcp_socket, - udp_socket, - } => { - self.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); + NetworkMessage::UPnPMappingEstablished { mappings } => { + self.upnp_mappings = mappings; // If there is an external TCP port update, modify our local ENR. - if let Some(tcp_socket) = tcp_socket { - if let Err(e) = self - .libp2p - .discovery_mut() - .update_enr_tcp_port(tcp_socket.port()) - { + if let Some(tcp_port) = self.upnp_mappings.tcp_port { + if let Err(e) = self.libp2p.discovery_mut().update_enr_tcp_port(tcp_port) { warn!(self.log, "Failed to update ENR"; "error" => e); } } - // if the discovery service is not auto-updating, update it with the - // UPnP mappings - if !self.discovery_auto_update { - if let Some(udp_socket) = udp_socket { - if let Err(e) = self - .libp2p - .discovery_mut() - .update_enr_udp_socket(udp_socket) - { - warn!(self.log, "Failed to update ENR"; "error" => e); - } + // If there is an external QUIC port update, modify our local ENR. 
+ if let Some(quic_port) = self.upnp_mappings.udp_quic_port { + if let Err(e) = self.libp2p.discovery_mut().update_enr_quic_port(quic_port) { + warn!(self.log, "Failed to update ENR"; "error" => e); } } } @@ -710,7 +717,9 @@ impl NetworkService { } let mut subscribed_topics: Vec = vec![]; - for topic_kind in core_topics_to_subscribe(self.fork_context.current_fork()) { + for topic_kind in + core_topics_to_subscribe::(self.fork_context.current_fork()) + { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( topic_kind.clone(), @@ -901,9 +910,10 @@ impl NetworkService { fn update_next_fork(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { info!( self.log, "Transitioned to new fork"; @@ -926,13 +936,17 @@ impl NetworkService { Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + + // Remove topic weight from old fork topics to prevent peers that left on the mesh on + // old topics from being penalized for not sending us messages. 
+ self.libp2p.remove_topic_weight_except(new_fork_digest); } else { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } } fn subscribed_core_topics(&self) -> bool { - let core_topics = core_topics_to_subscribe(self.fork_context.current_fork()); + let core_topics = core_topics_to_subscribe::(self.fork_context.current_fork()); let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); let subscriptions = self.network_globals.gossipsub_subscriptions.read(); let subscribed_topics: HashSet<&GossipKind> = @@ -994,7 +1008,7 @@ impl Drop for NetworkService { } // attempt to remove port mappings - crate::nat::remove_mappings(self.upnp_mappings.0, self.upnp_mappings.1, &self.log); + crate::nat::remove_mappings(&self.upnp_mappings, &self.log); info!(self.log, "Network service shutdown"); } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 67f62ff90d..35a7f1eab7 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,14 +4,26 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_processor::BeaconProcessorChannels; - use lighthouse_network::Enr; + use beacon_chain::BeaconChainTypes; + use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; + use futures::StreamExt; + use lighthouse_network::types::{GossipEncoding, GossipKind}; + use lighthouse_network::{Enr, GossipTopic}; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; - use types::MinimalEthSpec; + use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + + impl NetworkService { + fn get_topic_params( + &self, + topic: GossipTopic, + ) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) + } + 
} fn get_logger(actual_log: bool) -> Logger { if actual_log { @@ -60,7 +72,7 @@ mod tests { ); let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; config.boot_nodes_enr = enrs.clone(); @@ -102,4 +114,126 @@ mod tests { "should have persisted the second ENR to store" ); } + + // Test removing topic weight on old topics when a fork happens. + #[test] + fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. 
+ let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = task_executor::TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + get_logger(false), + shutdown_tx, + ); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + &config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); + + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.attestation_service.next().await { + network_service.on_attestation_service_msg(msg); + } + } + }); + + // Make sure the service is subscribed to the topics. 
+ let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_subnets_for_epoch::( + network_globals.local_enr().node_id().raw().into(), + beacon_chain.epoch().unwrap(), + &spec, + ) + .unwrap() + .0 + .collect::>(); + assert_eq!(2, subnets.len()); + + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); + } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. 
+ let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); + } } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index b4f52df39d..1cae6299e1 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -302,9 +302,16 @@ impl AttestationService { /// Gets the long lived subnets the node should be subscribed to during the current epoch and /// the remaining duration for which they remain valid. fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err( - |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), - )?; + let current_epoch = self.beacon_chain.epoch().map_err(|e| { + if !self + .beacon_chain + .slot_clock + .is_prior_to_genesis() + .unwrap_or(false) + { + error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e) + } + })?; let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( self.node_id.raw().into(), diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index a1c2404e5e..e580846976 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -14,6 +14,7 @@ use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{ BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, 
NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -24,7 +25,7 @@ use std::collections::{ HashMap, HashSet, }; use std::sync::Arc; -use types::{Epoch, EthSpec, SignedBeaconBlock}; +use types::{Epoch, EthSpec}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -32,7 +33,7 @@ use types::{Epoch, EthSpec, SignedBeaconBlock}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. -pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 2; +pub const BACKFILL_EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. const BACKFILL_BATCH_BUFFER_SIZE: u8 = 20; @@ -54,7 +55,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -391,7 +392,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -508,16 +509,13 @@ impl BackFillSync { return Ok(ProcessResult::Successful); } - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Trying to process a batch that does not exist: {}", - batch_id - ))) - .map(|_| ProcessResult::Successful); - } + let Some(batch) = self.batches.get_mut(&batch_id) else { + return self + .fail_sync(BackFillError::InvalidSyncState(format!( + "Trying to 
process a batch that does not exist: {}", + batch_id + ))) + .map(|_| ProcessResult::Successful); }; // NOTE: We send empty batches to the processor in order to trigger the block processor @@ -908,9 +906,8 @@ impl BackFillSync { network: &mut SyncNetworkContext, batch_id: BatchId, ) -> Result<(), BackFillError> { - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => return Ok(()), + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Ok(()); }; // Find a peer to request the batch @@ -954,8 +951,8 @@ impl BackFillSync { peer: PeerId, ) -> Result<(), BackFillError> { if let Some(batch) = self.batches.get_mut(&batch_id) { - let request = batch.to_blocks_by_range_request(); - match network.backfill_blocks_by_range_request(peer, request, batch_id) { + let (request, is_blob_batch) = batch.to_blocks_by_range_request(); + match network.backfill_blocks_by_range_request(peer, is_blob_batch, request, batch_id) { Ok(request_id) => { // inform the batch about the new request if let Err(e) = batch.start_downloading_from_peer(peer, request_id) { @@ -1055,7 +1052,7 @@ impl BackFillSync { idle_peers.shuffle(&mut rng); while let Some(peer) = idle_peers.pop() { - if let Some(batch_id) = self.include_next_batch() { + if let Some(batch_id) = self.include_next_batch(network) { // send the batch self.send_batch(network, batch_id, peer)?; } else { @@ -1068,7 +1065,7 @@ impl BackFillSync { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. 
- fn include_next_batch(&mut self) -> Option { + fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond genesis; if self.last_batch_downloaded { return None; @@ -1105,10 +1102,15 @@ impl BackFillSync { self.to_be_downloaded = self .to_be_downloaded .saturating_sub(BACKFILL_EPOCHS_PER_BATCH); - self.include_next_batch() + self.include_next_batch(network) } Entry::Vacant(entry) => { - entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH)); + let batch_type = network.batch_type(batch_id); + entry.insert(BatchInfo::new( + &batch_id, + BACKFILL_EPOCHS_PER_BATCH, + batch_type, + )); if self.would_complete(batch_id) { self.last_batch_downloaded = true; } diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs new file mode 100644 index 0000000000..7f141edb5b --- /dev/null +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -0,0 +1,478 @@ +use crate::sync::block_lookups::parent_lookup::PARENT_FAIL_TOLERANCE; +use crate::sync::block_lookups::single_block_lookup::{ + LookupRequestError, LookupVerifyError, SingleBlockLookup, SingleLookupRequestState, State, +}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockLookups, BlockRequestState, PeerShouldHave, + SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; +use crate::sync::network_context::SyncNetworkContext; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents}; +use beacon_chain::{get_block_root, BeaconChainTypes}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; +use lighthouse_network::rpc::BlocksByRootRequest; +use lighthouse_network::PeerId; +use rand::prelude::IteratorRandom; +use ssz_types::VariableList; +use std::ops::IndexMut; +use std::sync::Arc; +use std::time::Duration; +use types::blob_sidecar::{BlobIdentifier, 
FixedBlobSidecarList}; +use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; + +#[derive(Debug, Copy, Clone)] +pub enum ResponseType { + Block, + Blob, +} + +#[derive(Debug, Copy, Clone)] +pub enum LookupType { + Current, + Parent, +} + +/// This trait helps differentiate `SingleBlockLookup`s from `ParentLookup`s .This is useful in +/// ensuring requests and responses are handled separately and enables us to use different failure +/// tolerances for each, while re-using the same basic request and retry logic. +pub trait Lookup { + const MAX_ATTEMPTS: u8; + fn lookup_type() -> LookupType; + fn max_attempts() -> u8 { + Self::MAX_ATTEMPTS + } +} + +/// A `Lookup` that is a part of a `ParentLookup`. +pub struct Parent; + +impl Lookup for Parent { + const MAX_ATTEMPTS: u8 = PARENT_FAIL_TOLERANCE; + fn lookup_type() -> LookupType { + LookupType::Parent + } +} + +/// A `Lookup` that part of a single block lookup. +pub struct Current; + +impl Lookup for Current { + const MAX_ATTEMPTS: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; + fn lookup_type() -> LookupType { + LookupType::Current + } +} + +/// This trait unifies common single block lookup functionality across blocks and blobs. This +/// includes making requests, verifying responses, and handling processing results. A +/// `SingleBlockLookup` includes both a `BlockRequestState` and a `BlobRequestState`, this trait is +/// implemented for each. +/// +/// The use of the `ResponseType` associated type gives us a degree of type +/// safety when handling a block/blob response ensuring we only mutate the correct corresponding +/// state. +pub trait RequestState { + /// The type of the request . + type RequestType; + + /// A block or blob response. + type ResponseType; + + /// The type created after validation. + type VerifiedResponseType: Clone; + + /// We convert a `VerifiedResponseType` to this type prior to sending it to the beacon processor. 
+ type ReconstructedResponseType; + + /* Request building methods */ + + /// Construct a new request. + fn build_request(&mut self) -> Result<(PeerShouldHave, Self::RequestType), LookupRequestError> { + // Verify and construct request. + self.too_many_attempts()?; + let peer = self.get_peer()?; + let request = self.new_request(); + Ok((peer, request)) + } + + /// Construct a new request and send it. + fn build_request_and_send( + &mut self, + id: Id, + already_downloaded: bool, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + // Check if request is necessary. + if already_downloaded || !matches!(self.get_state().state, State::AwaitingDownload) { + return Ok(()); + } + + // Construct request. + let (peer_id, request) = self.build_request()?; + + // Update request state. + self.get_state_mut().state = State::Downloading { peer_id }; + self.get_state_mut().req_counter += 1; + + // Make request + let id = SingleLookupReqId { + id, + req_counter: self.get_state().req_counter, + }; + Self::make_request(id, peer_id.to_peer_id(), request, cx) + } + + /// Verify the current request has not exceeded the maximum number of attempts. + fn too_many_attempts(&self) -> Result<(), LookupRequestError> { + let max_attempts = L::max_attempts(); + let request_state = self.get_state(); + + if request_state.failed_attempts() >= max_attempts { + let cannot_process = + request_state.failed_processing >= request_state.failed_downloading; + Err(LookupRequestError::TooManyAttempts { cannot_process }) + } else { + Ok(()) + } + } + + /// Get the next peer to request. Draws from the set of peers we think should have both the + /// block and blob first. If that fails, we draw from the set of peers that may have either. 
+ fn get_peer(&mut self) -> Result { + let request_state = self.get_state_mut(); + let available_peer_opt = request_state + .available_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::BlockAndBlobs); + + let Some(peer_id) = available_peer_opt.or_else(|| { + request_state + .potential_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::Neither) + }) else { + return Err(LookupRequestError::NoPeers); + }; + request_state.used_peers.insert(peer_id.to_peer_id()); + Ok(peer_id) + } + + /// Initialize `Self::RequestType`. + fn new_request(&self) -> Self::RequestType; + + /// Send the request to the network service. + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError>; + + /* Response handling methods */ + + /// Verify the response is valid based on what we requested. + fn verify_response( + &mut self, + expected_block_root: Hash256, + response: Option, + ) -> Result, LookupVerifyError> { + let request_state = self.get_state_mut(); + match request_state.state { + State::AwaitingDownload => { + request_state.register_failure_downloading(); + Err(LookupVerifyError::ExtraBlocksReturned) + } + State::Downloading { peer_id } => { + self.verify_response_inner(expected_block_root, response, peer_id) + } + State::Processing { peer_id: _ } => match response { + Some(_) => { + // We sent the block for processing and received an extra block. + request_state.register_failure_downloading(); + Err(LookupVerifyError::ExtraBlocksReturned) + } + None => { + // This is simply the stream termination and we are already processing the + // block + Ok(None) + } + }, + } + } + + /// The response verification unique to block or blobs. 
+ fn verify_response_inner( + &mut self, + expected_block_root: Hash256, + response: Option, + peer_id: PeerShouldHave, + ) -> Result, LookupVerifyError>; + + /// A getter for the parent root of the response. Returns an `Option` because we won't know + /// the blob parent if we don't end up getting any blobs in the response. + fn get_parent_root(verified_response: &Self::VerifiedResponseType) -> Option; + + /// Caches the verified response in the lookup if necessary. This is only necessary for lookups + /// triggered by `UnknownParent` errors. + fn add_to_child_components( + verified_response: Self::VerifiedResponseType, + components: &mut ChildComponents, + ); + + /// Convert a verified response to the type we send to the beacon processor. + fn verified_to_reconstructed( + block_root: Hash256, + verified: Self::VerifiedResponseType, + ) -> Self::ReconstructedResponseType; + + /// Send the response to the beacon processor. + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + verified: Self::ReconstructedResponseType, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError>; + + /// Remove the peer from the lookup if it is useless. + fn remove_if_useless(&mut self, peer: &PeerId) { + self.get_state_mut().remove_peer_if_useless(peer) + } + + /// Register a failure to process the block or blob. + fn register_failure_downloading(&mut self) { + self.get_state_mut().register_failure_downloading() + } + + /* Utility methods */ + + /// Returns the `ResponseType` associated with this trait implementation. Useful in logging. + fn response_type() -> ResponseType; + + /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + + /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. 
+ fn get_state(&self) -> &SingleLookupRequestState; + + /// A getter for a mutable reference to the SingleLookupRequestState associated with this trait. + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; +} + +impl RequestState for BlockRequestState { + type RequestType = BlocksByRootRequest; + type ResponseType = Arc>; + type VerifiedResponseType = Arc>; + type ReconstructedResponseType = RpcBlock; + + fn new_request(&self) -> BlocksByRootRequest { + BlocksByRootRequest::new(VariableList::from(vec![self.requested_block_root])) + } + + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + cx.block_lookup_request(id, peer_id, request, L::lookup_type()) + .map_err(LookupRequestError::SendFailed) + } + + fn verify_response_inner( + &mut self, + expected_block_root: Hash256, + response: Option, + peer_id: PeerShouldHave, + ) -> Result>>, LookupVerifyError> { + match response { + Some(block) => { + // Compute the block root using this specific function so that we can get timing + // metrics. + let block_root = get_block_root(&block); + if block_root != expected_block_root { + // return an error and drop the block + // NOTE: we take this is as a download failure to prevent counting the + // attempt as a chain failure, but simply a peer failure. + self.state.register_failure_downloading(); + Err(LookupVerifyError::RootMismatch) + } else { + // Return the block for processing. 
+ self.state.state = State::Processing { peer_id }; + Ok(Some(block)) + } + } + None => { + if peer_id.should_have_block() { + self.state.register_failure_downloading(); + Err(LookupVerifyError::NoBlockReturned) + } else { + self.state.state = State::AwaitingDownload; + Err(LookupVerifyError::BenignFailure) + } + } + } + } + + fn get_parent_root(verified_response: &Arc>) -> Option { + Some(verified_response.parent_root()) + } + + fn add_to_child_components( + verified_response: Arc>, + components: &mut ChildComponents, + ) { + components.merge_block(verified_response); + } + + fn verified_to_reconstructed( + block_root: Hash256, + block: Arc>, + ) -> RpcBlock { + RpcBlock::new_without_blobs(Some(block_root), block) + } + + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + constructed: RpcBlock, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + bl.send_block_for_processing( + block_root, + constructed, + duration, + BlockProcessType::SingleBlock { id }, + cx, + ) + } + + fn response_type() -> ResponseType { + ResponseType::Block + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut request.block_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} + +impl RequestState for BlobRequestState { + type RequestType = BlobsByRootRequest; + type ResponseType = Arc>; + type VerifiedResponseType = FixedBlobSidecarList; + type ReconstructedResponseType = FixedBlobSidecarList; + + fn new_request(&self) -> BlobsByRootRequest { + let blob_id_vec: Vec = self.requested_ids.clone().into(); + let blob_ids = VariableList::from(blob_id_vec); + BlobsByRootRequest { blob_ids } + } + + fn make_request( + id: SingleLookupReqId, + peer_id: PeerId, + request: Self::RequestType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + 
cx.blob_lookup_request(id, peer_id, request, L::lookup_type()) + .map_err(LookupRequestError::SendFailed) + } + + fn verify_response_inner( + &mut self, + _expected_block_root: Hash256, + blob: Option, + peer_id: PeerShouldHave, + ) -> Result>, LookupVerifyError> { + match blob { + Some(blob) => { + let received_id = blob.id(); + if !self.requested_ids.contains(&received_id) { + self.state.register_failure_downloading(); + Err(LookupVerifyError::UnrequestedBlobId) + } else { + // State should remain downloading until we receive the stream terminator. + self.requested_ids.remove(&received_id); + let blob_index = blob.index; + + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + return Err(LookupVerifyError::InvalidIndex(blob.index)); + } + *self.blob_download_queue.index_mut(blob_index as usize) = Some(blob); + Ok(None) + } + } + None => { + self.state.state = State::Processing { peer_id }; + let blobs = std::mem::take(&mut self.blob_download_queue); + Ok(Some(blobs)) + } + } + } + + fn get_parent_root(verified_response: &FixedBlobSidecarList) -> Option { + verified_response + .into_iter() + .filter_map(|blob| blob.as_ref()) + .map(|blob| blob.block_parent_root) + .next() + } + + fn add_to_child_components( + verified_response: FixedBlobSidecarList, + components: &mut ChildComponents, + ) { + components.merge_blobs(verified_response); + } + + fn verified_to_reconstructed( + _block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> FixedBlobSidecarList { + blobs + } + + fn send_reconstructed_for_processing( + id: Id, + bl: &BlockLookups, + block_root: Hash256, + verified: FixedBlobSidecarList, + duration: Duration, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + bl.send_blobs_for_processing( + block_root, + verified, + duration, + BlockProcessType::SingleBlob { id }, + cx, + ) + } + + fn response_type() -> ResponseType { + ResponseType::Blob + } + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + &mut 
request.blob_request_state + } + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 4340aa41d8..d13bb8cb88 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,64 +1,110 @@ -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::time::Duration; - +use self::parent_lookup::ParentVerifyError; +use self::single_block_lookup::SingleBlockLookup; +use super::manager::BlockProcessingResult; +use super::BatchProcessResult; +use super::{manager::BlockProcessType, network_context::SyncNetworkContext}; +use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; -use beacon_chain::{BeaconChainTypes, BlockError}; +use crate::sync::block_lookups::common::LookupType; +use crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; +use crate::sync::block_lookups::single_block_lookup::{ + CachedChild, LookupRequestError, LookupVerifyError, +}; +use crate::sync::manager::{Id, SingleLookupReqId}; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +pub use beacon_chain::data_availability_checker::ChildComponents; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckErrorCategory, DataAvailabilityChecker, +}; +use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; +pub use common::Current; +pub use common::Lookup; +pub use common::Parent; +pub use common::RequestState; use fnv::FnvHashMap; +use lighthouse_network::rpc::RPCError; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; +pub use single_block_lookup::{BlobRequestState, BlockRequestState}; use slog::{debug, error, trace, warn, Logger}; use 
smallvec::SmallVec; +use std::collections::{HashMap, VecDeque}; +use std::fmt::Debug; use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; - -use crate::metrics; - -use self::parent_lookup::PARENT_FAIL_TOLERANCE; -use self::{ - parent_lookup::{ParentLookup, VerifyError}, - single_block_lookup::SingleBlockRequest, -}; - -use super::manager::BlockProcessResult; -use super::BatchProcessResult; -use super::{ - manager::{BlockProcessType, Id}, - network_context::SyncNetworkContext, -}; +use std::time::Duration; +use store::Hash256; +use strum::Display; +use types::blob_sidecar::FixedBlobSidecarList; +use types::Slot; +pub mod common; mod parent_lookup; mod single_block_lookup; #[cfg(test)] mod tests; -pub type RootBlockTuple = (Hash256, Arc>); +pub type DownloadedBlock = (Hash256, RpcBlock); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; -const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; +pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; -pub(crate) struct BlockLookups { +/// This enum is used to track what a peer *should* be able to respond with based on +/// other messages we've seen from this peer on the network. This is useful for peer scoring. +/// We expect a peer tracked by the `BlockAndBlobs` variant to be able to respond to all +/// components of a block. This peer has either sent an attestation for the requested block +/// or has forwarded a block or blob that is a descendant of the requested block. An honest node +/// should not attest unless it has all components of a block, and it should not forward +/// messages if it does not have all components of the parent block. A peer tracked by the +/// `Neither` variant has likely just sent us a block or blob over gossip, in which case we +/// can't know whether the peer has all components of the block, and could be acting honestly +/// by forwarding a message without any other block components. 
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Display)] +pub enum PeerShouldHave { + BlockAndBlobs(PeerId), + Neither(PeerId), +} + +impl PeerShouldHave { + fn as_peer_id(&self) -> &PeerId { + match self { + PeerShouldHave::BlockAndBlobs(id) => id, + PeerShouldHave::Neither(id) => id, + } + } + fn to_peer_id(self) -> PeerId { + match self { + PeerShouldHave::BlockAndBlobs(id) => id, + PeerShouldHave::Neither(id) => id, + } + } + fn should_have_block(&self) -> bool { + match self { + PeerShouldHave::BlockAndBlobs(_) => true, + PeerShouldHave::Neither(_) => false, + } + } +} + +pub struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, - processing_parent_lookups: - HashMap, SingleBlockRequest)>, + processing_parent_lookups: HashMap, SingleBlockLookup)>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - /// A collection of block hashes being searched for and a flag indicating if a result has been - /// received or not. - /// - /// The flag allows us to determine if the peer returned data or sent us nothing. - single_block_lookups: FnvHashMap>, + single_block_lookups: FnvHashMap>, + + pub(crate) da_checker: Arc>, /// The logger for the import manager. log: Logger, } impl BlockLookups { - pub fn new(log: Logger) -> Self { + pub fn new(da_checker: Arc>, log: Logger) -> Self { Self { parent_lookups: Default::default(), processing_parent_lookups: Default::default(), @@ -66,27 +112,95 @@ impl BlockLookups { FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), + da_checker, log, } } /* Lookup requests */ + /// Creates a lookup for the block with the given `block_root` and immediately triggers it. 
+ pub fn search_block( + &mut self, + block_root: Hash256, + peer_source: &[PeerShouldHave], + cx: &mut SyncNetworkContext, + ) { + self.new_current_lookup(block_root, None, peer_source, cx) + } + + /// Creates a lookup for the block with the given `block_root`, while caching other block + /// components we've already received. The block components are cached here because we haven't + /// imported its parent and therefore can't fully validate it and store it in the data + /// availability cache. + /// + /// The request is immediately triggered. + pub fn search_child_block( + &mut self, + block_root: Hash256, + child_components: ChildComponents, + peer_source: &[PeerShouldHave], + cx: &mut SyncNetworkContext, + ) { + self.new_current_lookup(block_root, Some(child_components), peer_source, cx) + } + + /// Attempts to trigger the request matching the given `block_root`. + pub fn trigger_single_lookup( + &mut self, + mut single_block_lookup: SingleBlockLookup, + cx: &SyncNetworkContext, + ) { + let block_root = single_block_lookup.block_root(); + match single_block_lookup.request_block_and_blobs(cx) { + Ok(()) => self.add_single_lookup(single_block_lookup), + Err(e) => { + debug!(self.log, "Single block lookup failed"; + "error" => ?e, + "block_root" => ?block_root, + ); + } + } + } + + /// Adds a lookup to the `single_block_lookups` map. + pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { + self.single_block_lookups + .insert(single_block_lookup.id, single_block_lookup); + + metrics::set_gauge( + &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, + self.single_block_lookups.len() as i64, + ); + } + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. 
- pub fn search_block(&mut self, hash: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext) { + pub fn new_current_lookup( + &mut self, + block_root: Hash256, + child_components: Option>, + peers: &[PeerShouldHave], + cx: &mut SyncNetworkContext, + ) { // Do not re-request a block that is already being requested - if self + if let Some((_, lookup)) = self .single_block_lookups - .values_mut() - .any(|single_block_request| single_block_request.add_peer(&hash, &peer_id)) + .iter_mut() + .find(|(_id, lookup)| lookup.is_for_block(block_root)) { + lookup.add_peers(peers); + if let Some(components) = child_components { + lookup.add_child_components(components); + } return; } - if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.is_for_block(block_root) || parent_req.contains_block(&block_root) }) { + parent_lookup.add_peers(peers); + // If the block was already downloaded, or is being downloaded in this moment, do not // request it. return; @@ -95,57 +209,61 @@ impl BlockLookups { if self .processing_parent_lookups .values() - .any(|(hashes, _last_parent_request)| hashes.contains(&hash)) + .any(|(hashes, _last_parent_request)| hashes.contains(&block_root)) { // we are already processing this block, ignore it. 
return; } - debug!( - self.log, - "Searching for block"; - "peer_id" => %peer_id, - "block" => %hash + let msg = if child_components.is_some() { + "Searching for components of a block with unknown parent" + } else { + "Searching for block components" + }; + + let lookup = SingleBlockLookup::new( + block_root, + child_components, + peers, + self.da_checker.clone(), + cx.next_id(), ); - let mut single_block_request = SingleBlockRequest::new(hash, peer_id); - - let (peer_id, request) = single_block_request.request_block().unwrap(); - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups - .insert(request_id, single_block_request); - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); - } + debug!( + self.log, + "{}", msg; + "peer_ids" => ?peers, + "block" => ?block_root, + ); + self.trigger_single_lookup(lookup, cx); } /// If a block is attempted to be processed but we do not know its parent, this function is /// called in order to find the block's parent. pub fn search_parent( &mut self, + slot: Slot, block_root: Hash256, - block: Arc>, + parent_root: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - let parent_root = block.parent_root(); + // Gossip blocks or blobs shouldn't be propagated if parents are unavailable. + let peer_source = PeerShouldHave::BlockAndBlobs(peer_id); + // If this block or it's parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { debug!(self.log, "Block is from a past failed chain. Dropping"; - "block_root" => ?block_root, "block_slot" => block.slot()); + "block_root" => ?block_root, "block_slot" => slot); return; } // Make sure this block is not already downloaded, and that neither it or its parent is // being searched for. 
- if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.contains_block(&block_root) - || parent_req.add_peer(&block_root, &peer_id) - || parent_req.add_peer(&parent_root, &peer_id) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.contains_block(&block_root) || parent_req.is_for_block(block_root) }) { + parent_lookup.add_peer(peer_source); // we are already searching for this block, ignore it return; } @@ -158,68 +276,84 @@ impl BlockLookups { // we are already processing this block, ignore it. return; } - - let parent_lookup = ParentLookup::new(block_root, block, peer_id); + let parent_lookup = ParentLookup::new( + block_root, + parent_root, + peer_source, + self.da_checker.clone(), + cx, + ); self.request_parent(parent_lookup, cx); } /* Lookup responses */ - pub fn single_block_lookup_response( + /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` + /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests + /// that have been retried are ignored. + fn get_single_lookup>( &mut self, - id: Id, + id: SingleLookupReqId, + ) -> Option> { + let mut lookup = self.single_block_lookups.remove(&id.id)?; + + let request_state = R::request_state_mut(&mut lookup); + if id.req_counter != request_state.get_state().req_counter { + // We don't want to drop the lookup, just ignore the old response. + self.single_block_lookups.insert(id.id, lookup); + return None; + } + Some(lookup) + } + + /// Checks whether a single block lookup is waiting for a parent lookup to complete. This is + /// necessary because we want to make sure all parents are processed before sending a child + /// for processing, otherwise the block will fail validation and will be returned to the network + /// layer with an `UnknownParent` error. 
+ pub fn has_pending_parent_request(&self, block_root: Hash256) -> bool { + self.parent_lookups + .iter() + .any(|parent_lookup| parent_lookup.chain_hash() == block_root) + } + + /// Process a block or blob response received from a single lookup request. + pub fn single_lookup_response>( + &mut self, + lookup_id: SingleLookupReqId, peer_id: PeerId, - block: Option>>, + response: Option, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, ) { - let mut request = match self.single_block_lookups.entry(id) { - Entry::Occupied(req) => req, - Entry::Vacant(_) => { - if block.is_some() { - debug!( - self.log, - "Block returned for single block lookup not present" - ); - } - return; + let id = lookup_id.id; + let response_type = R::response_type(); + + let Some(lookup) = self.get_single_lookup::(lookup_id) else { + if response.is_some() { + // We don't have the ability to cancel in-flight RPC requests. So this can happen + // if we started this RPC request, and later saw the block/blobs via gossip. + debug!( + self.log, + "Block returned for single block lookup not present"; + "response_type" => ?response_type, + ); } + return; }; - match request.get_mut().verify_block(block) { - Ok(Some((block_root, block))) => { - // This is the correct block, send it for processing - if self - .send_block_for_processing( - block_root, - block, - seen_timestamp, - BlockProcessType::SingleBlock { id }, - cx, - ) - .is_err() - { - // Remove to avoid inconsistencies - self.single_block_lookups.remove(&id); - } - } - Ok(None) => { - // request finished correctly, it will be removed after the block is processed. - } - Err(error) => { - let msg: &str = error.into(); - cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); - // Remove the request, if it can be retried it will be added with a new id. 
- let mut req = request.remove(); + let expected_block_root = lookup.block_root(); - debug!(self.log, "Single block lookup failed"; - "peer_id" => %peer_id, "error" => msg, "block_root" => %req.hash); - // try the request again if possible - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups.insert(id, req); - } - } + match self.single_lookup_response_inner::(peer_id, response, seen_timestamp, cx, lookup) + { + Ok(lookup) => { + self.single_block_lookups.insert(id, lookup); + } + Err(e) => { + debug!(self.log, + "Single lookup request failed"; + "error" => ?e, + "block_root" => ?expected_block_root, + ); } } @@ -229,82 +363,185 @@ impl BlockLookups { ); } - /// Process a response received from a parent lookup request. - pub fn parent_lookup_response( - &mut self, - id: Id, + /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean + /// the lookup is dropped. 
+ fn single_lookup_response_inner>( + &self, peer_id: PeerId, - block: Option>>, + response: Option, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - ) { + cx: &SyncNetworkContext, + mut lookup: SingleBlockLookup, + ) -> Result, LookupRequestError> { + let response_type = R::response_type(); + let log = self.log.clone(); + let expected_block_root = lookup.block_root(); + let request_state = R::request_state_mut(&mut lookup); + + match request_state.verify_response(expected_block_root, response) { + Ok(Some(verified_response)) => { + self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::SingleBlock { id: lookup.id }, + verified_response, + &mut lookup, + )?; + } + Ok(None) => {} + Err(e) => { + debug!( + log, + "Single lookup response verification failed, retrying"; + "block_root" => ?expected_block_root, + "peer_id" => %peer_id, + "response_type" => ?response_type, + "error" => ?e + ); + if matches!(e, LookupVerifyError::BenignFailure) { + request_state + .get_state_mut() + .remove_peer_if_useless(&peer_id); + } else { + let msg = e.into(); + cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); + }; + + request_state.register_failure_downloading(); + lookup.request_block_and_blobs(cx)?; + } + } + Ok(lookup) + } + + fn handle_verified_response>( + &self, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + process_type: BlockProcessType, + verified_response: R::VerifiedResponseType, + lookup: &mut SingleBlockLookup, + ) -> Result<(), LookupRequestError> { + let id = lookup.id; + let block_root = lookup.block_root(); + + R::request_state_mut(lookup) + .get_state_mut() + .component_downloaded = true; + + let cached_child = lookup.add_response::(verified_response.clone()); + match cached_child { + CachedChild::Ok(block) => { + // If we have an outstanding parent request for this block, delay sending the response until + // all parent blocks have been processed, otherwise we will fail validation with an + // `UnknownParent`. 
+ let delay_send = match L::lookup_type() { + LookupType::Parent => false, + LookupType::Current => self.has_pending_parent_request(lookup.block_root()), + }; + + if !delay_send { + self.send_block_for_processing( + block_root, + block, + seen_timestamp, + process_type, + cx, + )? + } + } + CachedChild::DownloadIncomplete => { + // If this was the result of a block request, we can't determine if the block peer + // did anything wrong. If we already had both a block and blobs response processed, + // we should penalize the blobs peer because they did not provide all blobs on the + // initial request. + if lookup.both_components_downloaded() { + lookup.penalize_blob_peer(false, cx); + lookup + .blob_request_state + .state + .register_failure_downloading(); + } + lookup.request_block_and_blobs(cx)?; + } + CachedChild::NotRequired => R::send_reconstructed_for_processing( + id, + self, + block_root, + R::verified_to_reconstructed(block_root, verified_response), + seen_timestamp, + cx, + )?, + CachedChild::Err(e) => { + warn!(self.log, "Consistency error in cached block"; + "error" => ?e, + "block_root" => ?block_root + ); + lookup.handle_consistency_failure(cx); + lookup.request_block_and_blobs(cx)?; + } + } + Ok(()) + } + + /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` + /// matches the current `req_counter` for the lookup. This any stale responses from requests + /// that have been retried are ignored. 
+ fn get_parent_lookup>( + &mut self, + id: SingleLookupReqId, + ) -> Option> { let mut parent_lookup = if let Some(pos) = self .parent_lookups .iter() - .position(|request| request.pending_response(id)) + .position(|request| request.current_parent_request.id == id.id) { self.parent_lookups.remove(pos) } else { - if block.is_some() { + return None; + }; + + if R::request_state_mut(&mut parent_lookup.current_parent_request) + .get_state() + .req_counter + != id.req_counter + { + self.parent_lookups.push(parent_lookup); + return None; + } + Some(parent_lookup) + } + + /// Process a response received from a parent lookup request. + pub fn parent_lookup_response>( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + response: Option, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + ) { + let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { + if response.is_some() { debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); } return; }; - match parent_lookup.verify_block(block, &mut self.failed_chains) { - Ok(Some((block_root, block))) => { - // Block is correct, send to the beacon processor. - let chain_hash = parent_lookup.chain_hash(); - if self - .send_block_for_processing( - block_root, - block, - seen_timestamp, - BlockProcessType::ParentLookup { chain_hash }, - cx, - ) - .is_ok() - { - self.parent_lookups.push(parent_lookup) - } - } - Ok(None) => { - // Request finished successfully, nothing else to do. It will be removed after the - // processing result arrives. 
+ match self.parent_lookup_response_inner::( + peer_id, + response, + seen_timestamp, + cx, + &mut parent_lookup, + ) { + Ok(()) => { self.parent_lookups.push(parent_lookup); } - Err(e) => match e { - VerifyError::RootMismatch - | VerifyError::NoBlockReturned - | VerifyError::ExtraBlocksReturned => { - let e = e.into(); - warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => %e); - - // We do not tolerate these kinds of errors. We will accept a few but these are signs - // of a faulty peer. - cx.report_peer(peer_id, PeerAction::LowToleranceError, e); - - // We try again if possible. - self.request_parent(parent_lookup, cx); - } - VerifyError::PreviousFailure { parent_root } => { - debug!( - self.log, - "Parent chain ignored due to past failure"; - "block" => %parent_root, - ); - // Add the root block to failed chains - self.failed_chains.insert(parent_lookup.chain_hash()); - - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "bbroot_failed_chains", - ); - } - }, - }; + Err(e) => { + self.handle_parent_request_error(&mut parent_lookup, cx, e); + } + } metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, @@ -312,47 +549,138 @@ impl BlockLookups { ); } - /* Error responses */ + /// Consolidates error handling for `parent_lookup_response`. An `Err` here should always mean + /// the lookup is dropped. 
+ fn parent_lookup_response_inner>( + &mut self, + peer_id: PeerId, + response: Option, + seen_timestamp: Duration, + cx: &SyncNetworkContext, + parent_lookup: &mut ParentLookup, + ) -> Result<(), RequestError> { + match parent_lookup.verify_response::(response, &mut self.failed_chains) { + Ok(Some(verified_response)) => { + self.handle_verified_response::( + seen_timestamp, + cx, + BlockProcessType::ParentLookup { + chain_hash: parent_lookup.chain_hash(), + }, + verified_response, + &mut parent_lookup.current_parent_request, + )?; + } + Ok(None) => {} + Err(e) => self.handle_parent_verify_error::(peer_id, parent_lookup, e, cx)?, + }; + Ok(()) + } - #[allow(clippy::needless_collect)] // false positive - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { - /* Check disconnection for single block lookups */ - // better written after https://github.com/rust-lang/rust/issues/59618 - let remove_retry_ids: Vec = self - .single_block_lookups - .iter_mut() - .filter_map(|(id, req)| { - if req.check_peer_disconnected(peer_id).is_err() { - Some(*id) - } else { - None - } - }) - .collect(); + /// Handle logging and peer scoring for `ParentVerifyError`s during parent lookup requests. 
+ fn handle_parent_verify_error>( + &mut self, + peer_id: PeerId, + parent_lookup: &mut ParentLookup, + e: ParentVerifyError, + cx: &SyncNetworkContext, + ) -> Result<(), RequestError> { + match e { + ParentVerifyError::RootMismatch + | ParentVerifyError::NoBlockReturned + | ParentVerifyError::NotEnoughBlobsReturned + | ParentVerifyError::ExtraBlocksReturned + | ParentVerifyError::UnrequestedBlobId + | ParentVerifyError::ExtraBlobsReturned + | ParentVerifyError::InvalidIndex(_) => { + let e = e.into(); + warn!(self.log, "Peer sent invalid response to parent request."; + "peer_id" => %peer_id, "reason" => %e); - for mut req in remove_retry_ids - .into_iter() - .map(|id| self.single_block_lookups.remove(&id).unwrap()) - .collect::>() - { - // retry the request - match req.request_block() { - Ok((peer_id, block_request)) => { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { - self.single_block_lookups.insert(request_id, req); - } - } - Err(e) => { - trace!( - self.log, - "Single block request failed on peer disconnection"; - "block_root" => %req.hash, - "peer_id" => %peer_id, - "reason" => <&str>::from(e), - ); - } + // We do not tolerate these kinds of errors. We will accept a few but these are signs + // of a faulty peer. + cx.report_peer(peer_id, PeerAction::LowToleranceError, e); + + // We try again if possible. 
+ parent_lookup.request_parent(cx)?; + } + ParentVerifyError::PreviousFailure { parent_root } => { + debug!( + self.log, + "Parent chain ignored due to past failure"; + "block" => %parent_root, + ); + // Add the root block to failed chains + self.failed_chains.insert(parent_lookup.chain_hash()); + + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); + } + ParentVerifyError::BenignFailure => { + trace!( + self.log, + "Requested peer could not respond to block request, requesting a new peer"; + ); + let request_state = R::request_state_mut(&mut parent_lookup.current_parent_request); + request_state.remove_if_useless(&peer_id); + parent_lookup.request_parent(cx)?; } } + Ok(()) + } + + /// Handle logging and peer scoring for `RequestError`s during parent lookup requests. + fn handle_parent_request_error( + &mut self, + parent_lookup: &mut ParentLookup, + cx: &SyncNetworkContext, + e: RequestError, + ) { + debug!(self.log, "Failed to request parent"; "error" => e.as_static()); + match e { + RequestError::SendFailed(_) => { + // Probably shutting down, nothing to do here. Drop the request + } + RequestError::ChainTooLong => { + self.failed_chains.insert(parent_lookup.chain_hash()); + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } + RequestError::TooManyAttempts { cannot_process } => { + // We only consider the chain failed if we were unable to process it. + // We could have failed because one peer continually failed to send us + // bad blocks. We still allow other peers to send us this chain. Note + // that peers that do this, still get penalised. + if cannot_process { + self.failed_chains.insert(parent_lookup.chain_hash()); + } + // This indicates faulty peers. 
+ for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } + RequestError::NoPeers => { + // This happens if the peer disconnects while the block is being + // processed. Drop the request without extra penalty + } + } + } + + /* Error responses */ + + pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { + /* Check disconnection for single lookups */ + self.single_block_lookups.retain(|_, req| { + let should_drop_lookup = + req.should_drop_lookup_on_disconnected_peer(peer_id, cx, &self.log); + + !should_drop_lookup + }); /* Check disconnection for parent lookups */ while let Some(pos) = self @@ -367,39 +695,67 @@ impl BlockLookups { } /// An RPC error has occurred during a parent lookup. This function handles this case. - pub fn parent_lookup_failed( + pub fn parent_lookup_failed>( &mut self, - id: Id, + id: SingleLookupReqId, peer_id: PeerId, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, + error: RPCError, ) { - if let Some(pos) = self - .parent_lookups - .iter() - .position(|request| request.pending_response(id)) - { - let mut parent_lookup = self.parent_lookups.remove(pos); - parent_lookup.download_failed(); - trace!(self.log, "Parent lookup request failed"; &parent_lookup); - self.request_parent(parent_lookup, cx); - } else { - return debug!(self.log, "RPC failure for a parent lookup request that was not found"; "peer_id" => %peer_id); + let msg = error.as_static_str(); + let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { + debug!(self.log, + "RPC failure for a block parent lookup request that was not found"; + "peer_id" => %peer_id, + "error" => msg + ); + return; }; + R::request_state_mut(&mut parent_lookup.current_parent_request) + .register_failure_downloading(); + trace!(self.log, "Parent lookup block request failed"; &parent_lookup, "error" => msg); + + self.request_parent(parent_lookup, cx); + metrics::set_gauge( 
            &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
            self.parent_lookups.len() as i64,
        );
    }

-    pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) {
-        if let Some(mut request) = self.single_block_lookups.remove(&id) {
-            request.register_failure_downloading();
-            trace!(self.log, "Single block lookup failed"; "block" => %request.hash);
-            if let Ok((peer_id, block_request)) = request.request_block() {
-                if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) {
-                    self.single_block_lookups.insert(request_id, request);
-                }
-            }
+    /// An RPC error has occurred during a single lookup. This function handles this case.
+    pub fn single_block_lookup_failed>(
+        &mut self,
+        id: SingleLookupReqId,
+        peer_id: &PeerId,
+        cx: &SyncNetworkContext,
+        error: RPCError,
+    ) {
+        let msg = error.as_static_str();
+        let log = self.log.clone();
+        let Some(mut lookup) = self.get_single_lookup::(id) else {
+            debug!(log, "Error response to dropped lookup"; "error" => ?error);
+            return;
+        };
+        let block_root = lookup.block_root();
+        let request_state = R::request_state_mut(&mut lookup);
+        let response_type = R::response_type();
+        trace!(log,
+            "Single lookup failed";
+            "block_root" => ?block_root,
+            "error" => msg,
+            "peer_id" => %peer_id,
+            "response_type" => ?response_type
+        );
+        let id = id.id;
+        request_state.register_failure_downloading();
+        if let Err(e) = lookup.request_block_and_blobs(cx) {
+            debug!(self.log,
+                "Single lookup retry failed";
+                "error" => ?e,
+                "block_root" => ?block_root,
+            );
+            self.single_block_lookups.remove(&id);
        }

        metrics::set_gauge(
@@ -410,33 +766,47 @@ impl BlockLookups {

    /* Processing responses */

-    pub fn single_block_processed(
+    pub fn single_block_component_processed>(
        &mut self,
-        id: Id,
-        result: BlockProcessResult,
+        target_id: Id,
+        result: BlockProcessingResult,
        cx: &mut SyncNetworkContext,
    ) {
-        let mut req = match self.single_block_lookups.remove(&id) {
-            Some(req) => req,
-            None => {
-                return debug!(
-                    self.log,
-
"Block processed for single block lookup not present" - ); - } + let Some(mut lookup) = self.single_block_lookups.remove(&target_id) else { + return; }; - let root = req.hash; - let peer_id = match req.processing_peer() { - Ok(peer) => peer, - Err(_) => return, + let root = lookup.block_root(); + let request_state = R::request_state_mut(&mut lookup); + + let Ok(peer_id) = request_state.get_state().processing_peer() else { + return; }; + debug!( + self.log, + "Block component processed for lookup"; + "response_type" => ?R::response_type(), + "result" => ?result, + ); match result { - BlockProcessResult::Ok => { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(root) => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + } + AvailabilityProcessingStatus::MissingComponents(_, _block_root) => { + match self.handle_missing_components::(cx, &mut lookup) { + Ok(()) => { + self.single_block_lookups.insert(target_id, lookup); + } + Err(e) => { + // Drop with an additional error. + warn!(self.log, "Single block lookup failed"; "block" => %root, "error" => ?e); + } + } + } + }, + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. 
warn!( @@ -445,84 +815,158 @@ impl BlockLookups { "action" => "dropping single block request" ); } - BlockProcessResult::Err(e) => { - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); - match e { - BlockError::BlockIsAlreadyKnown => { - // No error here + BlockProcessingResult::Err(e) => { + match self.handle_single_lookup_block_error(cx, lookup, peer_id, e) { + Ok(Some(lookup)) => { + self.single_block_lookups.insert(target_id, lookup); } - BlockError::BeaconChainError(e) => { - // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + Ok(None) => { + // Drop without an additional error. } - BlockError::ParentUnknown(block) => { - self.search_parent(root, block, peer_id, cx); - } - ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; - "root" => %root, - "error" => ?e - ); - } - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - // Try it again if possible. - req.register_failure_processing(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) - { - // insert with the new id - self.single_block_lookups.insert(request_id, req); - } - } + Err(e) => { + // Drop with an additional error. 
+                    warn!(self.log, "Single block lookup failed"; "block" => %root, "error" => ?e);
                }
            }
        }
-    }
+        };
+    }

-        metrics::set_gauge(
-            &metrics::SYNC_SINGLE_BLOCK_LOOKUPS,
-            self.single_block_lookups.len() as i64,
-        );
+    /// Handles a `MissingComponents` block processing error. Handles peer scoring and retries.
+    ///
+    /// If this was the result of a block request, we can't determine if the block peer did anything
+    /// wrong. If we already had both a block and blobs response processed, we should penalize the
+    /// blobs peer because they did not provide all blobs on the initial request.
+    fn handle_missing_components>(
+        &self,
+        cx: &SyncNetworkContext,
+        lookup: &mut SingleBlockLookup,
+    ) -> Result<(), LookupRequestError> {
+        let request_state = R::request_state_mut(lookup);
+
+        request_state.get_state_mut().component_processed = true;
+        if lookup.both_components_processed() {
+            lookup.penalize_blob_peer(false, cx);
+
+            // Try it again if possible.
+            lookup
+                .blob_request_state
+                .state
+                .register_failure_processing();
+            lookup.request_block_and_blobs(cx)?;
+        }
+        Ok(())
+    }
+
+    /// Handles peer scoring and retries related to a `BlockError` in response to a single block
+    /// or blob lookup processing result.
+ fn handle_single_lookup_block_error( + &mut self, + cx: &mut SyncNetworkContext, + mut lookup: SingleBlockLookup, + peer_id: PeerShouldHave, + e: BlockError, + ) -> Result>, LookupRequestError> { + let root = lookup.block_root(); + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + match e { + BlockError::BlockIsAlreadyKnown => { + // No error here + return Ok(None); + } + BlockError::BeaconChainError(e) => { + // Internal error + error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + return Ok(None); + } + BlockError::ParentUnknown(block) => { + let slot = block.slot(); + let parent_root = block.parent_root(); + lookup.add_child_components(block.into()); + lookup.request_block_and_blobs(cx)?; + self.search_parent(slot, root, parent_root, peer_id.to_peer_id(), cx); + } + ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; + "root" => %root, + "error" => ?e + ); + return Ok(None); + } + BlockError::AvailabilityCheck(e) => match e.category() { + AvailabilityCheckErrorCategory::Internal => { + warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup + .block_request_state + .state + .register_failure_downloading(); + lookup + .blob_request_state + .state + .register_failure_downloading(); + lookup.request_block_and_blobs(cx)? + } + AvailabilityCheckErrorCategory::Malicious => { + warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); + lookup.handle_availability_check_failure(cx); + lookup.request_block_and_blobs(cx)? 
+ } + }, + other => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { + cx.report_peer( + block_peer.to_peer_id(), + PeerAction::MidToleranceError, + "single_block_failure", + ); + + // Try it again if possible. + lookup + .block_request_state + .state + .register_failure_processing(); + lookup.request_block_and_blobs(cx)? + } + } + } + Ok(Some(lookup)) } pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: BlockProcessResult, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { - let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self + let index = self .parent_lookups .iter() .enumerate() - .find_map(|(pos, request)| { - request - .get_processing_peer(chain_hash) - .map(|peer| (pos, peer)) - }) { - (self.parent_lookups.remove(pos), peer) - } else { + .find(|(_, lookup)| lookup.chain_hash() == chain_hash) + .map(|(index, _)| index); + + let Some(mut parent_lookup) = index.map(|index| self.parent_lookups.remove(index)) else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; match &result { - BlockProcessResult::Ok => { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup) - } - BlockProcessResult::Err(e) => { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(block_root) => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup, "block_root" => ?block_root) + } + AvailabilityProcessingStatus::MissingComponents(_, block_root) => { + trace!(self.log, "Parent missing parts, triggering single block lookup "; &parent_lookup,"block_root" => ?block_root) + } + }, + BlockProcessingResult::Err(e) => { trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) } - BlockProcessResult::Ignored => { + 
BlockProcessingResult::Ignored => { trace!( self.log, "Parent block processing job was ignored"; @@ -533,32 +977,62 @@ impl BlockLookups { } match result { - BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { - // need to keep looking for parents - // add the block back to the queue and continue the search - parent_lookup.add_block(block); + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + _, + block_root, + )) => { + let expected_block_root = parent_lookup.current_parent_request.block_root(); + if block_root != expected_block_root { + warn!( + self.log, + "Parent block processing result/request root mismatch"; + "request" =>?expected_block_root, + "result" => ?block_root + ); + return; + } + + // We only send parent blocks + blobs for processing together. This means a + // `MissingComponents` response here indicates missing blobs. Therefore we always + // register a blob processing failure here. + parent_lookup + .current_parent_request + .blob_request_state + .state + .register_failure_processing(); + match parent_lookup + .current_parent_request + .request_block_and_blobs(cx) + { + Ok(()) => self.parent_lookups.push(parent_lookup), + Err(e) => self.handle_parent_request_error(&mut parent_lookup, cx, e.into()), + } + } + BlockProcessingResult::Err(BlockError::ParentUnknown(block)) => { + parent_lookup.add_unknown_parent_block(block); self.request_parent(parent_lookup, cx); } - BlockProcessResult::Ok - | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) + | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { // Check if the beacon processor is available - let beacon_processor = match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => beacon_processor, - None => { - return trace!( - self.log, - "Dropping parent chain segment that was ready for processing."; - parent_lookup - ); - } + let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { + return trace!( + self.log, + "Dropping parent chain segment that was ready for processing."; + parent_lookup + ); }; - let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); + let (chain_hash, blocks, hashes, block_request) = + parent_lookup.parts_for_processing(); + + let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); + let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); match beacon_processor.send_chain_segment(process_id, blocks) { Ok(_) => { self.processing_parent_lookups - .insert(chain_hash, (hashes, request)); + .insert(chain_hash, (hashes, block_request)); } Err(e) => { error!( @@ -569,7 +1043,7 @@ impl BlockLookups { } } } - ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + ref e @ BlockProcessingResult::Err(BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -581,25 +1055,10 @@ impl BlockLookups { "error" => ?e ); } - BlockProcessResult::Err(outcome) => { - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "last_peer" => %peer_id, - ); - - // This currently can be a host of errors. We permit this due to the partial - // ambiguity. 
- cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); - - // Try again if possible - parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx); + BlockProcessingResult::Err(outcome) => { + self.handle_parent_block_error(outcome, cx, parent_lookup); } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. warn!( @@ -616,31 +1075,192 @@ impl BlockLookups { ); } + /// Find the child block that spawned the parent lookup request and add it to the chain + /// to send for processing. + fn add_child_block_to_chain( + &mut self, + chain_hash: Hash256, + mut blocks: VecDeque>, + cx: &SyncNetworkContext, + ) -> VecDeque> { + // Find the child block that spawned the parent lookup request and add it to the chain + // to send for processing. + if let Some(child_lookup_id) = self + .single_block_lookups + .iter() + .find_map(|(id, lookup)| (lookup.block_root() == chain_hash).then_some(*id)) + { + let Some(child_lookup) = self.single_block_lookups.get_mut(&child_lookup_id) else { + debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); + return blocks; + }; + match child_lookup.get_cached_child_block() { + CachedChild::Ok(rpc_block) => { + // Insert this block at the front. 
This order is important because we later check + // for linear roots in `filter_chain_segment` + blocks.push_front(rpc_block); + } + CachedChild::DownloadIncomplete => { + trace!(self.log, "Parent lookup chain complete, awaiting child response"; "chain_hash" => ?chain_hash); + } + CachedChild::NotRequired => { + warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); + } + CachedChild::Err(e) => { + warn!( + self.log, + "Consistency error in child block triggering chain or parent lookups"; + "error" => ?e, + "chain_hash" => ?chain_hash + ); + child_lookup.handle_consistency_failure(cx); + if let Err(e) = child_lookup.request_block_and_blobs(cx) { + debug!(self.log, + "Failed to request block and blobs, dropping lookup"; + "error" => ?e + ); + self.single_block_lookups.remove(&child_lookup_id); + } + } + } + } else { + debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); + }; + blocks + } + + /// Handle the peer scoring, retries, and logging related to a `BlockError` returned from + /// processing a block + blobs for a parent lookup. + fn handle_parent_block_error( + &mut self, + outcome: BlockError<::EthSpec>, + cx: &SyncNetworkContext, + mut parent_lookup: ParentLookup, + ) { + // We should always have a block peer. + let Ok(block_peer_id) = parent_lookup.block_processing_peer() else { + return; + }; + let block_peer_id = block_peer_id.to_peer_id(); + + // We may not have a blob peer, if there were no blobs required for this block. + let blob_peer_id = parent_lookup + .blob_processing_peer() + .ok() + .map(PeerShouldHave::to_peer_id); + + // all else we consider the chain a failure and downvote the peer that sent + // us the last block + warn!( + self.log, "Invalid parent chain"; + "score_adjustment" => %PeerAction::MidToleranceError, + "outcome" => ?outcome, + "block_peer_id" => %block_peer_id, + ); + // This currently can be a host of errors. We permit this due to the partial + // ambiguity. 
+ cx.report_peer( + block_peer_id, + PeerAction::MidToleranceError, + "parent_request_err", + ); + // Don't downscore the same peer twice + if let Some(blob_peer_id) = blob_peer_id { + if block_peer_id != blob_peer_id { + debug!( + self.log, "Additionally down-scoring blob peer"; + "score_adjustment" => %PeerAction::MidToleranceError, + "outcome" => ?outcome, + "blob_peer_id" => %blob_peer_id, + ); + cx.report_peer( + blob_peer_id, + PeerAction::MidToleranceError, + "parent_request_err", + ); + } + } + + // Try again if possible + parent_lookup.processing_failed(); + self.request_parent(parent_lookup, cx); + } + pub fn parent_chain_processed( &mut self, chain_hash: Hash256, result: BatchProcessResult, - cx: &mut SyncNetworkContext, + cx: &SyncNetworkContext, ) { - let request = match self.processing_parent_lookups.remove(&chain_hash) { - Some((_hashes, request)) => request, - None => { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result) - } + let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else { + return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result); }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); match result { BatchProcessResult::Success { .. } => { - // nothing to do. 
+ let Some(id) = self + .single_block_lookups + .iter() + .find_map(|(id, req)| (req.block_root() == chain_hash).then_some(*id)) + else { + warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); + return; + }; + + let Some(lookup) = self.single_block_lookups.get_mut(&id) else { + warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); + return; + }; + + match lookup.get_cached_child_block() { + CachedChild::Ok(rpc_block) => { + // This is the correct block, send it for processing + if self + .send_block_for_processing( + chain_hash, + rpc_block, + timestamp_now(), + BlockProcessType::SingleBlock { id }, + cx, + ) + .is_err() + { + // Remove to avoid inconsistencies + self.single_block_lookups.remove(&id); + } + } + CachedChild::DownloadIncomplete => { + trace!(self.log, "Parent chain complete, awaiting child response"; "chain_hash" => %chain_hash); + } + CachedChild::NotRequired => { + warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); + } + CachedChild::Err(e) => { + warn!( + self.log, + "Consistency error in child block triggering parent lookup"; + "chain_hash" => %chain_hash, + "error" => ?e + ); + lookup.handle_consistency_failure(cx); + if let Err(e) = lookup.request_block_and_blobs(cx) { + debug!(self.log, + "Failed to request block and blobs, dropping lookup"; + "error" => ?e + ); + self.single_block_lookups.remove(&id); + } + } + } } BatchProcessResult::FaultyFailure { imported_blocks: _, penalty, } => { self.failed_chains.insert(chain_hash); - for peer_id in request.used_peers { - cx.report_peer(peer_id, penalty, "parent_chain_failure") + for peer_source in request.all_peers() { + cx.report_peer(peer_source, penalty, "parent_chain_failure") } } BatchProcessResult::NonFaultyFailure => { @@ -657,13 +1277,13 @@ impl BlockLookups { /* Helper functions */ fn send_block_for_processing( - &mut self, + &self, block_root: Hash256, - block: Arc>, + block: RpcBlock, duration: 
Duration, process_type: BlockProcessType, - cx: &mut SyncNetworkContext, - ) -> Result<(), ()> { + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { match cx.beacon_processor_if_enabled() { Some(beacon_processor) => { trace!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); @@ -678,61 +1298,68 @@ impl BlockLookups { "Failed to send sync block to processor"; "error" => ?e ); - Err(()) + Err(LookupRequestError::SendFailed( + "beacon processor send failure", + )) } else { Ok(()) } } None => { trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block_root); - Err(()) + Err(LookupRequestError::SendFailed( + "beacon processor unavailable", + )) } } } - fn request_parent( - &mut self, - mut parent_lookup: ParentLookup, - cx: &mut SyncNetworkContext, - ) { - match parent_lookup.request_parent(cx) { - Err(e) => { - debug!(self.log, "Failed to request parent"; &parent_lookup, "error" => e.as_static()); - match e { - parent_lookup::RequestError::SendFailed(_) => { - // Probably shutting down, nothing to do here. Drop the request - } - parent_lookup::RequestError::ChainTooLong => { - self.failed_chains.insert(parent_lookup.chain_hash()); - // This indicates faulty peers. - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - parent_lookup::RequestError::TooManyAttempts { cannot_process } => { - // We only consider the chain failed if we were unable to process it. - // We could have failed because one peer continually failed to send us - // bad blocks. We still allow other peers to send us this chain. Note - // that peers that do this, still get penalised. - if cannot_process { - self.failed_chains.insert(parent_lookup.chain_hash()); - } - // This indicates faulty peers. 
- for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - parent_lookup::RequestError::NoPeers => { - // This happens if the peer disconnects while the block is being - // processed. Drop the request without extra penalty - } + fn send_blobs_for_processing( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + duration: Duration, + process_type: BlockProcessType, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + match cx.beacon_processor_if_enabled() { + Some(beacon_processor) => { + trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); + if let Err(e) = + beacon_processor.send_rpc_blobs(block_root, blobs, duration, process_type) + { + error!( + self.log, + "Failed to send sync blobs to processor"; + "error" => ?e + ); + Err(LookupRequestError::SendFailed( + "beacon processor send failure", + )) + } else { + Ok(()) } } - Ok(_) => { - debug!(self.log, "Requesting parent"; &parent_lookup); - self.parent_lookups.push(parent_lookup) + None => { + trace!(self.log, "Dropping blobs ready for processing. Beacon processor not available"; "block_root" => %block_root); + Err(LookupRequestError::SendFailed( + "beacon processor unavailable", + )) } } + } + + /// Attempts to request the next unknown parent. This method handles peer scoring and dropping + /// the lookup in the event of failure. + fn request_parent(&mut self, mut parent_lookup: ParentLookup, cx: &SyncNetworkContext) { + let response = parent_lookup.request_parent(cx); + + match response { + Err(e) => { + self.handle_parent_request_error(&mut parent_lookup, cx, e); + } + Ok(_) => self.parent_lookups.push(parent_lookup), + } // We remove and add back again requests so we want this updated regardless of outcome. metrics::set_gauge( @@ -743,7 +1370,9 @@ impl BlockLookups { /// Drops all the single block requests and returns how many requests were dropped. 
pub fn drop_single_block_requests(&mut self) -> usize { - self.single_block_lookups.drain().len() + let requests_to_drop = self.single_block_lookups.len(); + self.single_block_lookups.clear(); + requests_to_drop } /// Drops all the parent chain requests and returns how many requests were dropped. diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a2c2f1d1ce..93bd2f57c0 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,17 +1,19 @@ -use super::RootBlockTuple; +use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup}; +use super::{DownloadedBlock, PeerShouldHave}; +use crate::sync::block_lookups::common::Parent; +use crate::sync::block_lookups::common::RequestState; +use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker}; use beacon_chain::BeaconChainTypes; +use itertools::Itertools; use lighthouse_network::PeerId; +use std::collections::VecDeque; use std::sync::Arc; -use store::{Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; -use crate::sync::{ - manager::{Id, SLOT_IMPORT_TOLERANCE}, - network_context::SyncNetworkContext, -}; - -use super::single_block_lookup::{self, SingleBlockRequest}; - /// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any @@ -24,19 +26,22 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. 
- downloaded_blocks: Vec>, + downloaded_blocks: Vec>, /// Request of the last parent. - current_parent_request: SingleBlockRequest, - /// Id of the last parent request. - current_parent_request_id: Option, + pub current_parent_request: SingleBlockLookup, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum VerifyError { +pub enum ParentVerifyError { RootMismatch, NoBlockReturned, + NotEnoughBlobsReturned, ExtraBlocksReturned, + UnrequestedBlobId, + ExtraBlobsReturned, + InvalidIndex(u64), PreviousFailure { parent_root: Hash256 }, + BenignFailure, } #[derive(Debug, PartialEq, Eq)] @@ -53,62 +58,86 @@ pub enum RequestError { } impl ParentLookup { + pub fn new( + block_root: Hash256, + parent_root: Hash256, + peer_id: PeerShouldHave, + da_checker: Arc>, + cx: &mut SyncNetworkContext, + ) -> Self { + let current_parent_request = SingleBlockLookup::new( + parent_root, + Some(ChildComponents::empty(block_root)), + &[peer_id], + da_checker, + cx.next_id(), + ); + + Self { + chain_hash: block_root, + downloaded_blocks: vec![], + current_parent_request, + } + } + pub fn contains_block(&self, block_root: &Hash256) -> bool { self.downloaded_blocks .iter() .any(|(root, _d_block)| root == block_root) } - pub fn new( - block_root: Hash256, - block: Arc>, - peer_id: PeerId, - ) -> Self { - let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); - - Self { - chain_hash: block_root, - downloaded_blocks: vec![(block_root, block)], - current_parent_request, - current_parent_request_id: None, - } + pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.current_parent_request.is_for_block(block_root) } /// Attempts to request the next unknown parent. If the request fails, it should be removed. 
- pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { + pub fn request_parent(&mut self, cx: &SyncNetworkContext) -> Result<(), RequestError> { // check to make sure this request hasn't failed - if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { return Err(RequestError::ChainTooLong); } - let (peer_id, request) = self.current_parent_request.request_block()?; - match cx.parent_lookup_request(peer_id, request) { - Ok(request_id) => { - self.current_parent_request_id = Some(request_id); - Ok(()) - } - Err(reason) => { - self.current_parent_request_id = None; - Err(RequestError::SendFailed(reason)) - } - } + self.current_parent_request + .request_block_and_blobs(cx) + .map_err(Into::into) } pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { - self.current_parent_request.check_peer_disconnected(peer_id) + self.current_parent_request + .block_request_state + .state + .check_peer_disconnected(peer_id) + .and_then(|()| { + self.current_parent_request + .blob_request_state + .state + .check_peer_disconnected(peer_id) + }) } - pub fn add_block(&mut self, block: Arc>) { + pub fn add_unknown_parent_block(&mut self, block: RpcBlock) { let next_parent = block.parent_root(); - let current_root = self.current_parent_request.hash; + // Cache the block. + let current_root = self.current_parent_request.block_root(); self.downloaded_blocks.push((current_root, block)); - self.current_parent_request.hash = next_parent; - self.current_parent_request.state = single_block_lookup::State::AwaitingDownload; - self.current_parent_request_id = None; + + // Update the parent request. 
+ self.current_parent_request + .update_requested_parent_block(next_parent) } - pub fn pending_response(&self, req_id: Id) -> bool { - self.current_parent_request_id == Some(req_id) + pub fn block_processing_peer(&self) -> Result { + self.current_parent_request + .block_request_state + .state + .processing_peer() + } + + pub fn blob_processing_peer(&self) -> Result { + self.current_parent_request + .blob_request_state + .state + .processing_peer() } /// Consumes the parent request and destructures it into it's parts. @@ -117,21 +146,20 @@ impl ParentLookup { self, ) -> ( Hash256, - Vec>>, + VecDeque>, Vec, - SingleBlockRequest, + SingleBlockLookup, ) { let ParentLookup { chain_hash, downloaded_blocks, current_parent_request, - current_parent_request_id: _, } = self; let block_count = downloaded_blocks.len(); - let mut blocks = Vec::with_capacity(block_count); + let mut blocks = VecDeque::with_capacity(block_count); let mut hashes = Vec::with_capacity(block_count); - for (hash, block) in downloaded_blocks { - blocks.push(block); + for (hash, block) in downloaded_blocks.into_iter() { + blocks.push_back(block); hashes.push(hash); } (chain_hash, blocks, hashes, current_parent_request) @@ -142,81 +170,98 @@ impl ParentLookup { self.chain_hash } - pub fn download_failed(&mut self) { - self.current_parent_request.register_failure_downloading(); - self.current_parent_request_id = None; - } - pub fn processing_failed(&mut self) { - self.current_parent_request.register_failure_processing(); - self.current_parent_request_id = None; + self.current_parent_request + .block_request_state + .state + .register_failure_processing(); + self.current_parent_request + .blob_request_state + .state + .register_failure_processing(); + if let Some(components) = self.current_parent_request.child_components.as_mut() { + components.downloaded_block = None; + components.downloaded_blobs = <_>::default(); + } } /// Verifies that the received block is what we requested. 
If so, parent lookup now waits for /// the processing result of the block. - pub fn verify_block( + pub fn verify_response>( &mut self, - block: Option>>, + block: Option, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>, VerifyError> { - let root_and_block = self.current_parent_request.verify_block(block)?; + ) -> Result, ParentVerifyError> { + let expected_block_root = self.current_parent_request.block_root(); + let request_state = R::request_state_mut(&mut self.current_parent_request); + let root_and_verified = request_state.verify_response(expected_block_root, block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should // be dropped and the peer downscored. - if let Some(parent_root) = root_and_block + if let Some(parent_root) = root_and_verified .as_ref() - .map(|(_, block)| block.parent_root()) + .and_then(|block| R::get_parent_root(block)) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure_downloading(); - self.current_parent_request_id = None; - return Err(VerifyError::PreviousFailure { parent_root }); + request_state.register_failure_downloading(); + return Err(ParentVerifyError::PreviousFailure { parent_root }); } } - Ok(root_and_block) + Ok(root_and_verified) } - pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option { - if self.chain_hash == chain_hash { - return self.current_parent_request.processing_peer().ok(); - } - None + pub fn add_peer(&mut self, peer: PeerShouldHave) { + self.current_parent_request.add_peer(peer) } - #[cfg(test)] - pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts() - } - - pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { - self.current_parent_request.add_peer(block_root, peer_id) + /// Adds a list of peers to the parent request. 
+ pub fn add_peers(&mut self, peers: &[PeerShouldHave]) { + self.current_parent_request.add_peers(peers) } pub fn used_peers(&self) -> impl Iterator + '_ { - self.current_parent_request.used_peers.iter() + self.current_parent_request + .block_request_state + .state + .used_peers + .iter() + .chain( + self.current_parent_request + .blob_request_state + .state + .used_peers + .iter(), + ) + .unique() } } -impl From for VerifyError { - fn from(e: super::single_block_lookup::VerifyError) -> Self { - use super::single_block_lookup::VerifyError as E; +impl From for ParentVerifyError { + fn from(e: LookupVerifyError) -> Self { + use LookupVerifyError as E; match e { - E::RootMismatch => VerifyError::RootMismatch, - E::NoBlockReturned => VerifyError::NoBlockReturned, - E::ExtraBlocksReturned => VerifyError::ExtraBlocksReturned, + E::RootMismatch => ParentVerifyError::RootMismatch, + E::NoBlockReturned => ParentVerifyError::NoBlockReturned, + E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned, + E::UnrequestedBlobId => ParentVerifyError::UnrequestedBlobId, + E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned, + E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index), + E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned, + E::BenignFailure => ParentVerifyError::BenignFailure, } } } -impl From for RequestError { - fn from(e: super::single_block_lookup::LookupRequestError) -> Self { - use super::single_block_lookup::LookupRequestError as E; +impl From for RequestError { + fn from(e: LookupRequestError) -> Self { + use LookupRequestError as E; match e { E::TooManyAttempts { cannot_process } => { RequestError::TooManyAttempts { cannot_process } } E::NoPeers => RequestError::NoPeers, + E::SendFailed(msg) => RequestError::SendFailed(msg), } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 62ca68e7bc..e0f7d88094 100644 
--- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,43 +1,44 @@ +use super::PeerShouldHave; +use crate::sync::block_lookups::common::{Lookup, RequestState}; +use crate::sync::block_lookups::Id; +use crate::sync::network_context::SyncNetworkContext; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::{ + AvailabilityCheckError, DataAvailabilityChecker, MissingBlobs, +}; +use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents}; +use beacon_chain::BeaconChainTypes; +use lighthouse_network::{PeerAction, PeerId}; +use slog::{trace, Logger}; use std::collections::HashSet; +use std::fmt::Debug; +use std::marker::PhantomData; use std::sync::Arc; - -use super::RootBlockTuple; -use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; -use rand::seq::IteratorRandom; -use ssz_types::VariableList; -use store::{EthSpec, Hash256, SignedBeaconBlock}; +use store::Hash256; use strum::IntoStaticStr; - -/// Object representing a single block lookup request. -#[derive(PartialEq, Eq)] -pub struct SingleBlockRequest { - /// The hash of the requested block. - pub hash: Hash256, - /// State of this request. - pub state: State, - /// Peers that should have this block. - pub available_peers: HashSet, - /// Peers from which we have requested this block. - pub used_peers: HashSet, - /// How many times have we attempted to process this block. - failed_processing: u8, - /// How many times have we attempted to download this block. 
- failed_downloading: u8, -} +use types::blob_sidecar::FixedBlobSidecarList; +use types::EthSpec; #[derive(Debug, PartialEq, Eq)] pub enum State { AwaitingDownload, - Downloading { peer_id: PeerId }, - Processing { peer_id: PeerId }, + Downloading { peer_id: PeerShouldHave }, + Processing { peer_id: PeerShouldHave }, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum VerifyError { +pub enum LookupVerifyError { RootMismatch, NoBlockReturned, ExtraBlocksReturned, + UnrequestedBlobId, + ExtraBlobsReturned, + NotEnoughBlobsReturned, + InvalidIndex(u64), + /// We don't have enough information to know + /// whether the peer is at fault or simply missed + /// what was requested on gossip. + BenignFailure, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -48,17 +49,398 @@ pub enum LookupRequestError { cannot_process: bool, }, NoPeers, + SendFailed(&'static str), } -impl SingleBlockRequest { - pub fn new(hash: Hash256, peer_id: PeerId) -> Self { +pub struct SingleBlockLookup { + pub id: Id, + pub block_request_state: BlockRequestState, + pub blob_request_state: BlobRequestState, + pub da_checker: Arc>, + /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent` + /// because any blocks or blobs without parents won't hit the data availability cache. + pub child_components: Option>, +} + +impl SingleBlockLookup { + pub fn new( + requested_block_root: Hash256, + child_components: Option>, + peers: &[PeerShouldHave], + da_checker: Arc>, + id: Id, + ) -> Self { + let is_deneb = da_checker.is_deneb(); + Self { + id, + block_request_state: BlockRequestState::new(requested_block_root, peers), + blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), + da_checker, + child_components, + } + } + + /// Get the block root that is being requested. + pub fn block_root(&self) -> Hash256 { + self.block_request_state.requested_block_root + } + + /// Check the block root matches the requested block root. 
+ pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.block_root() == block_root + } + + /// Update the requested block, this should only be used in a chain of parent lookups to request + /// the next parent. + pub fn update_requested_parent_block(&mut self, block_root: Hash256) { + self.block_request_state.requested_block_root = block_root; + self.block_request_state.state.state = State::AwaitingDownload; + self.blob_request_state.state.state = State::AwaitingDownload; + self.child_components = Some(ChildComponents::empty(block_root)); + } + + /// Get all unique peers across block and blob requests. + pub fn all_peers(&self) -> HashSet { + let mut all_peers = self.block_request_state.state.used_peers.clone(); + all_peers.extend(self.blob_request_state.state.used_peers.clone()); + all_peers + } + + /// Send the necessary requests for blocks and/or blobs. This will check whether we have + /// downloaded the block and/or blobs already and will not send requests if so. It will also + /// inspect the request state or blocks and blobs to ensure we are not already processing or + /// downloading the block and/or blobs. + pub fn request_block_and_blobs( + &mut self, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let block_root = self.block_root(); + let block_already_downloaded = self.block_already_downloaded(); + let blobs_already_downloaded = self.blobs_already_downloaded(); + + if block_already_downloaded && blobs_already_downloaded { + trace!(cx.log, "Lookup request already completed"; "block_root"=> ?block_root); + return Ok(()); + } + let id = self.id; + self.block_request_state + .build_request_and_send(id, block_already_downloaded, cx)?; + self.blob_request_state + .build_request_and_send(id, blobs_already_downloaded, cx) + } + + /// Returns a `CachedChild`, which is a wrapper around a `RpcBlock` that is either: + /// + /// 1. `NotRequired`: there is no child caching required for this lookup. + /// 2. 
`DownloadIncomplete`: Child caching is required, but all components are not yet downloaded. + /// 3. `Ok`: The child is required and we have downloaded it. + /// 4. `Err`: The child is required, but has failed consistency checks. + pub fn get_cached_child_block(&self) -> CachedChild { + if let Some(components) = self.child_components.as_ref() { + let Some(block) = components.downloaded_block.as_ref() else { + return CachedChild::DownloadIncomplete; + }; + + if !self.missing_blob_ids().is_empty() { + return CachedChild::DownloadIncomplete; + } + + match RpcBlock::new_from_fixed( + self.block_request_state.requested_block_root, + block.clone(), + components.downloaded_blobs.clone(), + ) { + Ok(rpc_block) => CachedChild::Ok(rpc_block), + Err(e) => CachedChild::Err(e), + } + } else { + CachedChild::NotRequired + } + } + + /// Accepts a verified response, and adds it to the child components if required. This method + /// returns a `CachedChild` which provides a completed block + blob response if all components have been + /// received, or information about whether the child is required and if it has been downloaded. + pub fn add_response>( + &mut self, + verified_response: R::VerifiedResponseType, + ) -> CachedChild { + if let Some(child_components) = self.child_components.as_mut() { + R::add_to_child_components(verified_response, child_components); + self.get_cached_child_block() + } else { + CachedChild::NotRequired + } + } + + /// Add a child component to the lookup request. Merges with any existing child components. 
+ pub fn add_child_components(&mut self, components: ChildComponents) { + if let Some(ref mut existing_components) = self.child_components { + let ChildComponents { + block_root: _, + downloaded_block, + downloaded_blobs, + } = components; + if let Some(block) = downloaded_block { + existing_components.merge_block(block); + } + existing_components.merge_blobs(downloaded_blobs); + } else { + self.child_components = Some(components); + } + } + + /// Add all given peers to both block and blob request states. + pub fn add_peer(&mut self, peer: PeerShouldHave) { + match peer { + PeerShouldHave::BlockAndBlobs(peer_id) => { + self.block_request_state.state.add_peer(&peer_id); + self.blob_request_state.state.add_peer(&peer_id); + } + PeerShouldHave::Neither(peer_id) => { + self.block_request_state.state.add_potential_peer(&peer_id); + self.blob_request_state.state.add_potential_peer(&peer_id); + } + } + } + + /// Add all given peers to both block and blob request states. + pub fn add_peers(&mut self, peers: &[PeerShouldHave]) { + for peer in peers { + self.add_peer(*peer); + } + } + + /// Returns true if the block has already been downloaded. + pub fn both_components_downloaded(&self) -> bool { + self.block_request_state.state.component_downloaded + && self.blob_request_state.state.component_downloaded + } + + /// Returns true if the block has already been downloaded. + pub fn both_components_processed(&self) -> bool { + self.block_request_state.state.component_processed + && self.blob_request_state.state.component_processed + } + + /// Checks both the block and blob request states to see if the peer is disconnected. + /// + /// Returns true if the lookup should be dropped. 
+ pub fn should_drop_lookup_on_disconnected_peer( + &mut self, + peer_id: &PeerId, + cx: &SyncNetworkContext, + log: &Logger, + ) -> bool { + let block_root = self.block_root(); + let block_peer_disconnected = self + .block_request_state + .state + .check_peer_disconnected(peer_id) + .is_err(); + let blob_peer_disconnected = self + .blob_request_state + .state + .check_peer_disconnected(peer_id) + .is_err(); + + if block_peer_disconnected || blob_peer_disconnected { + if let Err(e) = self.request_block_and_blobs(cx) { + trace!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); + return true; + } + } + false + } + + /// Returns `true` if the block has already been downloaded. + pub(crate) fn block_already_downloaded(&self) -> bool { + if let Some(components) = self.child_components.as_ref() { + components.block_exists() + } else { + self.da_checker.has_block(&self.block_root()) + } + } + + /// Updates the `requested_ids` field of the `BlockRequestState` with the most recent picture + /// of which blobs still need to be requested. Returns `true` if there are no more blobs to + /// request. + pub(crate) fn blobs_already_downloaded(&mut self) -> bool { + self.update_blobs_request(); + self.blob_request_state.requested_ids.is_empty() + } + + /// Updates this request with the most recent picture of which blobs still need to be requested. + pub fn update_blobs_request(&mut self) { + self.blob_request_state.requested_ids = self.missing_blob_ids(); + } + + /// If `child_components` is `Some`, we know block components won't hit the data + /// availability cache, so we don't check its processing cache unless `child_components` + /// is `None`. 
+ pub(crate) fn missing_blob_ids(&self) -> MissingBlobs { + let block_root = self.block_root(); + if let Some(components) = self.child_components.as_ref() { + self.da_checker.get_missing_blob_ids(block_root, components) + } else { + let Some(processing_availability_view) = + self.da_checker.get_processing_components(block_root) + else { + return MissingBlobs::new_without_block(block_root, self.da_checker.is_deneb()); + }; + self.da_checker + .get_missing_blob_ids(block_root, &processing_availability_view) + } + } + + /// Penalizes a blob peer if it should have blobs but didn't return them to us. Does not penalize + /// a peer who we request blobs from based on seeing a block or blobs over gossip. This may + /// have been a benign failure. + pub fn penalize_blob_peer(&mut self, penalize_always: bool, cx: &SyncNetworkContext) { + if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() { + if penalize_always || matches!(blob_peer, PeerShouldHave::BlockAndBlobs(_)) { + cx.report_peer( + blob_peer.to_peer_id(), + PeerAction::MidToleranceError, + "single_blob_failure", + ); + } + self.blob_request_state + .state + .remove_peer_if_useless(blob_peer.as_peer_id()); + } + } + + /// This failure occurs on download, so register a failure downloading, penalize the peer if + /// necessary and clear the blob cache. + pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext) { + self.penalize_blob_peer(false, cx); + if let Some(cached_child) = self.child_components.as_mut() { + cached_child.clear_blobs(); + } + self.blob_request_state.state.register_failure_downloading() + } + + /// This failure occurs after processing, so register a failure processing, penalize the peer if + /// necessary and clear the blob cache. 
+ pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext) { + self.penalize_blob_peer(true, cx); + if let Some(cached_child) = self.child_components.as_mut() { + cached_child.clear_blobs(); + } + self.blob_request_state.state.register_failure_processing() + } +} + +/// The state of the blob request component of a `SingleBlockLookup`. +pub struct BlobRequestState { + /// The latest picture of which blobs still need to be requested. This includes information + /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in + /// the data availability checker. + pub requested_ids: MissingBlobs, + /// Where we store blobs until we receive the stream terminator. + pub blob_download_queue: FixedBlobSidecarList, + pub state: SingleLookupRequestState, + _phantom: PhantomData, +} + +impl BlobRequestState { + pub fn new(block_root: Hash256, peer_source: &[PeerShouldHave], is_deneb: bool) -> Self { + let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); + Self { + requested_ids: default_ids, + blob_download_queue: <_>::default(), + state: SingleLookupRequestState::new(peer_source), + _phantom: PhantomData, + } + } +} + +/// The state of the block request component of a `SingleBlockLookup`. +pub struct BlockRequestState { + pub requested_block_root: Hash256, + pub state: SingleLookupRequestState, + _phantom: PhantomData, +} + +impl BlockRequestState { + pub fn new(block_root: Hash256, peers: &[PeerShouldHave]) -> Self { + Self { + requested_block_root: block_root, + state: SingleLookupRequestState::new(peers), + _phantom: PhantomData, + } + } +} + +/// This is the status of cached components for a lookup if they are required. It provides information +/// about whether we should send a responses immediately for processing, whether we require more +/// responses, or whether all cached components have been received and the reconstructed block +/// should be sent for processing. 
+pub enum CachedChild<E: EthSpec> {
+    /// All child components have been received, this is the reconstructed block, including all blobs.
+    /// It has been checked for consistency between blobs and block, but no consensus checks have
+    /// been performed and no kzg verification has been performed.
+    Ok(RpcBlock<E>),
+    /// All child components have not yet been received.
+    DownloadIncomplete,
+    /// Child components should not be cached, send this directly for processing.
+    NotRequired,
+    /// There was an error during consistency checks between block and blobs.
+    Err(AvailabilityCheckError),
+}
+/// Object representing the state of a single block or blob lookup request.
+#[derive(PartialEq, Eq, Debug)]
+pub struct SingleLookupRequestState {
+    /// State of this request.
+    pub state: State,
+    /// Peers that should have this block or blob.
+    pub available_peers: HashSet<PeerId>,
+    /// Peers that may or may not have this block or blob.
+    pub potential_peers: HashSet<PeerId>,
+    /// Peers from which we have requested this block.
+    pub used_peers: HashSet<PeerId>,
+    /// How many times have we attempted to process this block or blob.
+    pub failed_processing: u8,
+    /// How many times have we attempted to download this block or blob.
+    pub failed_downloading: u8,
+    /// Whether or not we have downloaded this block or blob.
+    pub component_downloaded: bool,
+    /// Whether or not we have processed this block or blob.
+    pub component_processed: bool,
+    /// Should be incremented every time this request is retried. The purpose of this is to
+    /// differentiate retries of the same block/blob request within a lookup. We currently penalize
+    /// peers and retry requests prior to receiving the stream terminator. This means responses
+    /// from a prior request may arrive after a new request has been sent, this counter allows
+    /// us to differentiate these two responses.
+ pub req_counter: u32, +} + +impl SingleLookupRequestState { + pub fn new(peers: &[PeerShouldHave]) -> Self { + let mut available_peers = HashSet::default(); + let mut potential_peers = HashSet::default(); + for peer in peers { + match peer { + PeerShouldHave::BlockAndBlobs(peer_id) => { + available_peers.insert(*peer_id); + } + PeerShouldHave::Neither(peer_id) => { + potential_peers.insert(*peer_id); + } + } + } + Self { - hash, state: State::AwaitingDownload, - available_peers: HashSet::from([peer_id]), + available_peers, + potential_peers, used_peers: HashSet::default(), failed_processing: 0, failed_downloading: 0, + component_downloaded: false, + component_processed: false, + req_counter: 0, } } @@ -80,19 +462,25 @@ impl SingleBlockRequest { self.failed_processing + self.failed_downloading } - pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { - let is_useful = &self.hash == hash; - if is_useful { - self.available_peers.insert(*peer_id); + /// This method should be used for peers wrapped in `PeerShouldHave::BlockAndBlobs`. + pub fn add_peer(&mut self, peer_id: &PeerId) { + self.potential_peers.remove(peer_id); + self.available_peers.insert(*peer_id); + } + + /// This method should be used for peers wrapped in `PeerShouldHave::Neither`. + pub fn add_potential_peer(&mut self, peer_id: &PeerId) { + if !self.available_peers.contains(peer_id) { + self.potential_peers.insert(*peer_id); } - is_useful } /// If a peer disconnects, this request could be failed. 
If so, an error is returned pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { self.available_peers.remove(dc_peer_id); + self.potential_peers.remove(dc_peer_id); if let State::Downloading { peer_id } = &self.state { - if peer_id == dc_peer_id { + if peer_id.as_peer_id() == dc_peer_id { // Peer disconnected before providing a block self.register_failure_downloading(); return Err(()); @@ -101,88 +489,59 @@ impl SingleBlockRequest { Ok(()) } - /// Verifies if the received block matches the requested one. - /// Returns the block for processing if the response is what we expected. - pub fn verify_block( - &mut self, - block: Option>>, - ) -> Result>, VerifyError> { - match self.state { - State::AwaitingDownload => { - self.register_failure_downloading(); - Err(VerifyError::ExtraBlocksReturned) - } - State::Downloading { peer_id } => match block { - Some(block) => { - // Compute the block root using this specific function so that we can get timing - // metrics. - let block_root = get_block_root(&block); - if block_root != self.hash { - // return an error and drop the block - // NOTE: we take this is as a download failure to prevent counting the - // attempt as a chain failure, but simply a peer failure. - self.register_failure_downloading(); - Err(VerifyError::RootMismatch) - } else { - // Return the block for processing. - self.state = State::Processing { peer_id }; - Ok(Some((block_root, block))) - } - } - None => { - self.register_failure_downloading(); - Err(VerifyError::NoBlockReturned) - } - }, - State::Processing { peer_id: _ } => match block { - Some(_) => { - // We sent the block for processing and received an extra block. 
- self.register_failure_downloading(); - Err(VerifyError::ExtraBlocksReturned) - } - None => { - // This is simply the stream termination and we are already processing the - // block - Ok(None) - } - }, - } - } - - pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { - debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts() >= MAX_ATTEMPTS { - Err(LookupRequestError::TooManyAttempts { - cannot_process: self.failed_processing >= self.failed_downloading, - }) - } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest::new(VariableList::from(vec![self.hash])); - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) - } else { - Err(LookupRequestError::NoPeers) - } - } - - pub fn processing_peer(&self) -> Result { + /// Returns the id peer we downloaded from if we have downloaded a verified block, otherwise + /// returns an error. + pub fn processing_peer(&self) -> Result { if let State::Processing { peer_id } = &self.state { Ok(*peer_id) } else { Err(()) } } + + /// Remove the given peer from the set of potential peers, so long as there is at least one + /// other potential peer or we have any available peers. 
+ pub fn remove_peer_if_useless(&mut self, peer_id: &PeerId) { + if !self.available_peers.is_empty() || self.potential_peers.len() > 1 { + self.potential_peers.remove(peer_id); + } + } } -impl slog::Value for SingleBlockRequest { +impl slog::Value for SingleBlockLookup { + fn serialize( + &self, + _record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + serializer.emit_str("request", key)?; + serializer.emit_arguments("lookup_type", &format_args!("{:?}", L::lookup_type()))?; + serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; + serializer.emit_arguments( + "blob_ids", + &format_args!("{:?}", self.blob_request_state.requested_ids.indices()), + )?; + serializer.emit_arguments( + "block_request_state.state", + &format_args!("{:?}", self.block_request_state.state), + )?; + serializer.emit_arguments( + "blob_request_state.state", + &format_args!("{:?}", self.blob_request_state.state), + )?; + slog::Result::Ok(()) + } +} + +impl slog::Value for SingleLookupRequestState { fn serialize( &self, record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer, ) -> slog::Result { - serializer.emit_str("request", key)?; - serializer.emit_arguments("hash", &format_args!("{}", self.hash))?; + serializer.emit_str("request_state", key)?; match &self.state { State::AwaitingDownload => { "awaiting_download".serialize(record, "state", serializer)? 
@@ -203,8 +562,19 @@ impl slog::Value for SingleBlockRequest { #[cfg(test)] mod tests { use super::*; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::MinimalEthSpec as E; + use crate::sync::block_lookups::common::LookupType; + use crate::sync::block_lookups::common::{Lookup, RequestState}; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use slot_clock::{SlotClock, TestingSlotClock}; + use std::time::Duration; + use store::{HotColdDB, MemoryStore, StoreConfig}; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + ChainSpec, EthSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot, + }; fn rand_block() -> SignedBeaconBlock { let mut rng = XorShiftRng::from_seed([42; 16]); @@ -215,37 +585,124 @@ mod tests { types::Signature::random_for_test(&mut rng), ) } + type T = Witness, E, MemoryStore, MemoryStore>; + + struct TestLookup1; + + impl Lookup for TestLookup1 { + const MAX_ATTEMPTS: u8 = 3; + + fn lookup_type() -> LookupType { + panic!() + } + } + + struct TestLookup2; + + impl Lookup for TestLookup2 { + const MAX_ATTEMPTS: u8 = 4; + + fn lookup_type() -> LookupType { + panic!() + } + } #[test] fn test_happy_path() { - let peer_id = PeerId::random(); + let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); let block = rand_block(); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = + HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) + .expect("store"); + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec) + .expect("data availability checker"), + ); + let mut sl = SingleBlockLookup::::new( + block.canonical_root(), + None, + 
&[peer_id], + da_checker, + 1, + ); + as RequestState>::build_request( + &mut sl.block_request_state, + ) + .unwrap(); + sl.block_request_state.state.state = State::Downloading { peer_id }; - let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); - sl.request_block().unwrap(); - sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + as RequestState>::verify_response( + &mut sl.block_request_state, + block.canonical_root(), + Some(block.into()), + ) + .unwrap() + .unwrap(); } #[test] fn test_block_lookup_failures() { - const FAILURES: u8 = 3; - let peer_id = PeerId::random(); + let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); let block = rand_block(); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = + HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) + .expect("store"); - let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); - for _ in 1..FAILURES { - sl.request_block().unwrap(); - sl.register_failure_downloading(); + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec) + .expect("data availability checker"), + ); + + let mut sl = SingleBlockLookup::::new( + block.canonical_root(), + None, + &[peer_id], + da_checker, + 1, + ); + for _ in 1..TestLookup2::MAX_ATTEMPTS { + as RequestState>::build_request( + &mut sl.block_request_state, + ) + .unwrap(); + sl.block_request_state.state.register_failure_downloading(); } // Now we receive the block and send it for processing - sl.request_block().unwrap(); - sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + as RequestState>::build_request( + &mut sl.block_request_state, + ) + .unwrap(); + sl.block_request_state.state.state = State::Downloading { peer_id }; + + as 
RequestState>::verify_response( + &mut sl.block_request_state, + block.canonical_root(), + Some(block.into()), + ) + .unwrap() + .unwrap(); // One processing failure maxes the available attempts - sl.register_failure_processing(); + sl.block_request_state.state.register_failure_processing(); assert_eq!( - sl.request_block(), + as RequestState>::build_request( + &mut sl.block_request_state + ), Err(LookupRequestError::TooManyAttempts { cannot_process: false }) diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index c588f867bd..bd1e72ee18 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,22 +1,28 @@ -use std::sync::Arc; - use crate::network_beacon_processor::NetworkBeaconProcessor; + use crate::service::RequestId; -use crate::sync::manager::RequestId as SyncId; +use crate::sync::manager::{RequestId as SyncId, SingleLookupReqId}; use crate::NetworkMessage; +use std::sync::Arc; use super::*; +use crate::sync::block_lookups::common::ResponseType; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{ + build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, +}; use beacon_processor::WorkEvent; +use lighthouse_network::rpc::RPCResponseErrorCode; use lighthouse_network::{NetworkGlobals, Request}; -use slog::{Drain, Level}; -use slot_clock::ManualSlotClock; +use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; -use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::MinimalEthSpec as E; +use types::{ + test_utils::{SeedableRng, XorShiftRng}, + BlobSidecar, EthSpec, ForkName, MinimalEthSpec as E, SignedBeaconBlock, +}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -24,23 +30,29 @@ struct TestRig { beacon_processor_rx: mpsc::Receiver>, network_rx: 
mpsc::UnboundedReceiver>, rng: XorShiftRng, + harness: BeaconChainHarness, } const D: Duration = Duration::new(0, 0); impl TestRig { - fn test_setup(log_level: Option) -> (BlockLookups, SyncNetworkContext, Self) { - let log = { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); + fn test_setup(enable_log: bool) -> (BlockLookups, SyncNetworkContext, Self) { + let log = build_log(slog::Level::Debug, enable_log); - if let Some(log_level) = log_level { - slog::Logger::root(drain.filter_level(log_level).fuse(), slog::o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), slog::o!()) - } - }; + // Initialise a new beacon chain + let harness = BeaconChainHarness::>::builder(E) + .default_spec() + .logger(log.clone()) + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .testing_slot_clock(TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(12), + )) + .build(); + + let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); @@ -51,12 +63,18 @@ impl TestRig { beacon_processor_rx, network_rx, rng, + harness, }; - let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); + + let bl = BlockLookups::new( + chain.data_availability_checker.clone(), + log.new(slog::o!("component" => "block_lookups")), + ); let cx = { SyncNetworkContext::new( network_tx, Arc::new(network_beacon_processor), + chain, log.new(slog::o!("component" => "network_context")), ) }; @@ -64,48 +82,82 @@ impl TestRig { (bl, cx, rig) } - fn rand_block(&mut self) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + fn rand_block(&mut self, 
fork_name: ForkName) -> SignedBeaconBlock { + self.rand_block_and_blobs(fork_name, NumBlobs::None).0 + } + + fn rand_block_and_blobs( + &mut self, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let rng = &mut self.rng; + generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } #[track_caller] - fn expect_block_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::SingleBlock { id }), - }) => id, - other => { - panic!("Expected block request, found {:?}", other); - } + fn expect_lookup_request(&mut self, response_type: ResponseType) -> SingleLookupReqId { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlock { id }), + }) => id, + other => { + panic!("Expected block request, found {:?}", other); + } + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlob { id }), + }) => id, + other => { + panic!("Expected blob request, found {:?}", other); + } + }, } } #[track_caller] - fn expect_parent_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::ParentLookup { id }), - }) => id, - other => panic!("Expected parent request, found {:?}", other), + fn expect_parent_request(&mut self, response_type: ResponseType) -> SingleLookupReqId { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: 
RequestId::Sync(SyncId::ParentLookup { id }), + }) => id, + other => panic!("Expected parent request, found {:?}", other), + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::ParentLookupBlob { id }), + }) => id, + other => panic!("Expected parent blobs request, found {:?}", other), + }, } } #[track_caller] - fn expect_block_process(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); - } - other => panic!("Expected block process, found {:?}", other), + fn expect_block_process(&mut self, response_type: ResponseType) { + match response_type { + ResponseType::Block => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); + } + other => panic!("Expected block process, found {:?}", other), + }, + ResponseType::Blob => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), beacon_processor::RPC_BLOBS); + } + other => panic!("Expected blob process, found {:?}", other), + }, } } @@ -127,6 +179,14 @@ impl TestRig { ); } + #[track_caller] + fn expect_empty_beacon_processor(&mut self) { + assert_eq!( + self.beacon_processor_rx.try_recv().expect_err("must err"), + mpsc::error::TryRecvError::Empty + ); + } + #[track_caller] pub fn expect_penalty(&mut self) { match self.network_rx.try_recv() { @@ -135,151 +195,288 @@ impl TestRig { } } - pub fn block_with_parent(&mut self, parent_root: Hash256) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - parent_root, - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + pub fn block_with_parent( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + ) -> SignedBeaconBlock { + let mut block = 
self.rand_block(fork_name); + *block.message_mut().parent_root_mut() = parent_root; + block + } + + pub fn block_with_parent_and_blobs( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs); + *block.message_mut().parent_root_mut() = parent_root; + let block_root = block.canonical_root(); + blobs.iter_mut().for_each(|blob| { + blob.block_parent_root = parent_root; + blob.block_root = block_root; + }); + (block, blobs) } } #[test] fn test_single_block_lookup_happy_path() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let block = rig.rand_block(); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); - + let block_root = block.canonical_root(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block_root, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.into()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. 
assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. - bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()).into(), &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); + bl.single_block_component_processed::>( + id.id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_single_block_lookup_empty_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block_hash, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer does not have the block. It should be penalized. 
- bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); - rig.expect_block_request(); // it should be retried + rig.expect_lookup_request(response_type); // it should be retried } #[test] fn test_single_block_lookup_wrong_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block_hash, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // Peer sends something else. It should be penalized. - let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.single_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); - rig.expect_block_request(); // should be retried + rig.expect_lookup_request(response_type); // should be retried // Send the stream termination. This should not produce an additional penalty. 
- bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_empty_network(); } #[test] fn test_single_block_lookup_failure() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block_hash, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. 
- bl.single_block_lookup_failed(id, &mut cx); - rig.expect_block_request(); + bl.single_block_lookup_failed::>( + id, + &peer_id, + &cx, + RPCError::UnsupportedProtocol, + ); + rig.expect_lookup_request(response_type); rig.expect_empty_network(); } #[test] fn test_single_block_lookup_becomes_parent_request() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = Arc::new(rig.rand_block(fork_name)); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block.canonical_root(), + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. 
- bl.single_block_processed( - id, - BlockError::ParentUnknown(Arc::new(block)).into(), + bl.single_block_component_processed::>( + id.id, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), &mut cx, ); - assert_eq!(bl.single_block_lookups.len(), 0); - rig.expect_parent_request(); + assert_eq!(bl.single_block_lookups.len(), 1); + rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 1); } #[test] fn test_parent_lookup_happy_path() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. 
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -288,140 +485,264 @@ fn test_parent_lookup_happy_path() { let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_wrong_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. 
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the wrong block, peer should be penalized and the block re-requested. - let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id1, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the stream termination for the first request. This should not produce extra penalties. - bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id1, peer_id, None, D, &cx); rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_empty_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends an empty response, peer should be penalized and the block re-requested. 
- bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id1, peer_id, None, D, &cx); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_rpc_failure() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = 
rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. - bl.parent_lookup_failed(id1, peer_id, &mut cx); - let id2 = rig.expect_parent_request(); + bl.parent_lookup_failed::>( + id1, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id2, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_too_many_attempts() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } match i % 2 { // make sure every error is accounted for 0 => { // The request fails. It should be tried again. 
- bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } _ => { // Send a bad block this time. It should be tried again. - let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); // Send the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + + // Note, previously we would send the same lookup id with a stream terminator, + // we'd ignore it because we'd intrepret it as an unrequested response, since + // we already got one response for the block. I'm not sure what the intent is + // for having this stream terminator line in this test at all. Receiving an invalid + // block and a stream terminator with the same Id now results in two failed attempts, + // I'm unsure if this is how it should behave? 
+ // + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); } } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -430,29 +751,63 @@ fn test_parent_lookup_too_many_attempts() { #[test] fn test_parent_lookup_too_many_download_attempts_no_blacklist() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let block_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { assert!(!bl.failed_chains.contains(&block_hash)); - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } if i % 2 != 0 { // The request fails. It should be tried again. 
- bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } else { // Send a bad block this time. It should be tried again. - let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + let bad_block = rig.rand_block(fork_name); + bl.parent_lookup_response::>( + id, + peer_id, + Some(bad_block.into()), + D, + &cx, + ); rig.expect_penalty(); } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -463,70 +818,126 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { #[test] fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + let response_type = ResponseType::Block; const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let parent = Arc::new(rig.rand_block()); - let block = rig.block_with_parent(parent.canonical_root()); - let block_hash = block.canonical_root(); + let parent = Arc::new(rig.rand_block(fork_name)); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); // Fail downloading the block - for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { - 
let id = rig.expect_parent_request(); + for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. - bl.parent_lookup_failed(id, peer_id, &mut cx); + bl.parent_lookup_failed::>( + id, + peer_id, + &cx, + RPCError::ErrorResponse( + RPCResponseErrorCode::ResourceUnavailable, + "older than deneb".into(), + ), + ); } // Now fail processing a block in the parent request - for _ in 0..PROCESSING_FAILURES { - let id = dbg!(rig.expect_parent_request()); - assert!(!bl.failed_chains.contains(&block_hash)); + for i in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request(response_type)); + if matches!(fork_name, ForkName::Deneb) && i != 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. 
+ assert!(!bl.failed_chains.contains(&block_root)); // send the right parent but fail processing - bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); - bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.clone()), + D, + &cx, + ); + bl.parent_block_processed(block_root, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); rig.expect_penalty(); } - assert!(bl.failed_chains.contains(&block_hash)); + assert!(bl.failed_chains.contains(&block_root)); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_too_deep() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let mut blocks = - Vec::>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); + Vec::>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE { let parent = blocks .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = rig.block_with_parent(parent); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for block in blocks.into_iter().rev() { - let id = 
rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); // the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result bl.parent_block_processed( chain_hash, - BlockError::ParentUnknown(Arc::new(block)).into(), + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), &mut cx, ) } @@ -537,68 +948,121 @@ fn test_parent_lookup_too_deep() { #[test] fn test_parent_lookup_disconnection() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); let peer_id = PeerId::random(); - let trigger_block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let trigger_block = rig.rand_block(fork_name); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); bl.search_parent( - trigger_block.canonical_root(), - Arc::new(trigger_block), + trigger_slot, + trigger_block_root, + trigger_parent_root, peer_id, &mut cx, ); + bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_lookups.is_empty()); } #[test] fn test_single_block_lookup_ignored_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = 
ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block.canonical_root(), + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let id = rig.expect_lookup_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_lookup_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + bl.single_lookup_response::>( + id, + peer_id, + Some(block.into()), + D, + &cx, + ); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. 
- bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + bl.single_lookup_response::>(id, peer_id, None, D, &cx); // Send an Ignored response, the request should be dropped - bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + bl.single_block_component_processed::>( + id.id, + BlockProcessingResult::Ignored, + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_parent_lookup_ignored_response() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. 
- bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); - rig.expect_block_process(); + bl.parent_lookup_response::>( + id, + peer_id, + Some(parent.into()), + D, + &cx, + ); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Return an Ignored result. The request should be dropped - bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + bl.parent_block_processed(chain_hash, BlockProcessingResult::Ignored, &mut cx); rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 0); } @@ -606,8 +1070,13 @@ fn test_parent_lookup_ignored_response() { /// This is a regression test. #[test] fn test_same_chain_race_condition() { - let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug)); + let response_type = ResponseType::Block; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(true); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); #[track_caller] fn parent_lookups_consistency(bl: &BlockLookups) { let hashes: Vec<_> = bl @@ -634,29 +1103,53 @@ fn test_same_chain_race_condition() { .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = Arc::new(rig.block_with_parent(parent)); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for (i, block) in blocks.into_iter().rev().enumerate() { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request 
should have been triggered as well, + // we don't require a response because we're generateing 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); + bl.parent_lookup_response::>( + id, + peer_id, + Some(block.clone()), + D, + &cx, + ); // the stream termination - bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + bl.parent_lookup_response::>(id, peer_id, None, D, &cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result if i + 2 == depth { // one block was removed bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx) } else { - bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx) + bl.parent_block_processed( + chain_hash, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + &mut cx, + ) } parent_lookups_consistency(&bl) } @@ -666,12 +1159,1086 @@ fn test_same_chain_race_condition() { // Try to get this block again while the chain is being processed. We should not request it again. 
let peer_id = PeerId::random(); - bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); parent_lookups_consistency(&bl); let process_result = BatchProcessResult::Success { was_non_empty: true, }; - bl.parent_chain_processed(chain_hash, process_result, &mut cx); + bl.parent_chain_processed(chain_hash, process_result, &cx); assert_eq!(bl.parent_lookups.len(), 0); } + +mod deneb_only { + use super::*; + use crate::sync::block_lookups::common::ResponseType; + use beacon_chain::data_availability_checker::AvailabilityCheckError; + use beacon_chain::test_utils::NumBlobs; + use std::ops::IndexMut; + use std::str::FromStr; + + struct DenebTester { + bl: BlockLookups, + cx: SyncNetworkContext, + rig: TestRig, + block: Option>>, + blobs: Vec>>, + parent_block: Option>>, + parent_blobs: Vec>>, + peer_id: PeerId, + block_req_id: Option, + parent_block_req_id: Option, + blob_req_id: Option, + parent_blob_req_id: Option, + slot: Slot, + block_root: Hash256, + } + + enum RequestTrigger { + AttestationUnknownBlock, + GossipUnknownParentBlock, + GossipUnknownParentBlob, + GossipUnknownBlockOrBlob, + } + + impl DenebTester { + fn new(request_trigger: RequestTrigger) -> Option { + let fork_name = get_fork_name(); + if !matches!(fork_name, ForkName::Deneb) { + return None; + } + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + rig.harness.chain.slot_clock.set_slot( + E::slots_per_epoch() * rig.harness.spec.deneb_fork_epoch.unwrap().as_u64(), + ); + let (block, blobs) = rig.rand_block_and_blobs(fork_name, NumBlobs::Random); + let block = Arc::new(block); + let slot = block.slot(); + let mut block_root = block.canonical_root(); + let mut block = Some(block); + let mut blobs = 
blobs.into_iter().map(Arc::new).collect::>(); + + let mut parent_block = None; + let mut parent_blobs = vec![]; + + let peer_id = PeerId::random(); + + // Trigger the request + let (block_req_id, blob_req_id, parent_block_req_id, parent_blob_req_id) = + match request_trigger { + RequestTrigger::AttestationUnknownBlock => { + bl.search_block( + block_root, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut cx, + ); + let block_req_id = rig.expect_lookup_request(ResponseType::Block); + let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); + (Some(block_req_id), Some(blob_req_id), None, None) + } + RequestTrigger::GossipUnknownParentBlock => { + let (child_block, child_blobs) = rig.block_with_parent_and_blobs( + block_root, + get_fork_name(), + NumBlobs::Random, + ); + parent_block = Some(Arc::new(child_block)); + parent_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); + std::mem::swap(&mut parent_block, &mut block); + std::mem::swap(&mut parent_blobs, &mut blobs); + + let child_block = block.as_ref().expect("block").clone(); + let child_root = child_block.canonical_root(); + let parent_root = block_root; + block_root = child_root; + bl.search_child_block( + child_root, + ChildComponents::new(child_root, Some(child_block), None), + &[PeerShouldHave::Neither(peer_id)], + &mut cx, + ); + + let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no block request + bl.search_parent(slot, child_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + None, + Some(blob_req_id), + Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + RequestTrigger::GossipUnknownParentBlob => { + let (child_block, child_blobs) = rig.block_with_parent_and_blobs( + block_root, + get_fork_name(), + NumBlobs::Random, + ); + + parent_block = Some(Arc::new(child_block)); + 
parent_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); + std::mem::swap(&mut parent_block, &mut block); + std::mem::swap(&mut parent_blobs, &mut blobs); + + let child_blob = blobs.first().cloned().unwrap(); + let parent_root = block_root; + let child_root = child_blob.block_root; + block_root = child_root; + + let mut blobs = FixedBlobSidecarList::default(); + *blobs.index_mut(0) = Some(child_blob); + bl.search_child_block( + child_root, + ChildComponents::new(child_root, None, Some(blobs)), + &[PeerShouldHave::Neither(peer_id)], + &mut cx, + ); + + let block_req_id = rig.expect_lookup_request(ResponseType::Block); + let blobs_req_id = rig.expect_lookup_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no block request + bl.search_parent(slot, child_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + Some(block_req_id), + Some(blobs_req_id), + Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + RequestTrigger::GossipUnknownBlockOrBlob => { + bl.search_block(block_root, &[PeerShouldHave::Neither(peer_id)], &mut cx); + let block_req_id = rig.expect_lookup_request(ResponseType::Block); + let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); + (Some(block_req_id), Some(blob_req_id), None, None) + } + }; + + Some(Self { + bl, + cx, + rig, + block, + blobs, + parent_block, + parent_blobs, + peer_id, + block_req_id, + parent_block_req_id, + blob_req_id, + parent_blob_req_id, + slot, + block_root, + }) + } + + fn parent_block_response(mut self) -> Self { + self.rig.expect_empty_network(); + self.bl.parent_lookup_response::>( + self.parent_block_req_id.expect("parent request id"), + self.peer_id, + self.parent_block.clone(), + D, + &self.cx, + ); + + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn parent_blob_response(mut self) -> Self { + for blob in 
&self.parent_blobs { + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("parent blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + } + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + + self + } + + fn block_response_triggering_process(self) -> Self { + let mut me = self.block_response(); + me.rig.expect_block_process(ResponseType::Block); + + // The request should still be active. + assert_eq!(me.bl.single_block_lookups.len(), 1); + me + } + + fn block_response(mut self) -> Self { + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + self.bl + .single_lookup_response::>( + self.block_req_id.expect("block request id"), + self.peer_id, + self.block.clone(), + D, + &self.cx, + ); + self.rig.expect_empty_network(); + + // The request should still be active. 
+ assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn blobs_response(mut self) -> Self { + for blob in &self.blobs { + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + } + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn blobs_response_was_valid(mut self) -> Self { + self.rig.expect_empty_network(); + if !self.blobs.is_empty() { + self.rig.expect_block_process(ResponseType::Blob); + } + self + } + + fn expect_empty_beacon_processor(mut self) -> Self { + self.rig.expect_empty_beacon_processor(); + self + } + + fn empty_block_response(mut self) -> Self { + self.bl + .single_lookup_response::>( + self.block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_blobs_response(mut self) -> Self { + self.bl + .single_lookup_response::>( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_parent_block_response(mut self) -> Self { + self.bl.parent_lookup_response::>( + self.parent_block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn empty_parent_blobs_response(mut self) -> Self { + self.bl + .parent_lookup_response::>( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &self.cx, + ); + self + } + + fn block_imported(mut self) -> Self { + // Missing blobs should be the request is not removed, the outstanding blobs request should + // mean we do not send a new request. 
+ self.bl + .single_block_component_processed::>( + self.block_req_id.expect("block request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( + self.block_root, + )), + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.single_block_lookups.len(), 0); + self + } + + fn parent_block_imported(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.parent_lookups.len(), 0); + self + } + + fn parent_block_unknown_parent(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( + Some(self.block_root), + self.parent_block.clone().expect("parent block"), + ))), + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_parent_processed(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_block_processed(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.block_req_id.expect("block request id").id, + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn invalid_blob_processed(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.blob_req_id.expect("blob request id").id, + BlockProcessingResult::Err(BlockError::AvailabilityCheck( + AvailabilityCheckError::KzgVerificationFailed, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_block_request(mut self) -> Self { + self.bl + .single_block_component_processed::>( + 
self.block_req_id.expect("block request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_blob_request(mut self) -> Self { + self.bl + .single_block_component_processed::>( + self.blob_req_id.expect("blob request id").id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn expect_penalty(mut self) -> Self { + self.rig.expect_penalty(); + self + } + fn expect_no_penalty(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn expect_block_request(mut self) -> Self { + let id = self.rig.expect_lookup_request(ResponseType::Block); + self.block_req_id = Some(id); + self + } + fn expect_blobs_request(mut self) -> Self { + let id = self.rig.expect_lookup_request(ResponseType::Blob); + self.blob_req_id = Some(id); + self + } + fn expect_parent_block_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Block); + self.parent_block_req_id = Some(id); + self + } + fn expect_parent_blobs_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Blob); + self.parent_blob_req_id = Some(id); + self + } + fn expect_no_blobs_request(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn expect_no_block_request(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn invalidate_blobs_too_few(mut self) -> Self { + self.blobs.pop().expect("blobs"); + self + } + fn invalidate_blobs_too_many(mut self) -> Self { + let first_blob = self.blobs.get(0).expect("blob").clone(); + self.blobs.push(first_blob); + self + } + fn expect_parent_chain_process(mut self) -> Self { + self.rig.expect_parent_chain_process(); + self + } + fn expect_block_process(mut 
self) -> Self { + self.rig.expect_block_process(ResponseType::Block); + self + } + } + + fn get_fork_name() -> ForkName { + ForkName::from_str( + &std::env::var(beacon_chain::test_utils::FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + beacon_chain::test_utils::FORK_NAME_ENV_VAR, + e + ) + }), + ) + .unwrap() + } + + #[test] + fn single_block_and_blob_lookup_block_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .blobs_response() + .blobs_response_was_valid() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_blobs_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .block_response_triggering_process() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_empty_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .empty_block_response() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request() + .empty_blobs_response() + .expect_empty_beacon_processor() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .block_response_triggering_process() + .missing_components_from_block_request(); + } + + #[test] + fn single_block_response_then_empty_blob_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .empty_blobs_response() + .missing_components_from_blob_request() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn 
single_blob_response_then_empty_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .missing_components_from_blob_request() + .empty_block_response() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request(); + } + + #[test] + fn single_invalid_block_response_then_blob_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .invalid_block_processed() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request() + .blobs_response() + .missing_components_from_blob_request() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_invalid_blob_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .blobs_response() + .invalid_blob_processed() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_few_blobs_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .invalidate_blobs_too_few() + .blobs_response() + .missing_components_from_blob_request() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_many_blobs_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + 
.block_response_triggering_process() + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + #[test] + fn too_few_blobs_response_then_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .invalidate_blobs_too_few() + .blobs_response() + .blobs_response_was_valid() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn too_many_blobs_response_then_block_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn single_block_and_blob_lookup_block_returned_first_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .block_response_triggering_process() + .blobs_response() + .blobs_response_was_valid() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_blobs_returned_first_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .block_response_triggering_process() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_empty_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .empty_block_response() + .expect_block_request() + .expect_no_penalty() + .expect_no_blobs_request() + .empty_blobs_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .block_response_triggering_process() + 
.missing_components_from_block_request(); + } + + #[test] + fn single_block_response_then_empty_blob_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .empty_blobs_response() + .missing_components_from_blob_request() + .expect_blobs_request() + .expect_no_penalty() + .expect_no_block_request(); + } + + #[test] + fn single_blob_response_then_empty_block_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .missing_components_from_blob_request() + .empty_block_response() + .expect_block_request() + .expect_no_penalty() + .expect_no_blobs_request(); + } + + #[test] + fn single_invalid_block_response_then_blob_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .block_response_triggering_process() + .invalid_block_processed() + .expect_penalty() + .expect_block_request() + .expect_no_blobs_request() + .blobs_response() + .missing_components_from_blob_request() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_invalid_blob_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .block_response_triggering_process() + .missing_components_from_block_request() + .blobs_response() + .invalid_blob_processed() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_few_blobs_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + 
+ tester + .block_response_triggering_process() + .missing_components_from_block_request() + .invalidate_blobs_too_few() + .blobs_response() + .missing_components_from_blob_request() + .expect_blobs_request() + .expect_no_penalty() + .expect_no_block_request(); + } + + #[test] + fn single_block_response_then_too_many_blobs_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .block_response_triggering_process() + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + #[test] + fn too_few_blobs_response_then_block_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .invalidate_blobs_too_few() + .blobs_response() + .blobs_response_was_valid() + .missing_components_from_blob_request() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .block_response_triggering_process() + .missing_components_from_block_request() + .expect_blobs_request(); + } + + #[test] + fn too_many_blobs_response_then_block_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn parent_block_unknown_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_invalid_parent() { + let Some(tester) = 
DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .empty_parent_blobs_response() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .parent_block_response() + .expect_penalty() + 
.expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn parent_blob_unknown_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_blob_invalid_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; 
+ }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .empty_parent_blobs_response() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .parent_block_response() + .expect_penalty() + .expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } +} diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs new file mode 100644 index 0000000000..819ea8e30b --- /dev/null +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -0,0 +1,83 @@ +use beacon_chain::block_verification_types::RpcBlock; +use ssz_types::VariableList; +use std::{collections::VecDeque, sync::Arc}; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; + +#[derive(Debug, Default)] +pub struct BlocksAndBlobsRequestInfo { + /// Blocks we have received awaiting for their corresponding sidecar. + accumulated_blocks: VecDeque>>, + /// Sidecars we have received awaiting for their corresponding block. + accumulated_sidecars: VecDeque>>, + /// Whether the individual RPC request for blocks is finished or not. + is_blocks_stream_terminated: bool, + /// Whether the individual RPC request for sidecars is finished or not. 
+ is_sidecars_stream_terminated: bool, +} + +impl BlocksAndBlobsRequestInfo { + pub fn add_block_response(&mut self, block_opt: Option>>) { + match block_opt { + Some(block) => self.accumulated_blocks.push_back(block), + None => self.is_blocks_stream_terminated = true, + } + } + + pub fn add_sidecar_response(&mut self, sidecar_opt: Option>>) { + match sidecar_opt { + Some(sidecar) => self.accumulated_sidecars.push_back(sidecar), + None => self.is_sidecars_stream_terminated = true, + } + } + + pub fn into_responses(self) -> Result>, String> { + let BlocksAndBlobsRequestInfo { + accumulated_blocks, + accumulated_sidecars, + .. + } = self; + + // There can't be more more blobs than blocks. i.e. sending any blob (empty + // included) for a skipped slot is not permitted. + let mut responses = Vec::with_capacity(accumulated_blocks.len()); + let mut blob_iter = accumulated_sidecars.into_iter().peekable(); + for block in accumulated_blocks.into_iter() { + let mut blob_list = Vec::with_capacity(T::max_blobs_per_block()); + while { + let pair_next_blob = blob_iter + .peek() + .map(|sidecar| sidecar.slot == block.slot()) + .unwrap_or(false); + pair_next_blob + } { + blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); + } + + let mut blobs_buffer = vec![None; T::max_blobs_per_block()]; + for blob in blob_list { + let blob_index = blob.index as usize; + let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { + return Err("Invalid blob index".to_string()); + }; + if blob_opt.is_some() { + return Err("Repeat blob index".to_string()); + } else { + *blob_opt = Some(blob); + } + } + let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::>()); + responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) + } + + // if accumulated sidecars is not empty, throw an error. 
+ if blob_iter.next().is_some() { + return Err("Received sidecars that don't pair well".to_string()); + } + + Ok(responses) + } + + pub fn is_finished(&self) -> bool { + self.is_blocks_stream_terminated && self.is_sidecars_stream_terminated + } +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index b910f7b33c..c1e9cde3fe 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -34,26 +34,38 @@ //! search for the block and subsequently search for parents if needed. use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; -use super::block_lookups::BlockLookups; -use super::network_context::SyncNetworkContext; +use super::block_lookups::{BlockLookups, PeerShouldHave}; +use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; +use crate::sync::block_lookups::common::{Current, Parent}; +use crate::sync::block_lookups::{BlobRequestState, BlockRequestState}; +use crate::sync::network_context::BlocksAndBlobsByRangeRequest; +use crate::sync::range_sync::ByRangeRequestType; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_availability_checker::ChildComponents; +use beacon_chain::{ + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, +}; use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; +use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, 
PeerId}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use std::boxed::Box; +use std::ops::IndexMut; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -66,21 +78,37 @@ pub const SLOT_IMPORT_TOLERANCE: usize = 32; pub type Id = u32; +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct SingleLookupReqId { + pub id: Id, + pub req_counter: Id, +} + /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum RequestId { /// Request searching for a block given a hash. - SingleBlock { id: Id }, - /// Request searching for a block's parent. The id is the chain - ParentLookup { id: Id }, + SingleBlock { id: SingleLookupReqId }, + /// Request searching for a set of blobs given a hash. + SingleBlob { id: SingleLookupReqId }, + /// Request searching for a block's parent. The id is the chain, share with the corresponding + /// blob id. + ParentLookup { id: SingleLookupReqId }, + /// Request searching for a block's parent blobs. The id is the chain, shared with the corresponding + /// block id. + ParentLookupBlob { id: SingleLookupReqId }, /// Request was from the backfill sync algorithm. - BackFillSync { id: Id }, + BackFillBlocks { id: Id }, + /// Backfill request that is composed by both a block range request and a blob range request. + BackFillBlockAndBlobs { id: Id }, /// The request was from a chain in the range sync algorithm. 
- RangeSync { id: Id }, + RangeBlocks { id: Id }, + /// Range request that is composed by both a block range request and a blob range request. + RangeBlockAndBlobs { id: Id }, } #[derive(Debug)] -/// A message than can be sent to the sync manager thread. +/// A message that can be sent to the sync manager thread. pub enum SyncMessage { /// A useful peer has been discovered. AddPeer(PeerId, SyncInfo), @@ -93,12 +121,30 @@ pub enum SyncMessage { seen_timestamp: Duration, }, - /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Arc>, Hash256), + /// A blob has been received from the RPC. + RpcBlob { + request_id: RequestId, + peer_id: PeerId, + blob_sidecar: Option>>, + seen_timestamp: Duration, + }, - /// A peer has sent an object that references a block that is unknown. This triggers the + /// A block with an unknown parent has been received. + UnknownParentBlock(PeerId, RpcBlock, Hash256), + + /// A blob with an unknown parent has been received. + UnknownParentBlob(PeerId, Arc>), + + /// A peer has sent an attestation that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. - UnknownBlockHash(PeerId, Hash256), + UnknownBlockHashFromAttestation(PeerId, Hash256), + + /// A peer has sent a blob that references a block that is unknown or a peer has sent a block for + /// which we haven't received blobs. + /// + /// We will either attempt to find the block matching the unknown hash immediately or queue a lookup, + /// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`. + MissingGossipBlockComponents(Vec, Hash256), /// A peer has disconnected. Disconnect(PeerId), @@ -107,6 +153,7 @@ pub enum SyncMessage { RpcError { peer_id: PeerId, request_id: RequestId, + error: RPCError, }, /// A batch has been processed by the block processor thread. 
@@ -116,9 +163,9 @@ pub enum SyncMessage { }, /// Block processed - BlockProcessed { + BlockComponentProcessed { process_type: BlockProcessType, - result: BlockProcessResult, + result: BlockProcessingResult, }, } @@ -126,12 +173,13 @@ pub enum SyncMessage { #[derive(Debug, Clone)] pub enum BlockProcessType { SingleBlock { id: Id }, + SingleBlob { id: Id }, ParentLookup { chain_hash: Hash256 }, } #[derive(Debug)] -pub enum BlockProcessResult { - Ok, +pub enum BlockProcessingResult { + Ok(AvailabilityProcessingStatus), Err(BlockError), Ignored, } @@ -198,10 +246,18 @@ pub fn spawn( let mut sync_manager = SyncManager { chain: beacon_chain.clone(), input_channel: sync_recv, - network: SyncNetworkContext::new(network_send, beacon_processor, log.clone()), + network: SyncNetworkContext::new( + network_send, + beacon_processor.clone(), + beacon_chain.clone(), + log.clone(), + ), range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), - backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), - block_lookups: BlockLookups::new(log.clone()), + backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()), + block_lookups: BlockLookups::new( + beacon_chain.data_availability_checker.clone(), + log.clone(), + ), log: log.clone(), }; @@ -250,19 +306,50 @@ impl SyncManager { } /// Handles RPC errors related to requests that were emitted from the sync manager. 
- fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId) { + fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); match request_id { RequestId::SingleBlock { id } => { self.block_lookups - .single_block_lookup_failed(id, &mut self.network); + .single_block_lookup_failed::>( + id, + &peer_id, + &self.network, + error, + ); + } + RequestId::SingleBlob { id } => { + self.block_lookups + .single_block_lookup_failed::>( + id, + &peer_id, + &self.network, + error, + ); } RequestId::ParentLookup { id } => { self.block_lookups - .parent_lookup_failed(id, peer_id, &mut self.network); + .parent_lookup_failed::>( + id, + peer_id, + &self.network, + error, + ); } - RequestId::BackFillSync { id } => { - if let Some(batch_id) = self.network.backfill_sync_response(id, true) { + RequestId::ParentLookupBlob { id } => { + self.block_lookups + .parent_lookup_failed::>( + id, + peer_id, + &self.network, + error, + ); + } + RequestId::BackFillBlocks { id } => { + if let Some(batch_id) = self + .network + .backfill_request_failed(id, ByRangeRequestType::Blocks) + { match self .backfill_sync .inject_error(&mut self.network, batch_id, &peer_id, id) @@ -272,8 +359,41 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id)) = self.network.range_sync_response(id, true) { + + RequestId::BackFillBlockAndBlobs { id } => { + if let Some(batch_id) = self + .network + .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs) + { + match self + .backfill_sync + .inject_error(&mut self.network, batch_id, &peer_id, id) + { + Ok(_) => {} + Err(_) => self.update_sync_state(), + } + } + } + RequestId::RangeBlocks { id } => { + if let Some((chain_id, batch_id)) = self + .network + .range_sync_request_failed(id, ByRangeRequestType::Blocks) + { + self.range_sync.inject_error( + &mut self.network, + peer_id, + batch_id, + chain_id, + id, + ); + 
self.update_sync_state() + } + } + RequestId::RangeBlockAndBlobs { id } => { + if let Some((chain_id, batch_id)) = self + .network + .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs) + { self.range_sync.inject_error( &mut self.network, peer_id, @@ -499,37 +619,70 @@ impl SyncManager { } => { self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); } - SyncMessage::UnknownBlock(peer_id, block, block_root) => { - // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals().sync_state.read().is_synced() { - let head_slot = self.chain.canonical_head.cached_head().head_slot(); - let unknown_block_slot = block.slot(); - - // if the block is far in the future, ignore it. If its within the slot tolerance of - // our current head, regardless of the syncing state, fetch it. - if (head_slot >= unknown_block_slot - && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - || (head_slot < unknown_block_slot - && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - { - return; - } + SyncMessage::RpcBlob { + request_id, + peer_id, + blob_sidecar, + seen_timestamp, + } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { + let block_slot = block.slot(); + let parent_root = block.parent_root(); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + block_slot, + block.into(), + ); + } + SyncMessage::UnknownParentBlob(peer_id, blob) => { + let blob_slot = blob.slot; + let block_root = blob.block_root; + let parent_root = blob.block_parent_root; + let blob_index = blob.index; + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); + return; } - if self.network_globals().peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { 
- self.block_lookups - .search_parent(block_root, block, peer_id, &mut self.network); + let mut blobs = FixedBlobSidecarList::default(); + *blobs.index_mut(blob_index as usize) = Some(blob); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + blob_slot, + ChildComponents::new(block_root, None, Some(blobs)), + ); + } + SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => { + // If we are not synced, ignore this block. + if self.synced_and_connected(&peer_id) { + self.block_lookups.search_block( + block_hash, + &[PeerShouldHave::BlockAndBlobs(peer_id)], + &mut self.network, + ); } } - SyncMessage::UnknownBlockHash(peer_id, block_hash) => { + SyncMessage::MissingGossipBlockComponents(peer_id, block_root) => { + let peers_guard = self.network_globals().peers.read(); + let connected_peers = peer_id + .into_iter() + .filter_map(|peer_id| { + if peers_guard.is_connected(&peer_id) { + Some(PeerShouldHave::Neither(peer_id)) + } else { + None + } + }) + .collect::>(); + drop(peers_guard); + // If we are not synced, ignore this block. 
- if self.network_globals().sync_state.read().is_synced() - && self.network_globals().peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { + if self.synced() && !connected_peers.is_empty() { self.block_lookups - .search_block(block_hash, peer_id, &mut self.network); + .search_block(block_root, &connected_peers, &mut self.network) } } SyncMessage::Disconnect(peer_id) => { @@ -538,15 +691,26 @@ impl SyncManager { SyncMessage::RpcError { peer_id, request_id, - } => self.inject_error(peer_id, request_id), - SyncMessage::BlockProcessed { + error, + } => self.inject_error(peer_id, request_id, error), + SyncMessage::BlockComponentProcessed { process_type, result, } => match process_type { - BlockProcessType::SingleBlock { id } => { - self.block_lookups - .single_block_processed(id, result, &mut self.network) - } + BlockProcessType::SingleBlock { id } => self + .block_lookups + .single_block_component_processed::>( + id, + result, + &mut self.network, + ), + BlockProcessType::SingleBlob { id } => self + .block_lookups + .single_block_component_processed::>( + id, + result, + &mut self.network, + ), BlockProcessType::ParentLookup { chain_hash } => self .block_lookups .parent_block_processed(chain_hash, result, &mut self.network), @@ -578,11 +742,64 @@ impl SyncManager { } ChainSegmentProcessId::ParentLookup(chain_hash) => self .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), + .parent_chain_processed(chain_hash, result, &self.network), }, } } + fn handle_unknown_parent( + &mut self, + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + child_components: ChildComponents, + ) { + if self.should_search_for_block(slot, &peer_id) { + self.block_lookups.search_parent( + slot, + block_root, + parent_root, + peer_id, + &mut self.network, + ); + self.block_lookups.search_child_block( + block_root, + child_components, + &[PeerShouldHave::Neither(peer_id)], + &mut self.network, + ); + } + } + 
+ fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool { + if !self.network_globals().sync_state.read().is_synced() { + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. + if (head_slot >= block_slot + && head_slot.sub(block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < block_slot + && block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return false; + } + } + + self.network_globals().peers.read().is_connected(peer_id) + && self.network.is_execution_engine_online() + } + + fn synced(&mut self) -> bool { + self.network_globals().sync_state.read().is_synced() + && self.network.is_execution_engine_online() + } + + fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool { + self.synced() && self.network_globals().peers.read().is_connected(peer_id) + } + fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) { self.network.update_execution_engine_state(engine_state); @@ -642,35 +859,46 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + block: Option>>, seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( - id, - peer_id, - beacon_block, - seen_timestamp, - &mut self.network, - ), - RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( - id, - peer_id, - beacon_block, - seen_timestamp, - &mut self.network, - ), - RequestId::BackFillSync { id } => { + RequestId::SingleBlock { id } => self + .block_lookups + .single_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &self.network, + ), + RequestId::SingleBlob { .. 
} => { + crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); + } + RequestId::ParentLookup { id } => self + .block_lookups + .parent_lookup_response::>( + id, + peer_id, + block, + seen_timestamp, + &self.network, + ), + RequestId::ParentLookupBlob { id: _ } => { + crit!(self.log, "Block received during parent blob request"; "peer_id" => %peer_id ); + } + RequestId::BackFillBlocks { id } => { + let is_stream_terminator = block.is_none(); if let Some(batch_id) = self .network - .backfill_sync_response(id, beacon_block.is_none()) + .backfill_sync_only_blocks_response(id, is_stream_terminator) { match self.backfill_sync.on_block_response( &mut self.network, batch_id, &peer_id, id, - beacon_block, + block.map(|b| RpcBlock::new_without_blobs(None, b)), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -682,9 +910,11 @@ impl SyncManager { } } } - RequestId::RangeSync { id } => { - if let Some((chain_id, batch_id)) = - self.network.range_sync_response(id, beacon_block.is_none()) + RequestId::RangeBlocks { id } => { + let is_stream_terminator = block.is_none(); + if let Some((chain_id, batch_id)) = self + .network + .range_sync_block_only_response(id, is_stream_terminator) { self.range_sync.blocks_by_range_response( &mut self.network, @@ -692,26 +922,222 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block, + block.map(|b| RpcBlock::new_without_blobs(None, b)), ); self.update_sync_state(); } } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, block.into()) + } + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, block.into()) + } + } + } + + fn rpc_blob_received( + &mut self, + request_id: RequestId, + peer_id: PeerId, + blob: Option>>, + seen_timestamp: Duration, + ) { + match request_id { + RequestId::SingleBlock { .. 
} => { + crit!(self.log, "Single blob received during block request"; "peer_id" => %peer_id ); + } + RequestId::SingleBlob { id } => { + if let Some(blob) = blob.as_ref() { + debug!(self.log, + "Peer returned blob for single lookup"; + "peer_id" => %peer_id , + "blob_id" =>?blob.id() + ); + } + self.block_lookups + .single_lookup_response::>( + id, + peer_id, + blob, + seen_timestamp, + &self.network, + ) + } + + RequestId::ParentLookup { id: _ } => { + crit!(self.log, "Single blob received during parent block request"; "peer_id" => %peer_id ); + } + RequestId::ParentLookupBlob { id } => { + if let Some(blob) = blob.as_ref() { + debug!(self.log, + "Peer returned blob for parent lookup"; + "peer_id" => %peer_id , + "blob_id" =>?blob.id() + ); + } + self.block_lookups + .parent_lookup_response::>( + id, + peer_id, + blob, + seen_timestamp, + &self.network, + ) + } + RequestId::BackFillBlocks { id: _ } => { + crit!(self.log, "Blob received during backfill block request"; "peer_id" => %peer_id ); + } + RequestId::RangeBlocks { id: _ } => { + crit!(self.log, "Blob received during range block request"; "peer_id" => %peer_id ); + } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, blob.into()) + } + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, blob.into()) + } + } + } + + /// Handles receiving a response for a range sync request that should have both blocks and + /// blobs. 
+ fn range_block_and_blobs_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some((chain_id, resp)) = self + .network + .range_sync_block_and_blob_response(id, block_or_blob) + { + match resp.responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(Some) + // chain the stream terminator + .chain(vec![None]) + { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + resp.batch_id, + id, + block, + ); + self.update_sync_state(); + } + } + Err(e) => { + // Re-insert the request so we can retry + let new_req = BlocksAndBlobsByRangeRequest { + chain_id, + batch_id: resp.batch_id, + block_blob_info: <_>::default(), + }; + self.network + .insert_range_blocks_and_blobs_request(id, new_req); + // inform range that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, + "Blocks and blobs request for range received invalid data"; + "peer_id" => %peer_id, + "batch_id" => resp.batch_id, + "error" => e.clone() + ); + let id = RequestId::RangeBlockAndBlobs { id }; + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "block_blob_faulty_batch", + ); + self.inject_error(peer_id, id, RPCError::InvalidData(e)) + } + } + } + } + + /// Handles receiving a response for a Backfill sync request that should have both blocks and + /// blobs. 
+ fn backfill_block_and_blobs_response( + &mut self, + id: Id, + peer_id: PeerId, + block_or_blob: BlockOrBlob, + ) { + if let Some(resp) = self + .network + .backfill_sync_block_and_blob_response(id, block_or_blob) + { + match resp.responses { + Ok(blocks) => { + for block in blocks + .into_iter() + .map(Some) + // chain the stream terminator + .chain(vec![None]) + { + match self.backfill_sync.on_block_response( + &mut self.network, + resp.batch_id, + &peer_id, + id, + block, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); + } + } + } + } + Err(e) => { + // Re-insert the request so we can retry + self.network.insert_backfill_blocks_and_blobs_requests( + id, + resp.batch_id, + <_>::default(), + ); + + // inform backfill that the request needs to be treated as failed + // With time we will want to downgrade this log + warn!( + self.log, "Blocks and blobs request for backfill received invalid data"; + "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e.clone() + ); + let id = RequestId::BackFillBlockAndBlobs { id }; + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "block_blob_faulty_backfill_batch", + ); + self.inject_error(peer_id, id, RPCError::InvalidData(e)) + } + } } } } -impl From>> for BlockProcessResult { - fn from(result: Result>) -> Self { +impl From>> + for BlockProcessingResult +{ + fn from(result: Result>) -> Self { match result { - Ok(_) => BlockProcessResult::Ok, - Err(e) => e.into(), + Ok(status) => BlockProcessingResult::Ok(status), + Err(e) => BlockProcessingResult::Err(e), } } } -impl From> for BlockProcessResult { +impl From> for BlockProcessingResult { fn from(e: BlockError) -> Self { - BlockProcessResult::Err(e) + BlockProcessingResult::Err(e) } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs 
index dc18a5c981..7b244bcece 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -3,6 +3,7 @@ //! Stores the various syncing methods for the beacon chain. mod backfill_sync; mod block_lookups; +mod block_sidecar_coupling; pub mod manager; mod network_context; mod peer_sync_info; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index adc235130b..c01bc3e428 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,21 +1,38 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. +use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use super::manager::{Id, RequestId as SyncRequestId}; -use super::range_sync::{BatchId, ChainId}; +use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChainTypes, EngineState}; +use crate::sync::block_lookups::common::LookupType; +use crate::sync::manager::SingleLookupReqId; +use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; +use std::collections::hash_map::Entry; use std::sync::Arc; use tokio::sync::mpsc; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; + +pub struct BlocksAndBlobsByRangeResponse { + pub batch_id: BatchId, + pub responses: Result>, String>, +} + +pub struct BlocksAndBlobsByRangeRequest { 
+ pub chain_id: ChainId, + pub batch_id: BatchId, + pub block_blob_info: BlocksAndBlobsRequestInfo, +} /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. - pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender>, @@ -29,6 +46,13 @@ pub struct SyncNetworkContext { /// BlocksByRange requests made by backfill syncing. backfill_requests: FnvHashMap, + /// BlocksByRange requests paired with BlobsByRange requests made by the range. + range_blocks_and_blobs_requests: FnvHashMap>, + + /// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync. + backfill_blocks_and_blobs_requests: + FnvHashMap)>, + /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. execution_engine_state: EngineState, @@ -36,23 +60,47 @@ pub struct SyncNetworkContext { /// Sends work to the beacon processor via a channel. network_beacon_processor: Arc>, + pub chain: Arc>, + /// Logger for the `SyncNetworkContext`. - log: slog::Logger, + pub log: slog::Logger, +} + +/// Small enumeration to make dealing with block and blob requests easier. 
+pub enum BlockOrBlob { + Block(Option>>), + Blob(Option>>), +} + +impl From>>> for BlockOrBlob { + fn from(block: Option>>) -> Self { + BlockOrBlob::Block(block) + } +} + +impl From>>> for BlockOrBlob { + fn from(blob: Option>>) -> Self { + BlockOrBlob::Blob(blob) + } } impl SyncNetworkContext { pub fn new( network_send: mpsc::UnboundedSender>, network_beacon_processor: Arc>, + chain: Arc>, log: slog::Logger, ) -> Self { - Self { + SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, range_requests: FnvHashMap::default(), backfill_requests: FnvHashMap::default(), + range_blocks_and_blobs_requests: FnvHashMap::default(), + backfill_blocks_and_blobs_requests: FnvHashMap::default(), network_beacon_processor, + chain, log, } } @@ -71,11 +119,7 @@ impl SyncNetworkContext { .unwrap_or_default() } - pub fn status_peers( - &mut self, - chain: &C, - peers: impl Iterator, - ) { + pub fn status_peers(&self, chain: &C, peers: impl Iterator) { let status_message = chain.status_message(); for peer_id in peers { debug!( @@ -103,123 +147,357 @@ impl SyncNetworkContext { pub fn blocks_by_range_request( &mut self, peer_id: PeerId, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, chain_id: ChainId, batch_id: BatchId, ) -> Result { - trace!( - self.log, - "Sending BlocksByRange Request"; - "method" => "BlocksByRange", - "count" => request.count(), - "peer" => %peer_id, - ); - let request = Request::BlocksByRange(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeSync { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - self.range_requests.insert(id, (chain_id, batch_id)); - Ok(id) + match batch_type { + ByRangeRequestType::Blocks => { + trace!( + self.log, + "Sending BlocksByRange request"; + "method" => "BlocksByRange", + "count" => request.count(), + "peer" => %peer_id, + ); + let request = 
Request::BlocksByRange(request); + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::RangeBlocks { id }); + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + })?; + self.range_requests.insert(id, (chain_id, batch_id)); + Ok(id) + } + ByRangeRequestType::BlocksAndBlobs => { + debug!( + self.log, + "Sending BlocksByRange and BlobsByRange requests"; + "method" => "Mixed by range request", + "count" => request.count(), + "peer" => %peer_id, + ); + + // create the shared request id. This is fine since the rpc handles substream ids. + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }); + + // Create the blob request based on the blob request. + let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }); + let blocks_request = Request::BlocksByRange(request); + + // Send both requests. Make sure both can be sent. 
+ self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blocks_request, + request_id, + })?; + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blobs_request, + request_id, + })?; + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.range_blocks_and_blobs_requests.insert( + id, + BlocksAndBlobsByRangeRequest { + chain_id, + batch_id, + block_blob_info, + }, + ); + Ok(id) + } + } } /// A blocks by range request sent by the backfill sync algorithm pub fn backfill_blocks_by_range_request( &mut self, peer_id: PeerId, + batch_type: ByRangeRequestType, request: BlocksByRangeRequest, batch_id: BatchId, ) -> Result { - trace!( - self.log, - "Sending backfill BlocksByRange Request"; - "method" => "BlocksByRange", - "count" => request.count(), - "peer" => %peer_id, - ); - let request = Request::BlocksByRange(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillSync { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - self.backfill_requests.insert(id, batch_id); - Ok(id) + match batch_type { + ByRangeRequestType::Blocks => { + trace!( + self.log, + "Sending backfill BlocksByRange request"; + "method" => "BlocksByRange", + "count" => request.count(), + "peer" => %peer_id, + ); + let request = Request::BlocksByRange(request); + let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlocks { id }); + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + })?; + self.backfill_requests.insert(id, batch_id); + Ok(id) + } + ByRangeRequestType::BlocksAndBlobs => { + debug!( + self.log, + "Sending backfill BlocksByRange and BlobsByRange requests"; + "method" => "Mixed by range request", + "count" => request.count(), + "peer" => %peer_id, + ); + + // create the shared request id. This is fine since the rpc handles substream ids. 
+ let id = self.next_id(); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlockAndBlobs { id }); + + // Create the blob request based on the blob request. + let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }); + let blocks_request = Request::BlocksByRange(request); + + // Send both requests. Make sure both can be sent. + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blocks_request, + request_id, + })?; + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: blobs_request, + request_id, + })?; + let block_blob_info = BlocksAndBlobsRequestInfo::default(); + self.backfill_blocks_and_blobs_requests + .insert(id, (batch_id, block_blob_info)); + Ok(id) + } + } } - /// Received a blocks by range response. - pub fn range_sync_response( + /// Response for a request that is only for blocks. + pub fn range_sync_block_only_response( &mut self, request_id: Id, - remove: bool, + is_stream_terminator: bool, ) -> Option<(ChainId, BatchId)> { - if remove { + if is_stream_terminator { self.range_requests.remove(&request_id) } else { - self.range_requests.get(&request_id).cloned() + self.range_requests.get(&request_id).copied() } } - /// Received a blocks by range response. - pub fn backfill_sync_response(&mut self, request_id: Id, remove: bool) -> Option { - if remove { + /// Received a blocks by range response for a request that couples blocks and blobs. 
+ pub fn range_sync_block_and_blob_response( + &mut self, + request_id: Id, + block_or_blob: BlockOrBlob, + ) -> Option<(ChainId, BlocksAndBlobsByRangeResponse)> { + match self.range_blocks_and_blobs_requests.entry(request_id) { + Entry::Occupied(mut entry) => { + let req = entry.get_mut(); + let info = &mut req.block_blob_info; + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } + if info.is_finished() { + // If the request is finished, dequeue everything + let BlocksAndBlobsByRangeRequest { + chain_id, + batch_id, + block_blob_info, + } = entry.remove(); + Some(( + chain_id, + BlocksAndBlobsByRangeResponse { + batch_id, + responses: block_blob_info.into_responses(), + }, + )) + } else { + None + } + } + Entry::Vacant(_) => None, + } + } + + pub fn range_sync_request_failed( + &mut self, + request_id: Id, + batch_type: ByRangeRequestType, + ) -> Option<(ChainId, BatchId)> { + let req = match batch_type { + ByRangeRequestType::BlocksAndBlobs => self + .range_blocks_and_blobs_requests + .remove(&request_id) + .map(|req| (req.chain_id, req.batch_id)), + ByRangeRequestType::Blocks => self.range_requests.remove(&request_id), + }; + if let Some(req) = req { + debug!( + self.log, + "Range sync request failed"; + "request_id" => request_id, + "batch_type" => ?batch_type, + "chain_id" => ?req.0, + "batch_id" => ?req.1 + ); + Some(req) + } else { + debug!(self.log, "Range sync request failed"; "request_id" => request_id, "batch_type" => ?batch_type); + None + } + } + + pub fn backfill_request_failed( + &mut self, + request_id: Id, + batch_type: ByRangeRequestType, + ) -> Option { + let batch_id = match batch_type { + ByRangeRequestType::BlocksAndBlobs => self + .backfill_blocks_and_blobs_requests + .remove(&request_id) + .map(|(batch_id, _info)| batch_id), + ByRangeRequestType::Blocks => self.backfill_requests.remove(&request_id), + }; + if let 
Some(batch_id) = batch_id { + debug!( + self.log, + "Backfill sync request failed"; + "request_id" => request_id, + "batch_type" => ?batch_type, + "batch_id" => ?batch_id + ); + Some(batch_id) + } else { + debug!(self.log, "Backfill sync request failed"; "request_id" => request_id, "batch_type" => ?batch_type); + None + } + } + + /// Response for a request that is only for blocks. + pub fn backfill_sync_only_blocks_response( + &mut self, + request_id: Id, + is_stream_terminator: bool, + ) -> Option { + if is_stream_terminator { self.backfill_requests.remove(&request_id) } else { - self.backfill_requests.get(&request_id).cloned() + self.backfill_requests.get(&request_id).copied() } } - /// Sends a blocks by root request for a single block lookup. - pub fn single_block_lookup_request( + /// Received a blocks by range or blobs by range response for a request that couples blocks ' + /// and blobs. + pub fn backfill_sync_block_and_blob_response( &mut self, - peer_id: PeerId, - request: BlocksByRootRequest, - ) -> Result { - trace!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "count" => request.block_roots().len(), - "peer" => %peer_id - ); - let request = Request::BlocksByRoot(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id }); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - })?; - Ok(id) + request_id: Id, + block_or_blob: BlockOrBlob, + ) -> Option> { + match self.backfill_blocks_and_blobs_requests.entry(request_id) { + Entry::Occupied(mut entry) => { + let (_, info) = entry.get_mut(); + match block_or_blob { + BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), + BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), + } + if info.is_finished() { + // If the request is finished, dequeue everything + let (batch_id, info) = entry.remove(); + + let responses = info.into_responses(); + 
Some(BlocksAndBlobsByRangeResponse { + batch_id, + responses, + }) + } else { + None + } + } + Entry::Vacant(_) => None, + } } - /// Sends a blocks by root request for a parent request. - pub fn parent_lookup_request( - &mut self, + pub fn block_lookup_request( + &self, + id: SingleLookupReqId, peer_id: PeerId, request: BlocksByRootRequest, - ) -> Result { - trace!( + lookup_type: LookupType, + ) -> Result<(), &'static str> { + let sync_id = match lookup_type { + LookupType::Current => SyncRequestId::SingleBlock { id }, + LookupType::Parent => SyncRequestId::ParentLookup { id }, + }; + let request_id = RequestId::Sync(sync_id); + + debug!( self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "count" => request.block_roots().len(), - "peer" => %peer_id + "block_roots" => ?request.block_roots().to_vec(), + "peer" => %peer_id, + "lookup_type" => ?lookup_type ); - let request = Request::BlocksByRoot(request); - let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id }); + self.send_network_msg(NetworkMessage::SendRequest { peer_id, - request, + request: Request::BlocksByRoot(request), request_id, })?; - Ok(id) + Ok(()) + } + + pub fn blob_lookup_request( + &self, + id: SingleLookupReqId, + blob_peer_id: PeerId, + blob_request: BlobsByRootRequest, + lookup_type: LookupType, + ) -> Result<(), &'static str> { + let sync_id = match lookup_type { + LookupType::Current => SyncRequestId::SingleBlob { id }, + LookupType::Parent => SyncRequestId::ParentLookupBlob { id }, + }; + let request_id = RequestId::Sync(sync_id); + + if let Some(block_root) = blob_request.blob_ids.first().map(|id| id.block_root) { + let indices = blob_request + .blob_ids + .iter() + .map(|id| id.index) + .collect::>(); + debug!( + self.log, + "Sending BlobsByRoot Request"; + "method" => "BlobsByRoot", + "block_root" => ?block_root, + "blob_indices" => ?indices, + "peer" => %blob_peer_id, + "lookup_type" => ?lookup_type + ); + + 
self.send_network_msg(NetworkMessage::SendRequest { + peer_id: blob_peer_id, + request: Request::BlobsByRoot(blob_request), + request_id, + })?; + } + Ok(()) } pub fn is_execution_engine_online(&self) -> bool { @@ -246,7 +524,7 @@ impl SyncNetworkContext { } /// Reports to the scoring algorithm the behaviour of a peer. - pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction, msg: &'static str) { + pub fn report_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action); self.network_send .send(NetworkMessage::ReportPeer { @@ -261,7 +539,7 @@ impl SyncNetworkContext { } /// Subscribes to core topics. - pub fn subscribe_core_topics(&mut self) { + pub fn subscribe_core_topics(&self) { self.network_send .send(NetworkMessage::SubscribeCoreTopics) .unwrap_or_else(|e| { @@ -270,7 +548,7 @@ impl SyncNetworkContext { } /// Sends an arbitrary network message. - fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { + fn send_network_msg(&self, msg: NetworkMessage) -> Result<(), &'static str> { self.network_send.send(msg).map_err(|_| { debug!(self.log, "Could not send message to the network service"); "Network channel send Failed" @@ -286,9 +564,49 @@ impl SyncNetworkContext { &self.network_beacon_processor } - fn next_id(&mut self) -> Id { + pub fn next_id(&mut self) -> Id { let id = self.request_id; self.request_id += 1; id } + + /// Check whether a batch for this epoch (and only this epoch) should request just blocks or + /// blocks and blobs. + pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType { + // Induces a compile time panic if this doesn't hold true. 
+ #[allow(clippy::assertions_on_constants)] + const _: () = assert!( + super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1 + && super::range_sync::EPOCHS_PER_BATCH == 1, + "To deal with alignment with deneb boundaries, batches need to be of just one epoch" + ); + + if let Some(data_availability_boundary) = self.chain.data_availability_boundary() { + if epoch >= data_availability_boundary { + ByRangeRequestType::BlocksAndBlobs + } else { + ByRangeRequestType::Blocks + } + } else { + ByRangeRequestType::Blocks + } + } + + pub fn insert_range_blocks_and_blobs_request( + &mut self, + id: Id, + request: BlocksAndBlobsByRangeRequest, + ) { + self.range_blocks_and_blobs_requests.insert(id, request); + } + + pub fn insert_backfill_blocks_and_blobs_requests( + &mut self, + id: Id, + batch_id: BatchId, + request: BlocksAndBlobsRequestInfo, + ) { + self.backfill_blocks_and_blobs_requests + .insert(id, (batch_id, request)); + } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 723ea9b59d..f5c320cb88 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,11 +1,12 @@ use crate::sync::manager::Id; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use std::sync::Arc; -use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; +use strum::Display; +use types::{Epoch, EthSpec, Slot}; /// The number of times to retry a batch before it is considered failed. const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; @@ -14,6 +15,14 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3; +/// Type of expected batch. 
+#[derive(Debug, Copy, Clone, Display)] +#[strum(serialize_all = "snake_case")] +pub enum ByRangeRequestType { + BlocksAndBlobs, + Blocks, +} + /// Allows customisation of the above constants used in other sync methods such as BackFillSync. pub trait BatchConfig { /// The maximum batch download attempts. @@ -47,7 +56,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[Arc>]) -> u64; + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -59,7 +68,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { + fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -96,6 +105,8 @@ pub struct BatchInfo { failed_download_attempts: Vec, /// State of the batch. state: BatchState, + /// Whether this batch contains all blocks or all blocks and blobs. + batch_type: ByRangeRequestType, /// Pin the generic marker: std::marker::PhantomData, } @@ -105,9 +116,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>>, Id), + Downloading(PeerId, Vec>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>>), + AwaitingProcessing(PeerId, Vec>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -139,8 +150,13 @@ impl BatchInfo { /// Epoch boundary | | /// ... 
| 30 | 31 | 32 | 33 | 34 | ... | 61 | 62 | 63 | 64 | 65 | /// Batch 1 | Batch 2 | Batch 3 - pub fn new(start_epoch: &Epoch, num_of_epochs: u64) -> Self { - let start_slot = start_epoch.start_slot(T::slots_per_epoch()) + 1; + /// + /// NOTE: Removed the shift by one for deneb because otherwise the last batch before the blob + /// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to + /// deal with this for now. + /// This means finalization might be slower in deneb + pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ByRangeRequestType) -> Self { + let start_slot = start_epoch.start_slot(T::slots_per_epoch()); let end_slot = start_slot + num_of_epochs * T::slots_per_epoch(); BatchInfo { start_slot, @@ -149,6 +165,7 @@ impl BatchInfo { failed_download_attempts: Vec::new(), non_faulty_processing_attempts: 0, state: BatchState::AwaitingDownload, + batch_type, marker: std::marker::PhantomData, } } @@ -201,10 +218,13 @@ impl BatchInfo { } /// Returns a BlocksByRange request associated with the batch. - pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { - BlocksByRangeRequest::new( - self.start_slot.into(), - self.end_slot.sub(self.start_slot).into(), + pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { + ( + BlocksByRangeRequest::new( + self.start_slot.into(), + self.end_slot.sub(self.start_slot).into(), + ), + self.batch_type, ) } @@ -231,7 +251,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. 
- pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: RpcBlock) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -363,7 +383,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>>, WrongState> { + pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -461,10 +481,7 @@ pub struct Attempt { } impl Attempt { - fn new( - peer_id: PeerId, - blocks: &[Arc>], - ) -> Self { + fn new(peer_id: PeerId, blocks: &[RpcBlock]) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } @@ -498,6 +515,7 @@ impl slog::KV for BatchInfo { serializer.emit_usize("processed", self.failed_processing_attempts.len())?; serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; + serializer.emit_arguments("batch_ty", &format_args!("{}", self.batch_type))?; slog::Result::Ok(()) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index af547885dc..1e5bf09b3a 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -3,6 +3,7 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, }; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -10,8 +11,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; -use std::sync::Arc; 
-use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of /// blocks per batch are requested _at most_. A batch may request less blocks to account for @@ -19,7 +19,7 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which /// case the responder will fill the response up to the max request size, assuming they have the /// bandwidth to do so. -pub const EPOCHS_PER_BATCH: u64 = 2; +pub const EPOCHS_PER_BATCH: u64 = 1; /// The maximum number of batches to queue before requesting more. const BATCH_BUFFER_SIZE: u8 = 5; @@ -221,7 +221,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -294,19 +294,15 @@ impl SyncingChain { return Ok(KeepChain); } - let beacon_processor = match network.beacon_processor_if_enabled() { - Some(beacon_processor) => beacon_processor, - None => return Ok(KeepChain), + let Some(beacon_processor) = network.beacon_processor_if_enabled() else { + return Ok(KeepChain); }; - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => { - return Err(RemoveChain::WrongChainState(format!( - "Trying to process a batch that does not exist: {}", - batch_id - ))); - } + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Err(RemoveChain::WrongChainState(format!( + "Trying to process a batch that does not exist: {}", + batch_id + ))); }; // NOTE: We send empty batches to the processor in order to trigger the block processor @@ -598,6 +594,7 @@ impl SyncingChain { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. 
+ #[allow(clippy::modulo_one)] fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { @@ -826,9 +823,24 @@ impl SyncingChain { // sending an error /timeout) if the peer is removed from the chain for other // reasons. Check that this block belongs to the expected peer if !batch.is_expecting_block(peer_id, &request_id) { + debug!( + self.log, + "Batch not expecting block"; + "batch_epoch" => batch_id, + "batch_state" => ?batch.state(), + "peer_id" => %peer_id, + "request_id" => %request_id + ); return Ok(KeepChain); } - debug!(self.log, "Batch failed. RPC Error"; "batch_epoch" => batch_id); + debug!( + self.log, + "Batch failed. RPC Error"; + "batch_epoch" => batch_id, + "batch_state" => ?batch.state(), + "peer_id" => %peer_id, + "request_id" => %request_id + ); if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); } @@ -840,6 +852,13 @@ impl SyncingChain { } self.retry_batch_download(network, batch_id) } else { + debug!( + self.log, + "Batch not found"; + "batch_epoch" => batch_id, + "peer_id" => %peer_id, + "request_id" => %request_id + ); // this could be an error for an old batch, removed when the chain advances Ok(KeepChain) } @@ -851,9 +870,8 @@ impl SyncingChain { network: &mut SyncNetworkContext, batch_id: BatchId, ) -> ProcessingResult { - let batch = match self.batches.get_mut(&batch_id) { - Some(batch) => batch, - None => return Ok(KeepChain), + let Some(batch) = self.batches.get_mut(&batch_id) else { + return Ok(KeepChain); }; // Find a peer to request the batch @@ -886,8 +904,8 @@ impl SyncingChain { peer: PeerId, ) -> ProcessingResult { if let Some(batch) = self.batches.get_mut(&batch_id) { - let request = batch.to_blocks_by_range_request(); - match network.blocks_by_range_request(peer, request, self.id, batch_id) { + let (request, batch_type) = batch.to_blocks_by_range_request(); + match 
network.blocks_by_range_request(peer, batch_type, request, self.id, batch_id) { Ok(request_id) => { // inform the batch about the new request batch.start_downloading_from_peer(peer, request_id)?; @@ -991,7 +1009,8 @@ impl SyncingChain { if let Some(epoch) = self.optimistic_start { if let Entry::Vacant(entry) = self.batches.entry(epoch) { if let Some(peer) = idle_peers.pop() { - let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH); + let batch_type = network.batch_type(epoch); + let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH, batch_type); entry.insert(optimistic_batch); self.send_batch(network, epoch, peer)?; } @@ -1000,7 +1019,7 @@ impl SyncingChain { } while let Some(peer) = idle_peers.pop() { - if let Some(batch_id) = self.include_next_batch() { + if let Some(batch_id) = self.include_next_batch(network) { // send the batch self.send_batch(network, batch_id, peer)?; } else { @@ -1014,7 +1033,7 @@ impl SyncingChain { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. 
- fn include_next_batch(&mut self) -> Option { + fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond the target head slot if self .to_be_downloaded @@ -1048,10 +1067,11 @@ impl SyncingChain { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch self.to_be_downloaded += EPOCHS_PER_BATCH; - self.include_next_batch() + self.include_next_batch(network) } Entry::Vacant(entry) => { - entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH)); + let batch_type = network.batch_type(batch_id); + entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH, batch_type)); self.to_be_downloaded += EPOCHS_PER_BATCH; Some(batch_id) } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 65ddcefe85..364514a358 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -280,7 +280,7 @@ impl ChainCollection { old_id = Some(Some(syncing_id)); } else { // chains have the same number of peers, pick the currently syncing - // chain to avoid unnecesary switchings and try to advance it + // chain to avoid unnecessary switchings and try to advance it new_id = syncing_id; old_id = Some(None); } diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index f4db32bc96..d0f2f9217e 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,10 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState}; +pub use batch::{ + BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, + ByRangeRequestType, +}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; pub use 
sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 05ad5204b9..e42fd936e6 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,6 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; use crate::sync::BatchProcessResult; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; @@ -55,7 +56,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -141,13 +142,20 @@ where debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); self.awaiting_head_peers.remove(&peer_id); + // Because of our change in finalized sync batch size from 2 to 1 and our transition + // to using exact epoch boundaries for batches (rather than one slot past the epoch + // boundary), we need to sync finalized sync to 2 epochs + 1 slot past our peer's + // finalized slot in order to finalize the chain locally. + let target_head_slot = + remote_finalized_slot + (2 * T::EthSpec::slots_per_epoch()) + 1; + // Note: We keep current head chains. These can continue syncing whilst we complete // this new finalized chain. 
self.chains.add_peer_or_create_chain( local_info.finalized_epoch, remote_info.finalized_root, - remote_finalized_slot, + target_head_slot, peer_id, RangeSyncType::Finalized, network, @@ -202,7 +210,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>>, + beacon_block: Option>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -376,22 +384,21 @@ mod tests { use crate::NetworkMessage; use super::*; + use crate::sync::network_context::BlockOrBlob; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::EngineState; use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::rpc::BlocksByRangeRequest; - use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; - use tokio::sync::mpsc; - - use slot_clock::ManualSlotClock; + use slot_clock::TestingSlotClock; use std::collections::HashSet; use std::sync::Arc; use store::MemoryStore; - use types::{Hash256, MinimalEthSpec as E}; + use tokio::sync::mpsc; + use types::{ForkName, Hash256, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { @@ -438,7 +445,7 @@ mod tests { } type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; + Witness, E, MemoryStore, MemoryStore>; fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); @@ -457,7 +464,7 @@ mod tests { log: slog::Logger, /// To check what does sync send to the beacon processor. beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unkown blocks. + /// To set up different scenarios where sync is told about known/unknown blocks. chain: Arc, /// Needed by range to handle communication with the network. 
cx: SyncNetworkContext, @@ -507,18 +514,39 @@ mod tests { /// Reads an BlocksByRange request to a given peer from the network receiver channel. #[track_caller] - fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) { - if let Ok(NetworkMessage::SendRequest { + fn grab_request( + &mut self, + expected_peer: &PeerId, + fork_name: ForkName, + ) -> (RequestId, Option) { + let block_req_id = if let Ok(NetworkMessage::SendRequest { peer_id, - request: Request::BlocksByRange(request), + request: _, request_id, }) = self.network_rx.try_recv() { assert_eq!(&peer_id, expected_peer); - (request_id, request) + request_id } else { panic!("Should have sent a batch request to the peer") - } + }; + let blob_req_id = match fork_name { + ForkName::Deneb => { + if let Ok(NetworkMessage::SendRequest { + peer_id, + request: _, + request_id, + }) = self.network_rx.try_recv() + { + assert_eq!(&peer_id, expected_peer); + Some(request_id) + } else { + panic!("Should have sent a batch request to the peer") + } + } + _ => None, + }; + (block_req_id, blob_req_id) } /// Produce a head peer @@ -592,10 +620,19 @@ mod tests { } fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let chain = Arc::new(FakeStorage::default()); let log = build_log(slog::Level::Trace, log_enabled); + // Initialise a new beacon chain + let harness = BeaconChainHarness::>::builder(E) + .default_spec() + .logger(log.clone()) + .deterministic_keypairs(1) + .fresh_ephemeral_store() + .build(); + let chain = harness.chain; + + let fake_store = Arc::new(FakeStorage::default()); let range_sync = RangeSync::::new( - chain.clone(), + fake_store.clone(), log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -605,12 +642,13 @@ mod tests { let cx = SyncNetworkContext::new( network_tx, Arc::new(network_beacon_processor), + chain, log.new(o!("component" => "network_context")), ); let test_rig = TestRig { log, beacon_processor_rx, - chain, + chain: 
fake_store, cx, network_rx, globals, @@ -628,8 +666,14 @@ mod tests { range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); range.assert_state(RangeSyncType::Head); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); + // Sync should have requested a batch, grab the request. - let _request = rig.grab_request(&head_peer); + let _ = rig.grab_request(&head_peer, fork); // Now get a peer with an advanced finalized epoch. let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); @@ -637,7 +681,7 @@ mod tests { range.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _second_request = rig.grab_request(&finalized_peer); + let _ = rig.grab_request(&finalized_peer, fork); // Fail the head chain by disconnecting the peer. range.remove_peer(&mut rig.cx, &head_peer); @@ -655,8 +699,14 @@ mod tests { range.add_peer(&mut rig.cx, local_info, head_peer, head_info); range.assert_state(RangeSyncType::Head); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); + // Sync should have requested a batch, grab the request. - let _request = rig.grab_request(&head_peer); + let _ = rig.grab_request(&head_peer, fork); // Now get a peer with an advanced finalized epoch. let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); @@ -665,7 +715,7 @@ mod tests { range.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _second_request = rig.grab_request(&finalized_peer); + let _ = rig.grab_request(&finalized_peer, fork); // Now the chain knows both chains target roots. 
rig.chain.remember_block(head_peer_root); @@ -679,15 +729,39 @@ mod tests { #[test] fn pause_and_resume_on_ee_offline() { let (mut rig, mut range) = range(true); + let fork = rig + .cx + .chain + .spec + .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); // add some peers let (peer1, local_info, head_info) = rig.head_peer(); range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { - (rig.cx.range_sync_response(id, true).unwrap(), id) + let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); + + let (chain1, batch1, id1) = if blob_req_opt.is_some() { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + let _ = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Block(None)); + let (chain1, response) = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None)) + .unwrap(); + (chain1, response.batch_id, id) + } + other => panic!("unexpected request {:?}", other), + } + } else { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap(); + (chain, batch, id) + } + other => panic!("unexpected request {:?}", other), } - other => panic!("unexpected request {:?}", other), }; // make the ee offline @@ -702,11 +776,30 @@ mod tests { // while the ee is offline, more peers might arrive. Add a new finalized peer. 
let (peer2, local_info, finalized_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { - RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { - (rig.cx.range_sync_response(id, true).unwrap(), id) + let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); + + let (chain2, batch2, id2) = if blob_req_opt.is_some() { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + let _ = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Block(None)); + let (chain2, response) = rig + .cx + .range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None)) + .unwrap(); + (chain2, response.batch_id, id) + } + other => panic!("unexpected request {:?}", other), + } + } else { + match block_req { + RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => { + let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap(); + (chain, batch, id) + } + other => panic!("unexpected request {:?}", other), } - other => panic!("unexpected request {:?}", other), }; // send the response to the request diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index fdbecb656f..36595994f0 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -2,26 +2,28 @@ name = "operation_pool" version = "0.2.0" authors = ["Michael Sproul "] -edition = "2021" +edition = { workspace = true } [dependencies] -derivative = "2.1.1" -itertools = "0.10.0" -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -parking_lot = "0.12.0" -types = { path = "../../consensus/types" } -state_processing = { path = "../../consensus/state_processing" } -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -rayon = "1.5.0" -serde = "1.0.116" -serde_derive = "1.0.116" -store = { path = "../store" } 
-bitvec = "1" -rand = "0.8.5" +derivative = { workspace = true } +itertools = { workspace = true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +parking_lot = { workspace = true } +types = { workspace = true } +state_processing = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +store = { workspace = true } +bitvec = { workspace = true } +rand = { workspace = true } [dev-dependencies] -beacon_chain = { path = "../beacon_chain" } -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } -maplit = "1.0.2" +beacon_chain = { workspace = true } +tokio = { workspace = true } +maplit = { workspace = true } + +[features] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index fbbd5d7ddc..97c291aa85 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -30,7 +30,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { if let BeaconState::Base(ref base_state) = state { Self::new_for_base(att, state, base_state, total_active_balance, spec) } else { - Self::new_for_altair(att, state, reward_cache, total_active_balance, spec) + Self::new_for_altair_deneb(att, state, reward_cache, total_active_balance, spec) } } @@ -69,7 +69,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { } /// Initialise an attestation cover object for Altair or later. 
- pub fn new_for_altair( + pub fn new_for_altair_deneb( att: AttestationRef<'a, T>, state: &BeaconState, reward_cache: &'a RewardCache, diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs index b65975787e..f0dc6536a5 100644 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ b/beacon_node/operation_pool/src/attestation_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 0fb9bafd82..dac5e25b34 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -151,14 +151,8 @@ impl AttestationMap { indexed, } = SplitAttestation::new(attestation, attesting_indices); - let attestation_map = self - .checkpoint_map - .entry(checkpoint) - .or_insert_with(AttestationDataMap::default); - let attestations = attestation_map - .attestations - .entry(data) - .or_insert_with(Vec::new); + let attestation_map = self.checkpoint_map.entry(checkpoint).or_default(); + let attestations = attestation_map.attestations.entry(data).or_default(); // Greedily aggregate the attestation with all existing attestations. // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 24c0623f5c..7e1ddb1fd2 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1852,7 +1852,21 @@ mod release_tests { // Sign an exit with the Altair domain and a phase0 epoch. This is a weird type of exit // that is valid because after the Bellatrix fork we'll use the Altair fork domain to verify // all prior epochs. 
- let exit2 = harness.make_voluntary_exit(2, Epoch::new(0)); + let unsigned_exit = VoluntaryExit { + epoch: Epoch::new(0), + validator_index: 2, + }; + let exit2 = SignedVoluntaryExit { + message: unsigned_exit.clone(), + signature: harness.validator_keypairs[2] + .sk + .sign(unsigned_exit.signing_root(spec.compute_domain( + Domain::VoluntaryExit, + harness.spec.altair_fork_version, + harness.chain.genesis_validators_root, + ))), + }; + let verified_exit2 = exit2 .clone() .validate(&bellatrix_head.beacon_state, &harness.chain.spec) diff --git a/beacon_node/operation_pool/src/sync_aggregate_id.rs b/beacon_node/operation_pool/src/sync_aggregate_id.rs index 401e0c5f82..40d6e36490 100644 --- a/beacon_node/operation_pool/src/sync_aggregate_id.rs +++ b/beacon_node/operation_pool/src/sync_aggregate_id.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 974dabbf0c..7b9cd757a5 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,4 +1,4 @@ -use clap::{App, Arg}; +use clap::{App, Arg, ArgGroup}; use strum::VariantNames; use types::ProgressiveBalancesMode; @@ -29,6 +29,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Data directory for the freezer database.") .takes_value(true) ) + .arg( + Arg::with_name("blobs-dir") + .long("blobs-dir") + .value_name("DIR") + .help("Data directory for the blobs database.") + .takes_value(true) + ) /* * Network parameters. */ @@ -75,11 +82,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("The address lighthouse will listen for UDP and TCP connections. 
To listen \ over IpV4 and IpV6 set this flag twice with the different values.\n\ Examples:\n\ - - --listen-address '0.0.0.0' will listen over Ipv4.\n\ - - --listen-address '::' will listen over Ipv6.\n\ + - --listen-address '0.0.0.0' will listen over IPv4.\n\ + - --listen-address '::' will listen over IPv6.\n\ - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ - Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \ - multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") + IPv4 and IPv6. The order of the given addresses is not relevant. However, \ + multiple IPv4, or multiple IPv6 addresses will not be accepted.") .multiple(true) .max_values(2) .default_value("0.0.0.0") @@ -89,9 +96,10 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("port") .long("port") .value_name("PORT") - .help("The TCP/UDP port to listen on. The UDP port can be modified by the \ - --discovery-port flag. If listening over both Ipv4 and Ipv6 the --port flag \ - will apply to the Ipv4 address and --port6 to the Ipv6 address.") + .help("The TCP/UDP ports to listen on. There are two UDP ports. \ + The discovery UDP port will be set to this value and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the \ + --discovery-port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 and IPv6 the --port flag \ + will apply to the IPv4 address and --port6 to the IPv6 address.") .default_value("9000") .takes_value(true), ) @@ -99,8 +107,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("port6") .long("port6") .value_name("PORT") - .help("The TCP/UDP port to listen on over IpV6 when listening over both Ipv4 and \ - Ipv6. Defaults to 9090 when required.") + .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ + IPv6. Defaults to 9090 when required. 
The Quic UDP port will be set to this value + 1.") .default_value("9090") .takes_value(true), ) @@ -111,12 +119,27 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("The UDP port that discovery will listen on. Defaults to `port`") .takes_value(true), ) + .arg( + Arg::with_name("quic-port") + .long("quic-port") + .value_name("PORT") + .help("The UDP port that quic will listen on. Defaults to `port` + 1") + .takes_value(true), + ) .arg( Arg::with_name("discovery-port6") .long("discovery-port6") .value_name("PORT") - .help("The UDP port that discovery will listen on over IpV6 if listening over \ - both Ipv4 and IpV6. Defaults to `port6`") + .help("The UDP port that discovery will listen on over IPv6 if listening over \ + both IPv4 and IPv6. Defaults to `port6`") + .takes_value(true), + ) + .arg( + Arg::with_name("quic-port6") + .long("quic-port6") + .value_name("PORT") + .help("The UDP port that quic will listen on over IPv6 if listening over \ + both IPv4 and IPv6. Defaults to `port6` + 1") .takes_value(true), ) .arg( @@ -159,7 +182,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("enr-udp-port") .value_name("PORT") .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ - can connect to your local node on this port over IpV4.") + can connect to your local node on this port over IPv4.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-quic-port") + .long("enr-quic-port") + .value_name("PORT") + .help("The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv4.") .takes_value(true), ) .arg( @@ -167,7 +198,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. 
Set this only if you are sure other nodes \ - can connect to your local node on this port over IpV6.") + can connect to your local node on this port over IPv6.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-quic6-port") + .long("enr-quic6-port") + .value_name("PORT") + .help("The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes \ + can connect to your local node on this port over IPv6.") .takes_value(true), ) .arg( @@ -175,7 +214,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("enr-tcp-port") .value_name("PORT") .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ - can connect to your local node on this port over IpV4. The --port flag is \ + can connect to your local node on this port over IPv4. The --port flag is \ used if this is not set.") .takes_value(true), ) @@ -184,7 +223,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("enr-tcp6-port") .value_name("PORT") .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ - can connect to your local node on this port over IpV6. The --port6 flag is \ + can connect to your local node on this port over IPv6. The --port6 flag is \ used if this is not set.") .takes_value(true), ) @@ -225,11 +264,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { without an ENR.") .takes_value(true), ) + // NOTE: This is hidden because it is primarily a developer feature for testnets and + // debugging. We remove it from the list to avoid clutter. .arg( Arg::with_name("disable-discovery") .long("disable-discovery") .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") - .takes_value(false), + .hidden(true) + ) + .arg( + Arg::with_name("disable-quic") + .long("disable-quic") + .help("Disables the quic transport. 
The node will rely solely on the TCP transport for libp2p connections.") ) .arg( Arg::with_name("disable-peer-scoring") @@ -316,22 +362,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-address") .long("http-address") + .requires("enable_http") .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") - .default_value("127.0.0.1") + .default_value_if("enable_http", None, "127.0.0.1") .takes_value(true), ) .arg( Arg::with_name("http-port") .long("http-port") + .requires("enable_http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value("5052") + .default_value_if("enable_http", None, "5052") .takes_value(true), ) .arg( Arg::with_name("http-allow-origin") .long("http-allow-origin") + .requires("enable_http") .value_name("ORIGIN") .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). \ @@ -342,11 +391,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-disable-legacy-spec") .long("http-disable-legacy-spec") + .requires("enable_http") .hidden(true) ) .arg( Arg::with_name("http-spec-fork") .long("http-spec-fork") + .requires("enable_http") .value_name("FORK") .help("Serve the spec for a specific hard fork on /eth/v1/config/spec. It should \ not be necessary to set this flag.") @@ -364,6 +415,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-tls-cert") .long("http-tls-cert") + .requires("enable_http") .help("The path of the certificate to be used when serving the HTTP API server \ over TLS.") .takes_value(true) @@ -371,6 +423,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-tls-key") .long("http-tls-key") + .requires("enable_http") .help("The path of the private key to be used when serving the HTTP API server \ over TLS. 
Must not be password-protected.") .takes_value(true) @@ -378,6 +431,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-allow-sync-stalled") .long("http-allow-sync-stalled") + .requires("enable_http") .help("Forces the HTTP to indicate that the node is synced when sync is actually \ stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \ MAINNET.") @@ -385,22 +439,34 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") + .requires("enable_http") .takes_value(true) - .default_value("1") + .default_value_if("enable_http", None, "1") .value_name("N") .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \ Increasing this value can prevent messages from being dropped.") ) + .arg( + Arg::with_name("http-duplicate-block-status") + .long("http-duplicate-block-status") + .requires("enable_http") + .takes_value(true) + .default_value_if("enable_http", None, "202") + .value_name("STATUS_CODE") + .help("Status code to send when a block that is already known is POSTed to the \ + HTTP API.") + ) .arg( Arg::with_name("http-enable-beacon-processor") .long("http-enable-beacon-processor") + .requires("enable_http") .value_name("BOOLEAN") .help("The beacon processor is a scheduler which provides quality-of-service and \ DoS protection. When set to \"true\", HTTP API requests will be queued and scheduled \ alongside other tasks. 
When set to \"false\", HTTP API responses will be executed \ immediately.") .takes_value(true) - .default_value("true") + .default_value_if("enable_http", None, "true") ) /* Prometheus metrics HTTP server related arguments */ .arg( @@ -413,22 +479,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("metrics-address") .long("metrics-address") .value_name("ADDRESS") + .requires("metrics") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value("127.0.0.1") + .default_value_if("metrics", None, "127.0.0.1") .takes_value(true), ) .arg( Arg::with_name("metrics-port") .long("metrics-port") + .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value("5054") + .default_value_if("metrics", None, "5054") .takes_value(true), ) .arg( Arg::with_name("metrics-allow-origin") .long("metrics-allow-origin") .value_name("ORIGIN") + .requires("metrics") .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ @@ -668,6 +737,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("1") .takes_value(true) ) + /* Deneb settings */ + .arg( + Arg::with_name("trusted-setup-file-override") + .long("trusted-setup-file-override") + .value_name("FILE") + .help("Path to a json file containing the trusted setup params. \ + NOTE: This will override the trusted setup that is generated \ + from the mainnet kzg ceremony. Use with caution") + .takes_value(true) + ) /* * Database purging and compaction. 
*/ @@ -698,6 +777,34 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) + .arg( + Arg::with_name("prune-blobs") + .long("prune-blobs") + .value_name("BOOLEAN") + .help("Prune blobs from Lighthouse's database when they are older than the data \ + data availability boundary relative to the current epoch.") + .takes_value(true) + .default_value("true") + ) + .arg( + Arg::with_name("epochs-per-blob-prune") + .long("epochs-per-blob-prune") + .value_name("EPOCHS") + .help("The epoch interval with which to prune blobs from Lighthouse's \ + database when they are older than the data availability boundary \ + relative to the current epoch.") + .takes_value(true) + .default_value("1") + ) + .arg( + Arg::with_name("blob-prune-margin-epochs") + .long("blob-prune-margin-epochs") + .value_name("EPOCHS") + .help("The margin for blob pruning in epochs. The oldest blobs are pruned \ + up until data_availability_boundary - blob_prune_margin_epochs.") + .takes_value(true) + .default_value("0") + ) /* * Misc. @@ -1067,6 +1174,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("0") .takes_value(true) ) + .arg( + Arg::with_name("ignore-builder-override-suggestion-threshold") + .long("ignore-builder-override-suggestion-threshold") + .value_name("PERCENTAGE") + .help("When the EE advises Lighthouse to ignore the builder payload, this flag \ + specifies a percentage threshold for the difference between the reward from \ + the builder payload and the local EE's payload. This threshold must be met \ + for Lighthouse to consider ignoring the EE's suggestion. If the reward from \ + the builder's payload doesn't exceed the local payload by at least this \ + percentage, the local payload will be used. The conditions under which the \ + EE may make this suggestion depend on the EE's implementation, with the \ + primary intent being to safeguard against potential censorship attacks \ + from builders. 
Setting this flag to 0 will cause Lighthouse to always \ + ignore the EE's suggestion. Default: 10.0 (equivalent to 10%).") + .default_value("10.0") + .takes_value(true) + ) .arg( Arg::with_name("builder-user-agent") .long("builder-user-agent") @@ -1125,7 +1249,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("gui") .long("gui") - .hidden(true) .help("Enable the graphical user interface and all its requirements. \ This enables --http and --validator-monitor-auto and enables SSE logging.") .takes_value(false) @@ -1138,6 +1261,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { // to local payloads, therefore it fundamentally conflicts with // always using the builder. .conflicts_with("builder-profit-threshold") + .conflicts_with("ignore-builder-override-suggestion-threshold") ) .arg( Arg::with_name("invalid-gossip-verified-blocks-path") @@ -1212,4 +1336,5 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("64") .takes_value(true) ) + .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index d2e92b48f3..94b94677a5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,6 +2,7 @@ use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; +use beacon_chain::TrustedSetup; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use clap_utils::parse_required; @@ -21,6 +22,7 @@ use std::fmt::Debug; use std::fs; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; +use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; @@ -94,67 +96,70 @@ pub fn get_config( * Http API server */ - if cli_args.is_present("http") { + if cli_args.is_present("enable_http") { client_config.http_api.enabled = true; + + if let 
Some(address) = cli_args.value_of("http-address") { + client_config.http_api.listen_addr = address + .parse::() + .map_err(|_| "http-address is not a valid IP address.")?; + } + + if let Some(port) = cli_args.value_of("http-port") { + client_config.http_api.listen_port = port + .parse::() + .map_err(|_| "http-port is not a valid u16.")?; + } + + if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + // Pre-validate the config value to give feedback to the user on node startup, instead of + // as late as when the first API response is produced. + hyper::header::HeaderValue::from_str(allow_origin) + .map_err(|_| "Invalid allow-origin value")?; + + client_config.http_api.allow_origin = Some(allow_origin.to_string()); + } + + if cli_args.is_present("http-disable-legacy-spec") { + warn!( + log, + "The flag --http-disable-legacy-spec is deprecated and will be removed" + ); + } + + if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? { + client_config.http_api.spec_fork_name = Some(fork_name); + } + + if cli_args.is_present("http-enable-tls") { + client_config.http_api.tls_config = Some(TlsConfig { + cert: cli_args + .value_of("http-tls-cert") + .ok_or("--http-tls-cert was not provided.")? + .parse::() + .map_err(|_| "http-tls-cert is not a valid path name.")?, + key: cli_args + .value_of("http-tls-key") + .ok_or("--http-tls-key was not provided.")? 
+ .parse::() + .map_err(|_| "http-tls-key is not a valid path name.")?, + }); + } + + if cli_args.is_present("http-allow-sync-stalled") { + client_config.http_api.allow_sync_stalled = true; + } + + client_config.http_api.sse_capacity_multiplier = + parse_required(cli_args, "http-sse-capacity-multiplier")?; + + client_config.http_api.enable_beacon_processor = + parse_required(cli_args, "http-enable-beacon-processor")?; + + client_config.http_api.duplicate_block_status_code = + parse_required(cli_args, "http-duplicate-block-status")?; } - if let Some(address) = cli_args.value_of("http-address") { - client_config.http_api.listen_addr = address - .parse::() - .map_err(|_| "http-address is not a valid IP address.")?; - } - - if let Some(port) = cli_args.value_of("http-port") { - client_config.http_api.listen_port = port - .parse::() - .map_err(|_| "http-port is not a valid u16.")?; - } - - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { - // Pre-validate the config value to give feedback to the user on node startup, instead of - // as late as when the first API response is produced. - hyper::header::HeaderValue::from_str(allow_origin) - .map_err(|_| "Invalid allow-origin value")?; - - client_config.http_api.allow_origin = Some(allow_origin.to_string()); - } - - if cli_args.is_present("http-disable-legacy-spec") { - warn!( - log, - "The flag --http-disable-legacy-spec is deprecated and will be removed" - ); - } - - if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? { - client_config.http_api.spec_fork_name = Some(fork_name); - } - - if cli_args.is_present("http-enable-tls") { - client_config.http_api.tls_config = Some(TlsConfig { - cert: cli_args - .value_of("http-tls-cert") - .ok_or("--http-tls-cert was not provided.")? - .parse::() - .map_err(|_| "http-tls-cert is not a valid path name.")?, - key: cli_args - .value_of("http-tls-key") - .ok_or("--http-tls-key was not provided.")? 
- .parse::() - .map_err(|_| "http-tls-key is not a valid path name.")?, - }); - } - - if cli_args.is_present("http-allow-sync-stalled") { - client_config.http_api.allow_sync_stalled = true; - } - - client_config.http_api.sse_capacity_multiplier = - parse_required(cli_args, "http-sse-capacity-multiplier")?; - - client_config.http_api.enable_beacon_processor = - parse_required(cli_args, "http-enable-beacon-processor")?; - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } @@ -323,7 +328,7 @@ pub fn get_config( .write_all(jwt_secret_key.as_bytes()) .map_err(|e| { format!( - "Error occured while writing to jwt_secret_key file: {:?}", + "Error occurred while writing to jwt_secret_key file: {:?}", e ) })?; @@ -353,7 +358,8 @@ pub fn get_config( clap_utils::parse_required(cli_args, "builder-profit-threshold")?; el_config.always_prefer_builder_payload = cli_args.is_present("always-prefer-builder-payload"); - + el_config.ignore_builder_override_suggestion_threshold = + clap_utils::parse_required(cli_args, "ignore-builder-override-suggestion-threshold")?; let execution_timeout_multiplier = clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); @@ -379,10 +385,32 @@ pub fn get_config( client_config.execution_layer = Some(el_config); } + // 4844 params + client_config.trusted_setup = context + .eth2_network_config + .as_ref() + .and_then(|config| config.kzg_trusted_setup.as_ref()) + .map(|trusted_setup_bytes| serde_json::from_slice(trusted_setup_bytes)) + .transpose() + .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + + // Override default trusted setup file if required + if let Some(trusted_setup_file_path) = cli_args.value_of("trusted-setup-file-override") { + let file = std::fs::File::open(trusted_setup_file_path) + .map_err(|e| format!("Failed to open trusted setup 
file: {}", e))?; + let trusted_setup: TrustedSetup = serde_json::from_reader(file) + .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; + client_config.trusted_setup = Some(trusted_setup); + } + if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } + if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") { + client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); + } + let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; @@ -416,6 +444,22 @@ pub fn get_config( client_config.chain.epochs_per_migration = epochs_per_migration; } + if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? { + client_config.store.prune_blobs = prune_blobs; + } + + if let Some(epochs_per_blob_prune) = + clap_utils::parse_optional(cli_args, "epochs-per-blob-prune")? + { + client_config.store.epochs_per_blob_prune = epochs_per_blob_prune; + } + + if let Some(blob_prune_margin_epochs) = + clap_utils::parse_optional(cli_args, "blob-prune-margin-epochs")? 
+ { + client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; + } + /* * Zero-ports * @@ -468,9 +512,30 @@ pub fn get_config( client_config.chain.checkpoint_sync_url_timeout = clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; - client_config.genesis = if let Some(genesis_state_bytes) = - eth2_network_config.genesis_state_bytes.clone() - { + client_config.genesis_state_url_timeout = + clap_utils::parse_required(cli_args, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + let genesis_state_url_opt = + clap_utils::parse_optional::(cli_args, "genesis-state-url")?; + let checkpoint_sync_url_opt = + clap_utils::parse_optional::(cli_args, "checkpoint-sync-url")?; + + // If the `--genesis-state-url` is defined, use that to download the + // genesis state bytes. If it's not defined, try `--checkpoint-sync-url`. + client_config.genesis_state_url = if let Some(genesis_state_url) = genesis_state_url_opt { + Some(genesis_state_url) + } else if let Some(checkpoint_sync_url) = checkpoint_sync_url_opt { + // If the checkpoint sync URL is going to be used to download the + // genesis state, adopt the timeout from the checkpoint sync URL too. + client_config.genesis_state_url_timeout = + Duration::from_secs(client_config.chain.checkpoint_sync_url_timeout); + Some(checkpoint_sync_url) + } else { + None + }; + + client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. 
if let (Some(initial_state_path), Some(initial_block_path)) = ( cli_args.value_of("checkpoint-state"), @@ -492,7 +557,6 @@ pub fn get_config( let anchor_block_bytes = read(initial_block_path)?; ClientGenesis::WeakSubjSszBytes { - genesis_state_bytes, anchor_state_bytes, anchor_block_bytes, } @@ -500,17 +564,9 @@ pub fn get_config( let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } + ClientGenesis::CheckpointSyncUrl { url } } else { - // Note: re-serializing the genesis state is not so efficient, however it avoids adding - // trait bounds to the `ClientGenesis` enum. This would have significant flow-on - // effects. - ClientGenesis::SszBytes { - genesis_state_bytes, - } + ClientGenesis::GenesisState } } else { if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { @@ -899,15 +955,15 @@ pub fn parse_listening_addresses( .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? .unwrap_or(9090); - // parse the possible udp ports - let maybe_udp_port = cli_args + // parse the possible discovery ports. + let maybe_disc_port = cli_args .value_of("discovery-port") .map(str::parse::) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port as an integer: {parse_error}") })?; - let maybe_udp6_port = cli_args + let maybe_disc6_port = cli_args .value_of("discovery-port6") .map(str::parse::) .transpose() @@ -915,6 +971,24 @@ pub fn parse_listening_addresses( format!("Failed to parse --discovery-port6 as an integer: {parse_error}") })?; + // parse the possible quic port. + let maybe_quic_port = cli_args + .value_of("quic-port") + .map(str::parse::) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --quic-port as an integer: {parse_error}") + })?; + + // parse the possible quic port. 
+ let maybe_quic6_port = cli_args + .value_of("quic-port6") + .map(str::parse::) + .transpose() + .map_err(|parse_error| { + format!("Failed to parse --quic6-port as an integer: {parse_error}") + })?; + // Now put everything together let listening_addresses = match (maybe_ipv4, maybe_ipv6) { (None, None) => { @@ -925,7 +999,7 @@ pub fn parse_listening_addresses( // A single ipv6 address was provided. Set the ports if cli_args.is_present("port6") { - warn!(log, "When listening only over IpV6, use the --port flag. The value of --port6 will be ignored.") + warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored.") } // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports @@ -933,20 +1007,32 @@ pub fn parse_listening_addresses( .transpose()? .unwrap_or(port); - if maybe_udp6_port.is_some() { - warn!(log, "When listening only over IpV6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") + if maybe_disc6_port.is_some() { + warn!(log, "When listening only over IPv6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") } + + if maybe_quic6_port.is_some() { + warn!(log, "When listening only over IPv6, use the --quic-port flag. The value of --quic-port6 will be ignored.") + } + // use zero ports if required. If not, use the specific udp port. If none given, use // the tcp port. - let udp_port = use_zero_ports + let disc_port = use_zero_ports .then(unused_port::unused_udp6_port) .transpose()? - .or(maybe_udp_port) + .or(maybe_disc_port) .unwrap_or(port); + let quic_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? + .or(maybe_quic_port) + .unwrap_or(port + 1); + ListenAddress::V6(lighthouse_network::ListenAddr { addr: ipv6, - udp_port, + quic_port, + disc_port, tcp_port, }) } @@ -958,16 +1044,25 @@ pub fn parse_listening_addresses( .then(unused_port::unused_tcp4_port) .transpose()? 
.unwrap_or(port); - // use zero ports if required. If not, use the specific udp port. If none given, use + // use zero ports if required. If not, use the specific discovery port. If none given, use // the tcp port. - let udp_port = use_zero_ports + let disc_port = use_zero_ports .then(unused_port::unused_udp4_port) .transpose()? - .or(maybe_udp_port) + .or(maybe_disc_port) .unwrap_or(port); + // use zero ports if required. If not, use the specific quic port. If none given, use + // the tcp port + 1. + let quic_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_quic_port) + .unwrap_or(port + 1); + ListenAddress::V4(lighthouse_network::ListenAddr { addr: ipv4, - udp_port, + disc_port, + quic_port, tcp_port, }) } @@ -976,31 +1071,44 @@ pub fn parse_listening_addresses( .then(unused_port::unused_tcp4_port) .transpose()? .unwrap_or(port); - let ipv4_udp_port = use_zero_ports + let ipv4_disc_port = use_zero_ports .then(unused_port::unused_udp4_port) .transpose()? - .or(maybe_udp_port) + .or(maybe_disc_port) .unwrap_or(ipv4_tcp_port); + let ipv4_quic_port = use_zero_ports + .then(unused_port::unused_udp4_port) + .transpose()? + .or(maybe_quic_port) + .unwrap_or(port + 1); // Defaults to 9090 when required let ipv6_tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) .transpose()? .unwrap_or(port6); - let ipv6_udp_port = use_zero_ports + let ipv6_disc_port = use_zero_ports .then(unused_port::unused_udp6_port) .transpose()? - .or(maybe_udp6_port) + .or(maybe_disc6_port) .unwrap_or(ipv6_tcp_port); + let ipv6_quic_port = use_zero_ports + .then(unused_port::unused_udp6_port) + .transpose()? 
+ .or(maybe_quic6_port) + .unwrap_or(ipv6_tcp_port + 1); + ListenAddress::DualStack( lighthouse_network::ListenAddr { addr: ipv4, - udp_port: ipv4_udp_port, + disc_port: ipv4_disc_port, + quic_port: ipv4_quic_port, tcp_port: ipv4_tcp_port, }, lighthouse_network::ListenAddr { addr: ipv6, - udp_port: ipv6_udp_port, + disc_port: ipv6_disc_port, + quic_port: ipv6_quic_port, tcp_port: ipv6_tcp_port, }, ) @@ -1111,15 +1219,23 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { + config.enr_quic4_port = Some( + enr_quic_port_str + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } @@ -1127,41 +1243,62 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { + config.enr_quic6_port = Some( + enr_quic_port_str + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } if 
cli_args.is_present("enr-match") { - // Match the Ip and UDP port in the enr. + // Match the IP and UDP port in the ENR. - // set the enr address to localhost if the address is unspecified if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv4_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED { Ipv4Addr::LOCALHOST } else { ipv4_addr.addr }; config.enr_address.0 = Some(ipv4_enr_addr); - config.enr_udp4_port = Some(ipv4_addr.udp_port); + config.enr_udp4_port = Some(disc_port); } if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv6_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { Ipv6Addr::LOCALHOST } else { ipv6_addr.addr }; config.enr_address.1 = Some(ipv6_enr_addr); - config.enr_udp6_port = Some(ipv6_addr.udp_port); + config.enr_udp6_port = Some(disc_port); } } @@ -1194,11 +1331,11 @@ pub fn set_network_config( // actually matters. Just use the udp port. let port = match config.listen_addrs() { - ListenAddress::V4(v4_addr) => v4_addr.udp_port, - ListenAddress::V6(v6_addr) => v6_addr.udp_port, + ListenAddress::V4(v4_addr) => v4_addr.disc_port, + ListenAddress::V6(v6_addr) => v6_addr.disc_port, ListenAddress::DualStack(v4_addr, _v6_addr) => { // NOTE: slight preference for ipv4 that I don't think is of importance. - v4_addr.udp_port + v4_addr.disc_port } }; @@ -1257,6 +1394,10 @@ pub fn set_network_config( warn!(log, "Discovery is disabled. 
New peers will not be found"); } + if cli_args.is_present("disable-quic") { + config.disable_quic_support = true; + } + if cli_args.is_present("disable-upnp") { config.upnp_enabled = false; } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 3bef69ce83..085a2a7824 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -64,6 +64,7 @@ impl ProductionBeaconNode { let _datadir = client_config.create_data_dir()?; let db_path = client_config.create_db_path()?; let freezer_db_path = client_config.create_freezer_db_path()?; + let blobs_db_path = client_config.create_blobs_db_path()?; let executor = context.executor.clone(); if let Some(legacy_dir) = client_config.get_existing_legacy_data_dir() { @@ -85,7 +86,13 @@ impl ProductionBeaconNode { .chain_spec(spec) .beacon_processor(client_config.beacon_processor.clone()) .http_api_config(client_config.http_api.clone()) - .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?; + .disk_store( + &db_path, + &freezer_db_path, + blobs_db_path, + store_config, + log.clone(), + )?; let builder = if let Some(mut slasher_config) = client_config.slasher.clone() { match slasher_config.override_backend() { diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index a952f1b2ff..7bf1ef76be 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -2,27 +2,26 @@ name = "store" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" -beacon_chain = {path = "../beacon_chain"} +tempfile = { workspace = true } +beacon_chain = { workspace = true } [dependencies] db-key = "0.0.5" -leveldb = { version = "0.8.6" } -parking_lot = "0.12.0" -itertools = "0.10.0" -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -types = { path = "../../consensus/types" } -state_processing = { path = "../../consensus/state_processing" } -slog = "2.5.2" -serde = "1.0.116" -serde_derive = "1.0.116" 
-lazy_static = "1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.7.1" -sloggers = { version = "2.1.1", features = ["json"] } -directory = { path = "../../common/directory" } -strum = { version = "0.24.0", features = ["derive"] } \ No newline at end of file +leveldb = { version = "0.8" } +parking_lot = { workspace = true } +itertools = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +types = { workspace = true } +state_processing = { workspace = true } +slog = { workspace = true } +serde = { workspace = true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +lru = { workspace = true } +sloggers = { workspace = true } +directory = { workspace = true } +strum = { workspace = true } diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 8ef0b6d201..b3322b5225 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -30,16 +30,16 @@ where /// Create a new iterator which can yield elements from `start_vindex` up to the last /// index stored by the restore point at `last_restore_point_slot`. /// - /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from - /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can + /// The `freezer_upper_limit` slot should be the slot of a recent restore point as obtained from + /// `Root::freezer_upper_limit`. We pass it as a parameter so that the caller can /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). 
pub fn new( store: &'a HotColdDB, start_vindex: usize, - last_restore_point_slot: Slot, + freezer_upper_limit: Slot, spec: &ChainSpec, ) -> Self { - let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec); + let (_, end_vindex) = F::start_and_end_vindex(freezer_upper_limit, spec); // Set the next chunk to the one containing `start_vindex`. let next_cindex = start_vindex / F::chunk_size(); diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 73edfbb074..537614f281 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -299,7 +299,8 @@ macro_rules! field { } fn update_pattern(spec: &ChainSpec) -> UpdatePattern { - $update_pattern(spec) + let update_pattern = $update_pattern; + update_pattern(spec) } fn get_value( @@ -307,7 +308,8 @@ macro_rules! field { vindex: u64, spec: &ChainSpec, ) -> Result { - $get_value(state, vindex, spec) + let get_value = $get_value; + get_value(state, vindex, spec) } fn is_fixed_length() -> bool { diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 581003b4fa..4e516b091b 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,5 +1,5 @@ use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{EthSpec, MinimalEthSpec}; @@ -8,6 +8,8 @@ pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; +pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; +pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; /// Database configuration parameters. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -26,6 +28,13 @@ pub struct StoreConfig { pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. pub prune_payloads: bool, + /// Whether to prune blobs older than the blob data availability boundary. + pub prune_blobs: bool, + /// Frequency of blob pruning in epochs. Default: 1 (every epoch). + pub epochs_per_blob_prune: u64, + /// The margin for blob pruning in epochs. The oldest blobs are pruned up until + /// data_availability_boundary - blob_prune_margin_epochs. Default: 0. + pub blob_prune_margin_epochs: u64, } /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params. @@ -50,6 +59,9 @@ impl Default for StoreConfig { compact_on_init: false, compact_on_prune: true, prune_payloads: true, + prune_blobs: true, + epochs_per_blob_prune: DEFAULT_EPOCHS_PER_BLOB_PRUNE, + blob_prune_margin_epochs: DEFAULT_BLOB_PUNE_MARGIN_EPOCHS, } } } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index fcc40706b3..96e02b80ff 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -25,6 +25,8 @@ pub enum Error { SchemaMigrationError(String), /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied. AnchorInfoConcurrentMutation, + /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. + BlobInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. 
@@ -43,6 +45,8 @@ pub enum Error { BlockReplayError(BlockReplayError), AddPayloadLogicError, SlotClockUnavailableForMigration, + InvalidKey, + InvalidBytes, UnableToDowngrade, InconsistentFork(InconsistentFork), } diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 353be6bf05..1ccf1da1b7 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -19,6 +19,14 @@ pub trait Root: Field { end_state: BeaconState, end_root: Hash256, ) -> Result; + + /// The first slot for which this field is *no longer* stored in the freezer database. + /// + /// If `None`, then this field is not stored in the freezer database at all due to pruning + /// configuration. + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option; } impl Root for BlockRoots { @@ -39,6 +47,13 @@ impl Root for BlockRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option { + // Block roots are stored for all slots up to the split slot (exclusive). + Some(store.get_split_slot()) + } } impl Root for StateRoots { @@ -59,6 +74,15 @@ impl Root for StateRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option { + // State roots are stored for all slots up to the latest restore point (exclusive). + // There may not be a latest restore point if state pruning is enabled, in which + // case this function will return `None`. + store.get_latest_restore_point_slot() + } } /// Forwards root iterator that makes use of a flat field table in the freezer DB. @@ -118,6 +142,7 @@ impl Iterator for SimpleForwardsIterator { pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> { PreFinalization { iter: Box>, + end_slot: Option, /// Data required by the `PostFinalization` iterator when we get to it. 
continuation_data: Option, Hash256)>>, }, @@ -129,6 +154,7 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, C PostFinalization { iter: SimpleForwardsIterator, }, + Finished, } impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> @@ -138,8 +164,8 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> /// /// The `get_state` closure should return a beacon state and final block/state root to backtrack /// from in the case where the iterated range does not lie entirely within the frozen portion of - /// the database. If an `end_slot` is provided and it is before the database's latest restore - /// point slot then the `get_state` closure will not be called at all. + /// the database. If an `end_slot` is provided and it is before the database's freezer upper + /// limit for the field then the `get_state` closure will not be called at all. /// /// It is OK for `get_state` to hold a lock while this function is evaluated, as the returned /// iterator is as lazy as possible and won't do any work apart from calling `get_state`. @@ -150,18 +176,20 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> store: &'a HotColdDB, start_slot: Slot, end_slot: Option, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256)>, spec: &ChainSpec, ) -> Result { use HybridForwardsIterator::*; - let latest_restore_point_slot = store.get_latest_restore_point_slot(); + // First slot at which this field is *not* available in the freezer. i.e. all slots less + // than this slot have their data available in the freezer. 
+ let freezer_upper_limit = F::freezer_upper_limit(store).unwrap_or(Slot::new(0)); - let result = if start_slot < latest_restore_point_slot { + let result = if start_slot < freezer_upper_limit { let iter = Box::new(FrozenForwardsIterator::new( store, start_slot, - latest_restore_point_slot, + freezer_upper_limit, spec, )); @@ -169,18 +197,19 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) { + if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) { None } else { - Some(Box::new(get_state())) + Some(Box::new(get_state()?)) }; PreFinalization { iter, + end_slot, continuation_data, } } else { PostFinalizationLazy { - continuation_data: Some(Box::new(get_state())), + continuation_data: Some(Box::new(get_state()?)), store, start_slot, } @@ -195,6 +224,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> match self { PreFinalization { iter, + end_slot, continuation_data, } => { match iter.next() { @@ -203,10 +233,17 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> // to a post-finalization iterator beginning from the last slot // of the pre iterator. None => { + // If the iterator has an end slot (inclusive) which has already been + // covered by the (exclusive) frozen forwards iterator, then we're done! 
+ let iter_end_slot = Slot::from(iter.inner.end_vindex); + if end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1) { + *self = Finished; + return Ok(None); + } + let continuation_data = continuation_data.take(); let store = iter.inner.store; - let start_slot = Slot::from(iter.inner.end_vindex); - + let start_slot = iter_end_slot; *self = PostFinalizationLazy { continuation_data, store, @@ -230,6 +267,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> self.do_next() } PostFinalization { iter } => iter.next().transpose(), + Finished => Ok(None), } } } diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 3291336328..c70ef89869 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -31,7 +31,7 @@ where "Garbage collecting {} temporary states", delete_ops.len() / 2 ); - self.do_atomically(delete_ops)?; + self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7695ea520e..640647db35 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -12,20 +12,20 @@ use crate::leveldb_store::BytesKey; use crate::leveldb_store::LevelDB; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, - COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY, - SCHEMA_VERSION_KEY, SPLIT_KEY, + AnchorInfo, BlobInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, + BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, + PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; use crate::{ - get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, + get_key_for_col, 
ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, }; use itertools::process_results; use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -35,9 +35,11 @@ use state_processing::{ use std::cmp::min; use std::convert::TryInto; use std::marker::PhantomData; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use types::blob_sidecar::BlobSidecarList; +use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; use types::*; /// On-disk database that stores finalized states efficiently. @@ -53,15 +55,19 @@ pub struct HotColdDB, Cold: ItemStore> { pub(crate) split: RwLock, /// The starting slots for the range of blocks & states stored in the database. anchor_info: RwLock>, + /// The starting slots for the range of blobs stored in the database. + blob_info: RwLock, pub(crate) config: StoreConfig, /// Cold database containing compact historical data. pub cold_db: Cold, + /// Database containing blobs. If None, store falls back to use `cold_db`. + pub blobs_db: Option, /// Hot database containing duplicated but quick-to-access recent data. /// /// The hot database also contains all blocks. pub hot_db: Hot, - /// LRU cache of deserialized blocks. Updated whenever a block is loaded. - block_cache: Mutex>>, + /// LRU cache of deserialized blocks and blobs. Updated whenever a block or blob is loaded. + block_cache: Mutex>, /// LRU cache of replayed states. state_cache: Mutex>>, /// Chain spec. 
@@ -72,6 +78,43 @@ pub struct HotColdDB, Cold: ItemStore> { _phantom: PhantomData, } +#[derive(Debug)] +struct BlockCache { + block_cache: LruCache>, + blob_cache: LruCache>, +} + +impl BlockCache { + pub fn new(size: usize) -> Self { + Self { + block_cache: LruCache::new(size), + blob_cache: LruCache::new(size), + } + } + pub fn put_block(&mut self, block_root: Hash256, block: SignedBeaconBlock) { + self.block_cache.put(block_root, block); + } + pub fn put_blobs(&mut self, block_root: Hash256, blobs: BlobSidecarList) { + self.blob_cache.put(block_root, blobs); + } + pub fn get_block<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a SignedBeaconBlock> { + self.block_cache.get(block_root) + } + pub fn get_blobs<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a BlobSidecarList> { + self.blob_cache.get(block_root) + } + pub fn delete_block(&mut self, block_root: &Hash256) { + let _ = self.block_cache.pop(block_root); + } + pub fn delete_blobs(&mut self, block_root: &Hash256) { + let _ = self.blob_cache.pop(block_root); + } + pub fn delete(&mut self, block_root: &Hash256) { + let _ = self.block_cache.pop(block_root); + let _ = self.blob_cache.pop(block_root); + } +} + #[derive(Debug, PartialEq)] pub enum HotColdDBError { UnsupportedSchemaVersion { @@ -95,6 +138,8 @@ pub enum HotColdDBError { MissingExecutionPayload(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, + MissingPathToBlobsDatabase, + BlobsPreviouslyInDefaultStore, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), BlockReplayBeaconError(BeaconStateError), @@ -106,15 +151,18 @@ pub enum HotColdDBError { slots_per_historical_root: u64, slots_per_epoch: u64, }, + ZeroEpochsPerBlobPrune, + BlobPruneLogicError, RestorePointBlockHashError(BeaconStateError), IterationError { unexpected_key: BytesKey, }, - AttestationStateIsFinalized { + FinalizedStateNotInHotDatabase { split_slot: Slot, - request_slot: Option, - state_root: Hash256, + 
request_slot: Slot, + block_root: Hash256, }, + Rollback, } impl HotColdDB, MemoryStore> { @@ -123,14 +171,16 @@ impl HotColdDB, MemoryStore> { spec: ChainSpec, log: Logger, ) -> Result, MemoryStore>, Error> { - Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; + Self::verify_config(&config)?; let db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), + blob_info: RwLock::new(BlobInfo::default()), cold_db: MemoryStore::open(), + blobs_db: Some(MemoryStore::open()), hot_db: MemoryStore::open(), - block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, @@ -152,6 +202,7 @@ impl HotColdDB, LevelDB> { pub fn open( hot_path: &Path, cold_path: &Path, + blobs_db_path: Option, migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, spec: ChainSpec, @@ -162,9 +213,11 @@ impl HotColdDB, LevelDB> { let mut db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), + blob_info: RwLock::new(BlobInfo::default()), cold_db: LevelDB::open(cold_path)?, + blobs_db: None, hot_db: LevelDB::open(hot_path)?, - block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, @@ -207,6 +260,52 @@ impl HotColdDB, LevelDB> { ); } + // Open separate blobs directory if configured and same configuration was used on previous + // run. 
+ let blob_info = db.load_blob_info()?; + let deneb_fork_slot = db + .spec + .deneb_fork_epoch + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_blob_info = match &blob_info { + Some(blob_info) => { + // If the oldest block slot is already set do not allow the blob DB path to be + // changed (require manual migration). + if blob_info.oldest_blob_slot.is_some() { + if blobs_db_path.is_some() && !blob_info.blobs_db { + return Err(HotColdDBError::BlobsPreviouslyInDefaultStore.into()); + } else if blobs_db_path.is_none() && blob_info.blobs_db { + return Err(HotColdDBError::MissingPathToBlobsDatabase.into()); + } + } + // Set the oldest blob slot to the Deneb fork slot if it is not yet set. + let oldest_blob_slot = blob_info.oldest_blob_slot.or(deneb_fork_slot); + BlobInfo { + oldest_blob_slot, + blobs_db: blobs_db_path.is_some(), + } + } + // First start. + None => BlobInfo { + // Set the oldest blob slot to the Deneb fork slot if it is not yet set. + oldest_blob_slot: deneb_fork_slot, + blobs_db: blobs_db_path.is_some(), + }, + }; + if new_blob_info.blobs_db { + if let Some(path) = &blobs_db_path { + db.blobs_db = Some(LevelDB::open(path.as_path())?); + } + } + db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; + info!( + db.log, + "Blob DB initialized"; + "separate_db" => new_blob_info.blobs_db, + "path" => ?blobs_db_path, + "oldest_blob_slot" => ?new_blob_info.oldest_blob_slot, + ); + // Ensure that the schema version of the on-disk database matches the software. // If the version is mismatched, an automatic migration will be attempted. let db = Arc::new(db); @@ -275,7 +374,7 @@ impl, Cold: ItemStore> HotColdDB let block = self.block_as_kv_store_ops(block_root, block, &mut ops)?; self.hot_db.do_atomically(ops)?; // Update cache. 
- self.block_cache.lock().put(*block_root, block); + self.block_cache.lock().put_block(*block_root, block); Ok(()) } @@ -327,15 +426,14 @@ impl, Cold: ItemStore> HotColdDB metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); // Check the cache. - if let Some(block) = self.block_cache.lock().get(block_root) { + if let Some(block) = self.block_cache.lock().get_block(block_root) { metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT); return Ok(Some(DatabaseBlock::Full(block.clone()))); } // Load the blinded block. - let blinded_block = match self.get_blinded_block(block_root)? { - Some(block) => block, - None => return Ok(None), + let Some(blinded_block) = self.get_blinded_block(block_root)? else { + return Ok(None); }; // If the block is after the split point then we should have the full execution payload @@ -352,7 +450,9 @@ impl, Cold: ItemStore> HotColdDB let full_block = self.make_full_block(block_root, blinded_block)?; // Add to cache. - self.block_cache.lock().put(*block_root, full_block.clone()); + self.block_cache + .lock() + .put_block(*block_root, full_block.clone()); DatabaseBlock::Full(full_block) } else if !self.config.prune_payloads { @@ -473,6 +573,12 @@ impl, Cold: ItemStore> HotColdDB .map(|payload| payload.is_some()) } + /// Check if the blobs for a block exist on disk. + pub fn blobs_exist(&self, block_root: &Hash256) -> Result { + let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); + blobs_db.key_exists(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + } + /// Determine whether a block exists in the database. pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db @@ -481,11 +587,34 @@ impl, Cold: ItemStore> HotColdDB /// Delete a block from the store and the block cache. 
pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { - self.block_cache.lock().pop(block_root); + self.block_cache.lock().delete(block_root); self.hot_db .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; self.hot_db - .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) + .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())?; + let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); + blobs_db.key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + } + + pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { + let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); + blobs_db.put_bytes( + DBColumn::BeaconBlob.into(), + block_root.as_bytes(), + &blobs.as_ssz_bytes(), + )?; + self.block_cache.lock().put_blobs(*block_root, blobs); + Ok(()) + } + + pub fn blobs_as_kv_store_ops( + &self, + key: &Hash256, + blobs: BlobSidecarList, + ops: &mut Vec, + ) { + let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); } pub fn put_state_summary( @@ -545,7 +674,7 @@ impl, Cold: ItemStore> HotColdDB /// upon that state (e.g., state roots). Additionally, only states from the hot store are /// returned. /// - /// See `Self::get_state` for information about `slot`. + /// See `Self::get_advanced_hot_state` for information about `max_slot`. 
/// /// ## Warning /// @@ -557,23 +686,78 @@ impl, Cold: ItemStore> HotColdDB /// - `state.block_roots` pub fn get_inconsistent_state_for_attestation_verification_only( &self, - state_root: &Hash256, - slot: Option, - ) -> Result>, Error> { + block_root: &Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); + self.get_advanced_hot_state_with_strategy( + *block_root, + max_slot, + state_root, + StateProcessingStrategy::Inconsistent, + ) + } - let split_slot = self.get_split_slot(); + /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. + /// + /// The `state_root` argument is used to look up the block's un-advanced state in case an + /// advanced state is not found. + /// + /// Return the `(result_state_root, state)` satisfying: + /// + /// - `result_state_root == state.canonical_root()` + /// - `state.slot() <= max_slot` + /// - `state.get_latest_block_root(result_state_root) == block_root` + /// + /// Presently this is only used to avoid loading the un-advanced split state, but in future will + /// be expanded to return states from an in-memory cache. + pub fn get_advanced_hot_state( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { + self.get_advanced_hot_state_with_strategy( + block_root, + max_slot, + state_root, + StateProcessingStrategy::Accurate, + ) + } - if slot.map_or(false, |slot| slot < split_slot) { - Err(HotColdDBError::AttestationStateIsFinalized { - split_slot, - request_slot: slot, - state_root: *state_root, + /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. + pub fn get_advanced_hot_state_with_strategy( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + state_processing_strategy: StateProcessingStrategy, + ) -> Result)>, Error> { + // Hold a read lock on the split point so it can't move while we're trying to load the + // state. 
+ let split = self.split.read_recursive(); + + // Sanity check max-slot against the split slot. + if max_slot < split.slot { + return Err(HotColdDBError::FinalizedStateNotInHotDatabase { + split_slot: split.slot, + request_slot: max_slot, + block_root, } - .into()) - } else { - self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent) + .into()); } + + let state_root = if block_root == split.block_root && split.slot <= max_slot { + split.state_root + } else { + state_root + }; + let state = self + .load_hot_state(&state_root, state_processing_strategy)? + .map(|state| (state_root, state)); + drop(split); + Ok(state) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -607,7 +791,7 @@ impl, Cold: ItemStore> HotColdDB self, start_slot, None, - || (end_state, end_block_root), + || Ok((end_state, end_block_root)), spec, ) } @@ -616,7 +800,7 @@ impl, Cold: ItemStore> HotColdDB &self, start_slot: Slot, end_slot: Slot, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, spec: &ChainSpec, ) -> Result, Error> { HybridForwardsBlockRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) @@ -633,7 +817,7 @@ impl, Cold: ItemStore> HotColdDB self, start_slot, None, - || (end_state, end_state_root), + || Ok((end_state, end_state_root)), spec, ) } @@ -642,7 +826,7 @@ impl, Cold: ItemStore> HotColdDB &self, start_slot: Slot, end_slot: Slot, - get_state: impl FnOnce() -> (BeaconState, Hash256), + get_state: impl FnOnce() -> Result<(BeaconState, Hash256), Error>, spec: &ChainSpec, ) -> Result, Error> { HybridForwardsStateRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) @@ -718,6 +902,10 @@ impl, Cold: ItemStore> HotColdDB self.store_hot_state(&state_root, state, &mut key_value_batch)?; } + StoreOp::PutBlobs(block_root, blobs) => { + self.blobs_as_kv_store_ops(&block_root, blobs, &mut key_value_batch); + } + 
StoreOp::PutStateSummary(state_root, summary) => { key_value_batch.push(summary.as_kv_store_op(state_root)); } @@ -737,6 +925,11 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + StoreOp::DeleteBlobs(block_root) => { + let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_bytes()); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } + StoreOp::DeleteState(state_root, slot) => { let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_bytes()); @@ -762,17 +955,81 @@ impl, Cold: ItemStore> HotColdDB Ok(key_value_batch) } - pub fn do_atomically(&self, batch: Vec>) -> Result<(), Error> { - // Update the block cache whilst holding a lock, to ensure that the cache updates atomically - // with the database. + pub fn do_atomically_with_block_and_blobs_cache( + &self, + batch: Vec>, + ) -> Result<(), Error> { + let mut blobs_to_delete = Vec::new(); + let (blobs_ops, hot_db_ops): (Vec>, Vec>) = + batch.into_iter().partition(|store_op| match store_op { + StoreOp::PutBlobs(_, _) => true, + StoreOp::DeleteBlobs(block_root) => { + match self.get_blobs(block_root) { + Ok(Some(blob_sidecar_list)) => { + blobs_to_delete.push((*block_root, blob_sidecar_list)); + } + Err(e) => { + error!( + self.log, "Error getting blobs"; + "block_root" => %block_root, + "error" => ?e + ); + } + _ => (), + } + true + } + StoreOp::PutBlock(_, _) | StoreOp::DeleteBlock(_) => false, + _ => false, + }); + + // Update database whilst holding a lock on cache, to ensure that the cache updates + // atomically with the database. let mut guard = self.block_cache.lock(); - for op in &batch { + let blob_cache_ops = blobs_ops.clone(); + let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); + // Try to execute blobs store ops. + blobs_db.do_atomically(self.convert_to_kv_batch(blobs_ops)?)?; + + let hot_db_cache_ops = hot_db_ops.clone(); + // Try to execute hot db store ops. 
+ let tx_res = match self.convert_to_kv_batch(hot_db_ops) { + Ok(kv_store_ops) => self.hot_db.do_atomically(kv_store_ops), + Err(e) => Err(e), + }; + // Rollback on failure + if let Err(e) = tx_res { + error!( + self.log, + "Database write failed"; + "error" => ?e, + "action" => "reverting blob DB changes" + ); + let mut blob_cache_ops = blob_cache_ops; + for op in blob_cache_ops.iter_mut() { + let reverse_op = match op { + StoreOp::PutBlobs(block_root, _) => StoreOp::DeleteBlobs(*block_root), + StoreOp::DeleteBlobs(_) => match blobs_to_delete.pop() { + Some((block_root, blobs)) => StoreOp::PutBlobs(block_root, blobs), + None => return Err(HotColdDBError::Rollback.into()), + }, + _ => return Err(HotColdDBError::Rollback.into()), + }; + *op = reverse_op; + } + blobs_db.do_atomically(self.convert_to_kv_batch(blob_cache_ops)?)?; + return Err(e); + } + + for op in hot_db_cache_ops { match op { StoreOp::PutBlock(block_root, block) => { - guard.put(*block_root, (**block).clone()); + guard.put_block(block_root, (*block).clone()); } + StoreOp::PutBlobs(_, _) => (), + StoreOp::PutState(_, _) => (), StoreOp::PutStateSummary(_, _) => (), @@ -782,9 +1039,11 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteStateTemporaryFlag(_) => (), StoreOp::DeleteBlock(block_root) => { - guard.pop(block_root); + guard.delete_block(&block_root); } + StoreOp::DeleteBlobs(_) => (), + StoreOp::DeleteState(_, _) => (), StoreOp::DeleteExecutionPayload(_) => (), @@ -793,8 +1052,20 @@ impl, Cold: ItemStore> HotColdDB } } - self.hot_db - .do_atomically(self.convert_to_kv_batch(batch)?)?; + for op in blob_cache_ops { + match op { + StoreOp::PutBlobs(block_root, blobs) => { + guard.put_blobs(block_root, blobs); + } + + StoreOp::DeleteBlobs(block_root) => { + guard.delete_blobs(&block_root); + } + + _ => (), + } + } + drop(guard); Ok(()) @@ -908,6 +1179,9 @@ impl, Cold: ItemStore> HotColdDB ops.push(op); // 2. Store updated vector entries. 
+ // Block roots need to be written here as well as by the `ChunkWriter` in `migrate_db` + // because states may require older block roots, and the writer only stores block roots + // between the previous split point and the new split point. let db = &self.cold_db; store_updated_vector(BlockRoots, db, state, &self.spec, ops)?; store_updated_vector(StateRoots, db, state, &self.spec, ops)?; @@ -1032,7 +1306,7 @@ impl, Cold: ItemStore> HotColdDB let state_root_iter = self.forwards_state_roots_iterator_until( low_slot, slot, - || (high_restore_point, Hash256::zero()), + || Ok((high_restore_point, Hash256::zero())), &self.spec, )?; @@ -1160,6 +1434,28 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch blobs for a given block from the store. + pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); + + // Check the cache. + if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { + metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); + return Ok(Some(blobs.clone())); + } + + match blobs_db.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { + Some(ref blobs_bytes) => { + let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; + self.block_cache + .lock() + .put_blobs(*block_root, blobs.clone()); + Ok(Some(blobs)) + } + None => Ok(None), + } + } + /// Get a reference to the `ChainSpec` used by the database. pub fn get_chain_spec(&self) -> &ChainSpec { &self.spec @@ -1180,14 +1476,36 @@ impl, Cold: ItemStore> HotColdDB *self.split.read_recursive() } - pub fn set_split(&self, slot: Slot, state_root: Hash256) { - *self.split.write() = Split { slot, state_root }; + pub fn set_split(&self, slot: Slot, state_root: Hash256, block_root: Hash256) { + *self.split.write() = Split { + slot, + state_root, + block_root, + }; } - /// Fetch the slot of the most recently stored restore point. 
- pub fn get_latest_restore_point_slot(&self) -> Slot { - (self.get_split_slot() - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point + /// Fetch the slot of the most recently stored restore point (if any). + pub fn get_latest_restore_point_slot(&self) -> Option { + let split_slot = self.get_split_slot(); + let anchor = self.get_anchor_info(); + + // There are no restore points stored if the state upper limit lies in the hot database, + // and the lower limit is zero. It hasn't been reached yet, and may never be. + if anchor.as_ref().map_or(false, |a| { + a.state_upper_limit >= split_slot && a.state_lower_limit == 0 + }) { + None + } else if let Some(lower_limit) = anchor + .map(|a| a.state_lower_limit) + .filter(|limit| *limit > 0) + { + Some(lower_limit) + } else { + Some( + (split_slot - 1) / self.config.slots_per_restore_point + * self.config.slots_per_restore_point, + ) + } } /// Load the database schema version from disk. @@ -1216,25 +1534,36 @@ impl, Cold: ItemStore> HotColdDB } /// Initialise the anchor info for checkpoint sync starting from `block`. - pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn init_anchor_info( + &self, + block: BeaconBlockRef<'_, E>, + retain_historic_states: bool, + ) -> Result { let anchor_slot = block.slot(); let slots_per_restore_point = self.config.slots_per_restore_point; - // Set the `state_upper_limit` to the slot of the *next* restore point. - // See `get_state_upper_limit` for rationale. - let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 { + let state_upper_limit = if !retain_historic_states { + STATE_UPPER_LIMIT_NO_RETAIN + } else if anchor_slot % slots_per_restore_point == 0 { anchor_slot } else { + // Set the `state_upper_limit` to the slot of the *next* restore point. + // See `get_state_upper_limit` for rationale. 
(anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point }; - let anchor_info = AnchorInfo { - anchor_slot, - oldest_block_slot: anchor_slot, - oldest_block_parent: block.parent_root(), - state_upper_limit: next_restore_point_slot, - state_lower_limit: self.spec.genesis_slot, + let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 { + // Genesis archive node: no anchor because we *will* store all states. + None + } else { + Some(AnchorInfo { + anchor_slot, + oldest_block_slot: anchor_slot, + oldest_block_parent: block.parent_root(), + state_upper_limit, + state_lower_limit: self.spec.genesis_slot, + }) }; - self.compare_and_set_anchor_info(None, Some(anchor_info)) + self.compare_and_set_anchor_info(None, anchor_info) } /// Get a clone of the store's anchor info. @@ -1304,6 +1633,70 @@ impl, Cold: ItemStore> HotColdDB .map(|a| a.anchor_slot) } + /// Initialize the `BlobInfo` when starting from genesis or a checkpoint. + pub fn init_blob_info(&self, anchor_slot: Slot) -> Result { + let oldest_blob_slot = self.spec.deneb_fork_epoch.map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let blob_info = BlobInfo { + oldest_blob_slot, + blobs_db: self.blobs_db.is_some(), + }; + self.compare_and_set_blob_info(self.get_blob_info(), blob_info) + } + + /// Get a clone of the store's blob info. + /// + /// To do mutations, use `compare_and_set_blob_info`. + pub fn get_blob_info(&self) -> BlobInfo { + self.blob_info.read_recursive().clone() + } + + /// Atomically update the blob info from `prev_value` to `new_value`. + /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return a `BlobInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. 
+ pub fn compare_and_set_blob_info( + &self, + prev_value: BlobInfo, + new_value: BlobInfo, + ) -> Result { + let mut blob_info = self.blob_info.write(); + if *blob_info == prev_value { + let kv_op = self.store_blob_info_in_batch(&new_value); + *blob_info = new_value; + Ok(kv_op) + } else { + Err(Error::BlobInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_blob_info`, but also writes the blob info to disk immediately. + pub fn compare_and_set_blob_info_with_write( + &self, + prev_value: BlobInfo, + new_value: BlobInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_blob_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the blob info from disk, but do not set `self.blob_info`. + fn load_blob_info(&self) -> Result, Error> { + self.hot_db.get(&BLOB_INFO_KEY) + } + + /// Store the given `blob_info` to disk. + /// + /// The argument is intended to be `self.blob_info`, but is passed manually to avoid issues + /// with recursive locking. + fn store_blob_info_in_batch(&self, blob_info: &BlobInfo) -> KeyValueStoreOp { + blob_info.as_kv_store_op(BLOB_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -1361,11 +1754,26 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.put(&CONFIG_KEY, &self.config.as_disk_config()) } - /// Load the split point from disk. - fn load_split(&self) -> Result, Error> { + /// Load the split point from disk, sans block root. + fn load_split_partial(&self) -> Result, Error> { self.hot_db.get(&SPLIT_KEY) } + /// Load the split point from disk, including block root. + fn load_split(&self) -> Result, Error> { + match self.load_split_partial()? { + Some(mut split) => { + // Load the hot state summary to get the block root. 
+ let summary = self.load_hot_state_summary(&split.state_root)?.ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; + split.block_root = summary.latest_block_root; + Ok(Some(split)) + } + None => Ok(None), + } + } + /// Stage the split for storage to disk. pub fn store_split_in_batch(&self) -> KeyValueStoreOp { self.split.read_recursive().as_kv_store_op(SPLIT_KEY) @@ -1424,6 +1832,12 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } + /// Verify that a parsed config is valid. + fn verify_config(config: &StoreConfig) -> Result<(), HotColdDBError> { + Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; + Self::verify_epochs_per_blob_prune(config.epochs_per_blob_prune) + } + /// Check that the restore point frequency is valid. /// /// Specifically, check that it is: @@ -1454,6 +1868,16 @@ impl, Cold: ItemStore> HotColdDB } } + // Check that epochs_per_blob_prune is at least 1 epoch to avoid attempting to prune the same + // epochs over and over again. + fn verify_epochs_per_blob_prune(epochs_per_blob_prune: u64) -> Result<(), HotColdDBError> { + if epochs_per_blob_prune > 0 { + Ok(()) + } else { + Err(HotColdDBError::ZeroEpochsPerBlobPrune) + } + } + /// Run a compaction pass to free up space used by deleted states. pub fn compact(&self) -> Result<(), Error> { self.hot_db.compact()?; @@ -1500,6 +1924,25 @@ impl, Cold: ItemStore> HotColdDB ) } + /// Update the linear array of frozen block roots with the block root for several skipped slots. + /// + /// Write the block root at all slots from `start_slot` (inclusive) to `end_slot` (exclusive). 
+ pub fn store_frozen_block_root_at_skip_slots( + &self, + start_slot: Slot, + end_slot: Slot, + block_root: Hash256, + ) -> Result, Error> { + let mut ops = vec![]; + let mut block_root_writer = + ChunkWriter::::new(&self.cold_db, start_slot.as_usize())?; + for slot in start_slot.as_usize()..end_slot.as_usize() { + block_root_writer.set(slot, block_root, &mut ops)?; + } + block_root_writer.write(&mut ops)?; + Ok(ops) + } + /// Try to prune all execution payloads, returning early if there is no need to prune. pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> { let split = self.get_split_info(); @@ -1598,7 +2041,7 @@ impl, Cold: ItemStore> HotColdDB } } let payloads_pruned = ops.len(); - self.do_atomically(ops)?; + self.do_atomically_with_block_and_blobs_cache(ops)?; info!( self.log, "Execution payload pruning complete"; @@ -1606,47 +2049,253 @@ impl, Cold: ItemStore> HotColdDB ); Ok(()) } + + /// Try to prune blobs, approximating the current epoch from the split slot. + pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> { + let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else { + debug!(self.log, "Deneb fork is disabled"); + return Ok(()); + }; + // The current epoch is >= split_epoch + 2. It could be greater if the database is + // configured to delay updating the split or finalization has ceased. In this instance we + // choose to also delay the pruning of blobs (we never prune without finalization anyway). + let min_current_epoch = self.get_split_slot().epoch(E::slots_per_epoch()) + 2; + let min_data_availability_boundary = std::cmp::max( + deneb_fork_epoch, + min_current_epoch.saturating_sub(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), + ); + + self.try_prune_blobs(force, min_data_availability_boundary) + } + + /// Try to prune blobs older than the data availability boundary. + /// + /// Blobs from the epoch `data_availability_boundary - blob_prune_margin_epochs` are retained. 
+ /// This epoch is an _exclusive_ endpoint for the pruning process. + /// + /// This function only supports pruning blobs older than the split point, which is older than + /// (or equal to) finalization. Pruning blobs newer than finalization is not supported. + /// + /// This function also assumes that the split is stationary while it runs. It should only be + /// run from the migrator thread (where `migrate_database` runs) or the database manager. + pub fn try_prune_blobs( + &self, + force: bool, + data_availability_boundary: Epoch, + ) -> Result<(), Error> { + if self.spec.deneb_fork_epoch.is_none() { + debug!(self.log, "Deneb fork is disabled"); + return Ok(()); + } + + let pruning_enabled = self.get_config().prune_blobs; + let margin_epochs = self.get_config().blob_prune_margin_epochs; + let epochs_per_blob_prune = self.get_config().epochs_per_blob_prune; + + if !force && !pruning_enabled { + debug!( + self.log, + "Blob pruning is disabled"; + "prune_blobs" => pruning_enabled + ); + return Ok(()); + } + + let blob_info = self.get_blob_info(); + let Some(oldest_blob_slot) = blob_info.oldest_blob_slot else { + error!(self.log, "Slot of oldest blob is not known"); + return Err(HotColdDBError::BlobPruneLogicError.into()); + }; + + // Start pruning from the epoch of the oldest blob stored. + // The start epoch is inclusive (blobs in this epoch will be pruned). + let start_epoch = oldest_blob_slot.epoch(E::slots_per_epoch()); + + // Prune blobs up until the `data_availability_boundary - margin` or the split + // slot's epoch, whichever is older. We can't prune blobs newer than the split. + // The end epoch is also inclusive (blobs in this epoch will be pruned). 
+ let split = self.get_split_info(); + let end_epoch = std::cmp::min( + data_availability_boundary - margin_epochs - 1, + split.slot.epoch(E::slots_per_epoch()) - 1, + ); + let end_slot = end_epoch.end_slot(E::slots_per_epoch()); + + let can_prune = end_epoch != 0 && start_epoch <= end_epoch; + let should_prune = start_epoch + epochs_per_blob_prune <= end_epoch + 1; + + if !force && !should_prune || !can_prune { + debug!( + self.log, + "Blobs are pruned"; + "oldest_blob_slot" => oldest_blob_slot, + "data_availability_boundary" => data_availability_boundary, + "split_slot" => split.slot, + "end_epoch" => end_epoch, + "start_epoch" => start_epoch, + ); + return Ok(()); + } + + // Sanity checks. + if let Some(anchor) = self.get_anchor_info() { + if oldest_blob_slot < anchor.oldest_block_slot { + error!( + self.log, + "Oldest blob is older than oldest block"; + "oldest_blob_slot" => oldest_blob_slot, + "oldest_block_slot" => anchor.oldest_block_slot + ); + return Err(HotColdDBError::BlobPruneLogicError.into()); + } + } + + // Iterate block roots forwards from the oldest blob slot. + debug!( + self.log, + "Pruning blobs"; + "start_epoch" => start_epoch, + "end_epoch" => end_epoch, + "data_availability_boundary" => data_availability_boundary, + ); + + let mut ops = vec![]; + let mut last_pruned_block_root = None; + + for res in self.forwards_block_roots_iterator_until( + oldest_blob_slot, + end_slot, + || { + let (_, split_state) = self + .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; + + Ok((split_state, split.block_root)) + }, + &self.spec, + )? { + let (block_root, slot) = match res { + Ok(tuple) => tuple, + Err(e) => { + warn!( + self.log, + "Stopping blob pruning early"; + "error" => ?e, + ); + break; + } + }; + + if Some(block_root) != last_pruned_block_root && self.blobs_exist(&block_root)? 
{ + trace!( + self.log, + "Pruning blobs of block"; + "slot" => slot, + "block_root" => ?block_root, + ); + last_pruned_block_root = Some(block_root); + ops.push(StoreOp::DeleteBlobs(block_root)); + } + + if slot >= end_slot { + break; + } + } + let blob_lists_pruned = ops.len(); + let new_blob_info = BlobInfo { + oldest_blob_slot: Some(end_slot + 1), + blobs_db: blob_info.blobs_db, + }; + let update_blob_info = self.compare_and_set_blob_info(blob_info, new_blob_info)?; + ops.push(StoreOp::KeyValueOp(update_blob_info)); + + self.do_atomically_with_block_and_blobs_cache(ops)?; + debug!( + self.log, + "Blob pruning complete"; + "blob_lists_pruned" => blob_lists_pruned, + ); + + Ok(()) + } + + pub fn heal_freezer_block_roots(&self) -> Result<(), Error> { + let split = self.get_split_info(); + let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point + * self.config.slots_per_restore_point; + + // Load split state (which has access to block roots). + let (_, split_state) = self + .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + ))?; + + let mut batch = vec![]; + let mut chunk_writer = ChunkWriter::::new( + &self.cold_db, + last_restore_point_slot.as_usize(), + )?; + + for slot in (last_restore_point_slot.as_u64()..split.slot.as_u64()).map(Slot::new) { + let block_root = *split_state.get_block_root(slot)?; + chunk_writer.set(slot.as_usize(), block_root, &mut batch)?; + } + chunk_writer.write(&mut batch)?; + self.cold_db.do_atomically(batch)?; + + Ok(()) + } } /// Advance the split point of the store, moving new finalized states to the freezer. 
pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( store: Arc<HotColdDB<E, Hot, Cold>>, - frozen_head_root: Hash256, - frozen_head: &BeaconState<E>, + finalized_state_root: Hash256, + finalized_block_root: Hash256, + finalized_state: &BeaconState<E>, ) -> Result<(), Error> { debug!( store.log, "Freezer migration started"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); // 0. Check that the migration is sensible. - // The new frozen head must increase the current split slot, and lie on an epoch + // The new finalized state must increase the current split slot, and lie on an epoch // boundary (in order for the hot state summary scheme to work). let current_split_slot = store.split.read_recursive().slot; - let anchor_slot = store - .anchor_info - .read_recursive() - .as_ref() - .map(|a| a.anchor_slot); + let anchor_info = store.anchor_info.read_recursive().clone(); + let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot); - if frozen_head.slot() < current_split_slot { + if finalized_state.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { current_split_slot, - proposed_split_slot: frozen_head.slot(), + proposed_split_slot: finalized_state.slot(), } .into()); } - if frozen_head.slot() % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into()); + if finalized_state.slot() % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } - let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new(); + let mut hot_db_ops = vec![]; + let mut cold_db_ops = vec![]; - // 1. Copy all of the states between the head and the split slot, from the hot DB + // Chunk writer for the linear block roots in the freezer DB. + // Start at the new upper limit because we iterate backwards. + let new_frozen_block_root_upper_limit = finalized_state.slot().as_usize().saturating_sub(1); + let mut block_root_writer = + ChunkWriter::<BlockRoots, _, _>::new(&store.cold_db, new_frozen_block_root_upper_limit)?; + + // 1. 
Copy all of the states between the new finalized state and the split slot, from the hot DB // to the cold DB. Delete the execution payloads of these now-finalized blocks. - let state_root_iter = RootsIterator::new(&store, frozen_head); + let state_root_iter = RootsIterator::new(&store, finalized_state); for maybe_tuple in state_root_iter.take_while(|result| match result { Ok((_, _, slot)) => { slot >= &current_split_slot @@ -1656,13 +2305,31 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( }) { let (block_root, state_root, slot) = maybe_tuple?; - let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new(); + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. The payload fetching code is also + // forgiving of missing payloads. + if store.config.prune_payloads { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + } - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)? - .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; + // Delete the old summary, and the full state if we lie on an epoch boundary. + hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + // Store the block root for this slot in the linear array of frozen block roots. + block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?; + + // Do not try to store states if a restore point is yet to be stored, or will never be + // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state + // which always needs to be copied from the hot DB to the freezer and should not be deleted. 
+ if slot != 0 + && anchor_info + .as_ref() + .map_or(false, |anchor| slot < anchor.state_upper_limit) + { + debug!(store.log, "Pruning finalized state"; "slot" => slot); + + continue; } // Store a pointer from this state root to its slot, so we can later reconstruct states @@ -1671,24 +2338,26 @@ pub fn migrate_database, Cold: ItemStore>( let op = cold_state_summary.as_kv_store_op(state_root); cold_db_ops.push(op); - // There are data dependencies between calls to `store_cold_state()` that prevent us from - // doing one big call to `store.cold_db.do_atomically()` at end of the loop. - store.cold_db.do_atomically(cold_db_ops)?; + if slot % store.config.slots_per_restore_point == 0 { + let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? + .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; - // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + // Commit the batch of cold DB ops whenever a full state is written. Each state stored + // may read the linear fields of previous states stored. + store + .cold_db + .do_atomically(std::mem::take(&mut cold_db_ops))?; } } + // Finish writing the block roots and commit the remaining cold DB ops. + block_root_writer.write(&mut cold_db_ops)?; + store.cold_db.do_atomically(cold_db_ops)?; + // Warning: Critical section. 
We have to take care not to put any of the two databases in an - // inconsistent state if the OS process dies at any point during the freezeing + // inconsistent state if the OS process dies at any point during the freezing // procedure. // // Since it is pretty much impossible to be atomic across more than one database, we trade @@ -1704,7 +2373,7 @@ pub fn migrate_database, Cold: ItemStore>( let mut split_guard = store.split.write(); let latest_split_slot = split_guard.slot; - // Detect a sitation where the split point is (erroneously) changed from more than one + // Detect a situation where the split point is (erroneously) changed from more than one // place in code. if latest_split_slot != current_split_slot { error!( @@ -1724,8 +2393,9 @@ pub fn migrate_database, Cold: ItemStore>( // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. let split = Split { - slot: frozen_head.slot(), - state_root: frozen_head_root, + slot: finalized_state.slot(), + state_root: finalized_state_root, + block_root: finalized_block_root, }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; @@ -1736,22 +2406,30 @@ pub fn migrate_database, Cold: ItemStore>( } // Delete the states from the hot database if we got this far. - store.do_atomically(hot_db_ops)?; + store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; debug!( store.log, "Freezer migration complete"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); Ok(()) } /// Struct for storing the split slot and state root in the database. -#[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Encode, Decode, Deserialize, Serialize)] pub struct Split { - pub(crate) slot: Slot, - pub(crate) state_root: Hash256, + pub slot: Slot, + pub state_root: Hash256, + /// The block root of the split state. 
+ /// + /// This is used to provide special handling for the split state in the case where there are + /// skipped slots. The split state will *always* be the advanced state, so callers + /// who only have the finalized block root should use `get_advanced_hot_state` to get this state, + /// rather than fetching `block.state_root()` (the unaligned state) which will have been pruned. + #[ssz(skip_serializing, skip_deserializing)] + pub block_root: Hash256, } impl StoreItem for Split { diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index b5753f3797..6445dad388 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,6 +1,9 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; -use types::{EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; +use types::{ + BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadMerge, +}; macro_rules! impl_store_item { ($ty_name:ident) => { @@ -21,6 +24,8 @@ macro_rules! impl_store_item { } impl_store_item!(ExecutionPayloadMerge); impl_store_item!(ExecutionPayloadCapella); +impl_store_item!(ExecutionPayloadDeneb); +impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. 
/// @@ -36,9 +41,13 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - ExecutionPayloadCapella::from_ssz_bytes(bytes) - .map(Self::Capella) - .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + ExecutionPayloadDeneb::from_ssz_bytes(bytes) + .map(Self::Deneb) + .or_else(|_| { + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge)) + }) .map_err(Into::into) } } diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 86bd4ffacc..0e4cd0e3b1 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -1,7 +1,6 @@ use super::*; use crate::hot_cold_store::HotColdDBError; use crate::metrics; -use db_key::Key; use leveldb::compaction::Compaction; use leveldb::database::batch::{Batch, Writebatch}; use leveldb::database::kv::KV; @@ -167,7 +166,7 @@ impl KeyValueStore for LevelDB { ) }; - for (start_key, end_key) in vec![ + for (start_key, end_key) in [ endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), ] { @@ -176,10 +175,8 @@ impl KeyValueStore for LevelDB { Ok(()) } - /// Iterate through all keys and values in a particular column. 
- fn iter_column(&self, column: DBColumn) -> ColumnIter { - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); let iter = self.db.iter(self.read_options()); iter.seek(&start_key); @@ -187,13 +184,42 @@ impl KeyValueStore for LevelDB { Box::new( iter.take_while(move |(key, _)| key.matches_column(column)) .map(move |(bytes_key, value)| { - let key = - bytes_key - .remove_column(column) - .ok_or(HotColdDBError::IterationError { - unexpected_key: bytes_key, - })?; - Ok((key, value)) + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + Ok((K::from_bytes(key)?, value)) + }), + ) + } + + fn iter_raw_entries(&self, column: DBColumn, prefix: &[u8]) -> RawEntryIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); + + let iter = self.db.iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |(key, _)| key.key.starts_with(start_key.key.as_slice())) + .map(move |(bytes_key, value)| { + let subkey = &bytes_key.key[column.as_bytes().len()..]; + Ok((Vec::from(subkey), value)) + }), + ) + } + + fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.key.starts_with(start_key.key.as_slice())) + .map(move |bytes_key| { + let subkey = &bytes_key.key[column.as_bytes().len()..]; + Ok(Vec::from(subkey)) }), ) } @@ -224,12 +250,12 @@ impl KeyValueStore for LevelDB { impl ItemStore for LevelDB {} /// Used for keying leveldb. 
-#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct BytesKey { key: Vec, } -impl Key for BytesKey { +impl db_key::Key for BytesKey { fn from_u8(key: &[u8]) -> Self { Self { key: key.to_vec() } } @@ -245,12 +271,20 @@ impl BytesKey { self.key.starts_with(column.as_bytes()) } - /// Remove the column from a key, returning its `Hash256` portion. + /// Remove the column from a 32 byte key, yielding the `Hash256` key. pub fn remove_column(&self, column: DBColumn) -> Option { + let key = self.remove_column_variable(column)?; + (column.key_size() == 32).then(|| Hash256::from_slice(key)) + } + + /// Remove the column from a key. + /// + /// Will return `None` if the value doesn't match the column or has the wrong length. + pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { if self.matches_column(column) { let subkey = &self.key[column.as_bytes().len()..]; - if subkey.len() == 32 { - return Some(Hash256::from_slice(subkey)); + if subkey.len() == column.key_size() { + return Some(subkey); } } None diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ee01fa1ae1..0f54b4664a 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -34,6 +34,7 @@ pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use self::partial_beacon_state::PartialBeaconState; +pub use crate::metadata::BlobInfo; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; @@ -43,9 +44,12 @@ use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; -pub type ColumnIter<'a> = Box), Error>> + 'a>; +pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a> = Box> + 'a>; +pub type RawEntryIter<'a> = Box, Vec), Error>> + 'a>; +pub type RawKeyIter<'a> = Box, Error>> + 'a>; + pub trait 
KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; @@ -80,15 +84,41 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error>; /// Iterate through all keys and values in a particular column. - fn iter_column(&self, _column: DBColumn) -> ColumnIter { - // Default impl for non LevelDB databases + fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter; + + fn iter_raw_entries(&self, _column: DBColumn, _prefix: &[u8]) -> RawEntryIter { + Box::new(std::iter::empty()) + } + + fn iter_raw_keys(&self, _column: DBColumn, _prefix: &[u8]) -> RawKeyIter { Box::new(std::iter::empty()) } /// Iterate through all keys in a particular column. - fn iter_column_keys(&self, _column: DBColumn) -> ColumnKeyIter { - // Default impl for non LevelDB databases - Box::new(std::iter::empty()) + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; +} + +pub trait Key: Sized + 'static { + fn from_bytes(key: &[u8]) -> Result; +} + +impl Key for Hash256 { + fn from_bytes(key: &[u8]) -> Result { + if key.len() == 32 { + Ok(Hash256::from_slice(key)) + } else { + Err(Error::InvalidKey) + } + } +} + +impl Key for Vec { + fn from_bytes(key: &[u8]) -> Result { + Ok(key.to_vec()) } } @@ -99,6 +129,7 @@ pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { } #[must_use] +#[derive(Clone)] pub enum KeyValueStoreOp { PutKeyValue(Vec, Vec), DeleteKey(Vec), @@ -152,13 +183,16 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. 
/// See also https://github.com/sigp/lighthouse/issues/692 +#[derive(Clone)] pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), + PutBlobs(Hash256, BlobSidecarList), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), + DeleteBlobs(Hash256), DeleteState(Hash256, Option), DeleteExecutionPayload(Hash256), KeyValueOp(KeyValueStoreOp), @@ -172,6 +206,8 @@ pub enum DBColumn { BeaconMeta, #[strum(serialize = "blk")] BeaconBlock, + #[strum(serialize = "blb")] + BeaconBlob, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] BeaconState, @@ -214,6 +250,8 @@ pub enum DBColumn { OptimisticTransitionBlock, #[strum(serialize = "bhs")] BeaconHistoricalSummaries, + #[strum(serialize = "olc")] + OverflowLRUCache, } /// A block from the database, which might have an execution payload or not. @@ -230,6 +268,35 @@ impl DBColumn { pub fn as_bytes(self) -> &'static [u8] { self.as_str().as_bytes() } + + /// Most database keys are 32 bytes, but some freezer DB keys are 8 bytes. + /// + /// This function returns the number of bytes used by keys in a given column. + pub fn key_size(self) -> usize { + match self { + Self::OverflowLRUCache => 40, + Self::BeaconMeta + | Self::BeaconBlock + | Self::BeaconState + | Self::BeaconBlob + | Self::BeaconStateSummary + | Self::BeaconStateTemporary + | Self::ExecPayload + | Self::BeaconChain + | Self::OpPool + | Self::Eth1Cache + | Self::ForkChoice + | Self::PubkeyCache + | Self::BeaconRestorePoint + | Self::DhtEnrs + | Self::OptimisticTransitionBlock => 32, + Self::BeaconBlockRoots + | Self::BeaconStateRoots + | Self::BeaconHistoricalRoots + | Self::BeaconHistoricalSummaries + | Self::BeaconRandaoMixes => 8, + } + } } /// An item that may stored in a `Store` by serializing and deserializing from bytes. 
diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 1473f59a4e..7a276a1fad 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,17 +1,17 @@ -use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp}; -use crate::{ColumnIter, DBColumn}; +use crate::{ + get_key_for_col, leveldb_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, + ItemStore, Key, KeyValueStore, KeyValueStoreOp, +}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::{HashMap, HashSet}; +use std::collections::BTreeMap; use std::marker::PhantomData; use types::*; -type DBHashMap = HashMap, Vec>; -type DBKeyMap = HashMap, HashSet>>; +type DBMap = BTreeMap>; -/// A thread-safe `HashMap` wrapper. +/// A thread-safe `BTreeMap` wrapper. pub struct MemoryStore { - db: RwLock, - col_keys: RwLock, + db: RwLock, transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -20,36 +20,24 @@ impl MemoryStore { /// Create a new, empty database. pub fn open() -> Self { Self { - db: RwLock::new(HashMap::new()), - col_keys: RwLock::new(HashMap::new()), + db: RwLock::new(BTreeMap::new()), transaction_mutex: Mutex::new(()), _phantom: PhantomData, } } - - fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - let mut col = col.as_bytes().to_vec(); - col.append(&mut key.to_vec()); - col - } } impl KeyValueStore for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().get(&column_key).cloned()) } /// Puts a key in the database. 
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().insert(column_key, val.to_vec()); - self.col_keys - .write() - .entry(col.as_bytes().to_vec()) - .or_insert_with(HashSet::new) - .insert(key.to_vec()); Ok(()) } @@ -64,18 +52,14 @@ impl KeyValueStore for MemoryStore { /// Return true if some key exists in some column. fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().remove(&column_key); - self.col_keys - .write() - .get_mut(&col.as_bytes().to_vec()) - .map(|set| set.remove(key)); Ok(()) } @@ -83,35 +67,41 @@ impl KeyValueStore for MemoryStore { for op in batch { match op { KeyValueStoreOp::PutKeyValue(key, value) => { - self.db.write().insert(key, value); + self.db.write().insert(BytesKey::from_vec(key), value); } - KeyValueStoreOp::DeleteKey(hash) => { - self.db.write().remove(&hash); + KeyValueStoreOp::DeleteKey(key) => { + self.db.write().remove(&BytesKey::from_vec(key)); } } } Ok(()) } - // pub type ColumnIter<'a> = Box), Error>> + 'a>; - fn iter_column(&self, column: DBColumn) -> ColumnIter { + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a + // reference to the lock guard across calls to `.next()`. This would be require a + // struct with a field (the iterator) which references another field (the lock guard). 
+ let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), from)); let col = column.as_str(); - if let Some(keys) = self - .col_keys + let keys = self + .db .read() - .get(col.as_bytes()) - .map(|set| set.iter().cloned().collect::>()) - { - Box::new(keys.into_iter().filter_map(move |key| { - let hash = Hash256::from_slice(&key); - self.get_bytes(col, &key) - .transpose() - .map(|res| res.map(|bytes| (hash, bytes))) - })) - } else { - Box::new(std::iter::empty()) - } + .range(start_key..) + .take_while(|(k, _)| k.remove_column_variable(column).is_some()) + .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) + .collect::>(); + Box::new(keys.into_iter().filter_map(move |key| { + self.get_bytes(col, &key).transpose().map(|res| { + let k = K::from_bytes(&key)?; + let v = res?; + Ok((k, v)) + }) + })) + } + + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } fn begin_rw_transaction(&self) -> MutexGuard<()> { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 6f50d7038f..124c9a2f58 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -1,10 +1,10 @@ use crate::{DBColumn, Error, StoreItem}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(17); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(18); // All the keys that get stored under the `BeaconMeta` column. 
// @@ -15,6 +15,10 @@ pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2); pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); +pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); + +/// State upper limit value used to indicate that a node is not storing historic states. +pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct SchemaVersion(pub u64); @@ -119,3 +123,32 @@ impl StoreItem for AnchorInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to blob sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct BlobInfo { + /// The slot after which blobs are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the Deneb fork, from which blobs + /// will be available. + /// + /// If the `oldest_blob_slot` is `None` then this means that the Deneb fork epoch is not yet + /// known. + pub oldest_blob_slot: Option, + /// A separate blobs database is in use. + pub blobs_db: bool, +} + +impl StoreItem for BlobInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 72c5e61969..2d901fdd93 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -101,6 +101,10 @@ lazy_static! 
{ "store_beacon_block_cache_hit_total", "Number of hits to the store's block cache" ); + pub static ref BEACON_BLOBS_CACHE_HIT_COUNT: Result = try_create_int_counter( + "store_beacon_blobs_cache_hit_total", + "Number of hits to the store's blob cache" + ); pub static ref BEACON_BLOCK_READ_TIMES: Result = try_create_histogram( "store_beacon_block_read_overhead_seconds", "Overhead on reading a beacon block from the DB (e.g., decoding)" diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 9f2532d0a7..1fb5751a0a 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -15,7 +15,7 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. #[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -67,9 +67,9 @@ where pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_epoch_participation: VariableList, // Finality @@ -79,13 +79,13 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub next_sync_committee: Arc>, // Execution @@ -99,15 +99,20 
@@ where partial_getter(rename = "latest_execution_payload_header_capella") )] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, + #[superstruct( + only(Deneb), + partial_getter(rename = "latest_execution_payload_header_deneb") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, // Capella - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub next_withdrawal_index: u64, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub next_withdrawal_validator_index: u64, #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub historical_summaries: Option>, } @@ -222,6 +227,23 @@ impl PartialBeaconState { ], [historical_summaries] ), + BeaconState::Deneb(s) => impl_from_state_forgetful!( + s, + outer, + Deneb, + PartialBeaconStateDeneb, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] + ), } } @@ -451,6 +473,22 @@ impl TryInto> for PartialBeaconState { ], [historical_summaries] ), + PartialBeaconState::Deneb(inner) => impl_try_into_beacon_state!( + inner, + Deneb, + BeaconStateDeneb, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index + ], + [historical_summaries] + ), }; Ok(state) } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index bac5d3cc82..8fe13777ac 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -17,9 +17,7 @@ where Cold: ItemStore, { pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { - let mut anchor = if let Some(anchor) = 
self.get_anchor_info() { - anchor - } else { + let Some(mut anchor) = self.get_anchor_info() else { // Nothing to do, history is complete. return Ok(()); }; diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index bd20f24ee6..afb93f3657 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -2,11 +2,11 @@ name = "timer" version = "0.2.0" authors = ["Sigma Prime "] -edition = "2021" +edition = { workspace = true } [dependencies] -beacon_chain = { path = "../beacon_chain" } -slot_clock = { path = "../../common/slot_clock" } -tokio = { version = "1.14.0", features = ["full"] } -slog = "2.5.2" -task_executor = { path = "../../common/task_executor" } +beacon_chain = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +slog = { workspace = true } +task_executor = { workspace = true } diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 944846c863..7c2db69604 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -16,12 +16,10 @@ pub fn spawn_timer( let log = executor.log().clone(); let timer_future = async move { loop { - let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => duration, - None => { - warn!(log, "Unable to determine duration to next slot"); - return; - } + let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() + else { + warn!(log, "Unable to determine duration to next slot"); + return; }; sleep(duration_to_next_slot).await; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 507896f431..3f58d8aa45 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -56,3 +56,4 @@ * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) +* [Protocol Developers](./developers.md) \ No newline at end of file diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index ba07a6f87f..b1f05450c4 
100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -121,13 +121,13 @@ nodes that do not run directly on a public network. To listen over only IPv6 use the same parameters as done when listening over IPv4 only: -- `--listen-addresses :: --port 9909` will listen over IPv6 using port `9909` for +- `--listen-address :: --port 9909` will listen over IPv6 using port `9909` for TCP and UDP. -- `--listen-addresses :: --port 9909 --discovery-port 9999` will listen over +- `--listen-address :: --port 9909 --discovery-port 9999` will listen over IPv6 using port `9909` for TCP and port `9999` for UDP. To listen over both IPv4 and IPv6: -- Set two listening addresses using the `--listen-addresses` flag twice ensuring +- Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note that this behaviour differs from the Ipv6 only case described above. @@ -139,16 +139,16 @@ To listen over both IPv4 and IPv6: ##### Configuration Examples -- `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909` will listen +- `--listen-address :: --listen-address 0.0.0.0 --port 9909` will listen over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but using the default value for `--port6` for UDP and TCP (`9090`). -- `--listen-addresses :: --listen-addresses --port 9909 --discovery-port6 9999` +- `--listen-address :: --listen-address --port 9909 --discovery-port6 9999` will have the same configuration as before except for the IPv6 UDP socket, which will use port `9999`. #### Configuring Lighthouse to advertise IPv6 reachable addresses Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively, -and dual stack using one socket for IPv6 and another socket for IPv6. In both +and dual stack using one socket for IPv4 and another socket for IPv6. 
In both scenarios, the previous sections still apply. In summary: > Beacon nodes must advertise their publicly reachable socket address diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 11a006493a..519ce57055 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -126,6 +126,22 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " ``` You can replace `1` in the above command with the validator index that you would like to query. Other API queries can be done similarly by changing the link according to the Beacon API. +### Events API +The [events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) provides information such as the payload attributes that are of interest to block builders and relays. To query the payload attributes, it is necessary to run the Lighthouse beacon node with the flag `--always-prepare-payload`. It is also recommended to add the flag `--prepare-payload-lookahead 8000` which configures the payload attributes to be sent at 4s into each slot (or 8s before the start of the next slot). 
An example of the command is: + +```bash +curl -X 'GET' \ +'http://localhost:5052/eth/v1/events?topics=payload_attributes' \ +-H 'accept: text/event-stream' +``` + +An example of response is: + +```json +data:{"version":"capella","data":{"proposal_slot":"11047","proposer_index":"336057","parent_block_root":"0x26f8999d270dd4677c2a1c815361707157a531f6c599f78fa942c98b545e1799","parent_block_number":"9259","parent_block_hash":"0x7fb788cd7afa814e578afa00a3edd250cdd4c8e35c22badd327d981b5bda33d2","payload_attributes":{"timestamp":"1696034964","prev_randao":"0xeee34d7a3f6b99ade6c6a881046c9c0e96baab2ed9469102d46eb8d6e4fde14c","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"40705","validator_index":"360712","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1202941"},{"index":"40706","validator_index":"360713","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1201138"},{"index":"40707","validator_index":"360714","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1215255"},{"index":"40708","validator_index":"360715","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1161977"},{"index":"40709","validator_index":"360716","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1257278"},{"index":"40710","validator_index":"360717","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1247740"},{"index":"40711","validator_index":"360718","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1204337"},{"index":"40712","validator_index":"360719","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1183575"},{"index":"40713","validator_index":"360720","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1157785"},{"index":"40714","validator_index":"360721","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1143371"},{"index":"40715","validator_index":"360722","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde",
"amount":"1234787"},{"index":"40716","validator_index":"360723","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1286673"},{"index":"40717","validator_index":"360724","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1419241"},{"index":"40718","validator_index":"360725","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1231015"},{"index":"40719","validator_index":"360726","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1304321"},{"index":"40720","validator_index":"360727","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1236543"}]}}} +``` + + ## Serving the HTTP API over TLS > **Warning**: This feature is currently experimental. diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 7626d64013..32c967c9e0 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -125,7 +125,7 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap ### `/lighthouse/ui/validator_metrics` -Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. +Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. ```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -356,7 +356,7 @@ health of the execution node that the beacon node is connected to. 
- `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - For correct execution client voting this timestamp should be later than the -`voting_target_timestamp`. +`voting_target_timestamp`. - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the @@ -480,9 +480,9 @@ curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq ### `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list -of objects, each including the validator index, epoch, and `is_live` status of a requested validator. +of objects, each including the validator index, epoch, and `is_live` status of a requested validator. -This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. +This endpoint is used in doppelganger detection, and can only provide accurate information for the current, previous, or next epoch. > Note that for this API, if you insert an arbitrary epoch other than the previous, current or next epoch of the network, it will return `"code:400"` and `BAD_REQUEST`. @@ -547,26 +547,6 @@ reconstruction has yet to be completed. For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). -### `/lighthouse/database/reconstruct` - -Instruct Lighthouse to begin reconstructing historic states, see -[Reconstructing States](./checkpoint-sync.md#reconstructing-states). This is an alternative -to the `--reconstruct-historic-states` flag. - -``` -curl -X POST "http://localhost:5052/lighthouse/database/reconstruct" | jq -``` - -```json -"success" -``` - -The endpoint will return immediately. 
See the beacon node logs for an indication of progress. - -### `/lighthouse/database/historical_blocks` - -Manually provide `SignedBeaconBlock`s to backfill the database. This is intended -for use by Lighthouse developers during testing only. ### `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: @@ -719,7 +699,7 @@ The first few lines of the response would look like: ] } } -] +] ``` Caveats: @@ -816,4 +796,4 @@ An open port will return: ```json { "data": true -} \ No newline at end of file +} diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index ee0cfd2001..f41625ad88 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -243,6 +243,7 @@ Example Response Body "INACTIVITY_SCORE_RECOVERY_RATE": "16", "EJECTION_BALANCE": "16000000000", "MIN_PER_EPOCH_CHURN_LIMIT": "4", + "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT": "8", "CHURN_LIMIT_QUOTIENT": "65536", "PROPOSER_SCORE_BOOST": "40", "DEPOSIT_CHAIN_ID": "5", diff --git a/book/src/builders.md b/book/src/builders.md index 2be4841ddf..d5cb4c61b2 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -21,9 +21,7 @@ The beacon node and validator client each require a new flag for lighthouse to b ``` lighthouse bn --builder https://mainnet-builder.test ``` -The `--builder` flag will cause the beacon node to query the provided URL during block production for a block -payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local -execution engine and produce a block using transactions gathered and verified locally. 
+The `--builder` flag will cause the beacon node to simultaneously query the provided URL and the local execution engine during block production for a block payload with stubbed-out transactions. If either fails, the successful result will be used; If both succeed, the more profitable result will be used. The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for @@ -258,6 +256,9 @@ used in place of one from the builder: INFO Reconstructing a full block using a local payload ``` +## Information for block builders and relays +Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) + [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost [gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 5788382894..0c375a5f00 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -75,7 +75,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will > Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node. 
-> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. +> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. ## FAQ @@ -116,8 +116,9 @@ states: database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. The minimum value is 0, indicating that the genesis state is always available. -* `state_upper_limit`: All states with slots _greater than or equal to_ this value are available - in the database. +* `state_upper_limit`: All states with slots _greater than or equal to_ `min(split.slot, + state_upper_limit)` are available in the database. In the case where the `state_upper_limit` is + higher than the `split.slot`, this means states are not being written to the freezer database. Reconstruction runs from the state lower limit to the upper limit, narrowing the window of unavailable states as it goes. It will log messages like the following to show its progress: @@ -153,18 +154,8 @@ To manually specify a checkpoint use the following two flags: * `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob * `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob -_Both_ the state and block must be provided and **must** adhere to the [Alignment -Requirements](#alignment-requirements) described below. 
- -### Alignment Requirements - -* The block must be a finalized block from an epoch boundary, i.e. `block.slot() % 32 == 0`. -* The state must be the state corresponding to `block` with `state.slot() == block.slot()` - and `state.hash_tree_root() == block.state_root()`. - -These requirements are imposed to align with Lighthouse's database schema, and notably exclude -finalized blocks from skipped slots. You can avoid alignment issues by using -[Automatic Checkpoint Sync](#automatic-checkpoint-sync), which will search for a suitable block -and state pair. +_Both_ the state and block must be provided and the state **must** match the block. The +state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, +in which case it will be assumed to be finalized at that epoch. [weak-subj]: https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/ diff --git a/book/src/developers.md b/book/src/developers.md new file mode 100644 index 0000000000..ab12bed5b9 --- /dev/null +++ b/book/src/developers.md @@ -0,0 +1,52 @@ +# For Protocol Developers + +_Documentation for protocol developers._ + +This section lists Lighthouse-specific decisions that are not strictly spec'd and may be useful for +other protocol developers wishing to interact with lighthouse. + + +## Custom ENR Fields + +Lighthouse currently uses the following ENR fields: + +### Ethereum Consensus Specified + +| Field | Description | +| ---- | ---- | +| `eth2` | The `ENRForkId` in SSZ bytes specifying which fork the node is on | +| `attnets` | An SSZ bitfield which indicates which of the 64 subnets the node is subscribed to for an extended period of time | +| `syncnets` | An SSZ bitfield which indicates which of the sync committee subnets the node is subscribed to | + + +### Lighthouse Custom Fields + +Lighthouse is currently using the following custom ENR fields. 
+| Field | Description | +| ---- | ---- | +| `quic` | The UDP port on which the QUIC transport is listening on IPv4 | +| `quic6` | The UDP port on which the QUIC transport is listening on IPv6 | + + +## Custom RPC Messages + +The specification leaves room for implementation-specific errors. Lighthouse uses the following +custom RPC error messages. + +### Goodbye Reason Codes + +| Code | Message | Description | +| ---- | ---- | ---- | +| 128 | Unable to Verify Network | Teku uses this, so we adopted it. It relates to having a fork mismatch | +| 129 | Too Many Peers | Lighthouse can close a connection because it has reached its peer-limit and pruned excess peers | +| 250 | Bad Score | The node has been dropped due to having a bad peer score | +| 251 | Banned | The peer has been banned and disconnected | +| 252 | Banned IP | The IP the node is connected to us with has been banned | + + +### Error Codes + +| Code | Message | Description | +| ---- | ---- | ---- | +| 139 | Rate Limited | The peer has been rate limited so we return this error as a response | +| 140 | Blobs Not Found For Block | We do not possess the blobs for the requested block | diff --git a/book/src/docker.md b/book/src/docker.md index d67b084da6..defa89517e 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -82,7 +82,7 @@ The `modernity` is: The `features` is: -* `-dev` for a development build with `minimal-spec` preset enabled. +* `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature). * empty for a standard build with no custom feature enabled. 
diff --git a/book/src/faq.md b/book/src/faq.md index 15c5757064..b3f3d55edf 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -40,6 +40,7 @@ - [How do I check the version of Lighthouse that is running?](#misc-version) - [Does Lighthouse have pruning function like the execution client to save disk space?](#misc-prune) - [Can I use a HDD for the freezer database and only have the hot db on SSD?](#misc-freezer) +- [Can Lighthouse log in local timestamp instead of UTC?](#misc-timestamp) ## Beacon Node @@ -436,16 +437,14 @@ Monitoring](./validator-monitoring.md) for more information. Lighthouse has also ### My beacon node and validator client are on different servers. How can I point the validator client to the beacon node? -The settings are as follows: - -1. On the beacon node: - - Specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than on the `localhost`. - -1. On the validator client: +The setting on the beacon node is the same for both cases below. In the beacon node, specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than `localhost`. You can find the `local_IP` by running the command `hostname -I | awk '{print $1}'` on the server running the beacon node. +1. If the beacon node and validator clients are on different servers *in the same network*, the setting in the validator client is as follows: + Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node. + If you have firewall setup, e.g., `ufw`, you will need to allow port 5052 (assuming that the default port is used) with `sudo ufw allow 5052`. Note: this will allow all IP addresses to access the HTTP API of the beacon node. 
If you are on an untrusted network (e.g., a university or public WiFi) or the host is exposed to the internet, apply IP-address filtering as described later in this section. + + You can test that the setup is working by running the following command on the validator client host: ```bash @@ -453,8 +452,25 @@ The settings are as follows: ``` You can refer to [Redundancy](./redundancy.md) for more information. - - It is also worth noting that the `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052, http://local_IP:5052` on the validator client. + +2. If the beacon node and validator clients are on different servers *and different networks*, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. + + + + In the validator client, use the flag `--beacon-nodes` to point to the beacon node. However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. You can get the public IP address of the beacon node by running the command ` dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node. + + Additionally, port forwarding of port 5052 on the router connected to the beacon node is required for the vc to connect to the bn. To do port forwarding, refer to [how to open ports](./advanced_networking.md#how-to-open-ports). + + + If you have firewall setup, e.g., `ufw`, you will need to allow connections to port 5052 (assuming that the default port is used). 
Since the beacon node HTTP/HTTPS API is public-facing (i.e., the 5052 port is now exposed to the internet due to port forwarding), we strongly recommend users to apply IP-address filtering to the BN/VC connection from malicious actors. This can be done using the command: + + ``` + sudo ufw allow from vc_IP_address proto tcp to any port 5052 + ``` + where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet. + + +It is also worth noting that the `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052, http://IP-address:5052` on the validator client. ### Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address? No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, this flag is not used). @@ -513,11 +529,9 @@ There is no pruning of Lighthouse database for now. However, since v4.2.0, a fea Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the beacon node. - - - - - +### Can Lighthouse log in local timestamp instead of UTC? + +The reason why Lighthouse logs in UTC is due to the dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs in local timestamps. 
diff --git a/book/src/homebrew.md b/book/src/homebrew.md index 317dc0e0fa..486de371f8 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index acca0bbeb3..bab520b569 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -21,7 +21,7 @@ engine to a merge-ready version. ## When? -All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred: +All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Chiado**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred:
@@ -31,6 +31,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln | Sepolia | 20th June 2022 | 6th July 2022 | | | Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| | Mainnet | 6th September 2022 | 15th September 2022 | +| Chiado | 10th October 2022 | 4th November 2022 | | Gnosis| 30th November 2022 | 8th December 2022
diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index a31aedf785..1ea1427335 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -58,7 +58,8 @@ Notable flags: - `lighthouse --network mainnet`: Mainnet. - `lighthouse --network goerli`: Goerli (testnet). - `lighthouse --network sepolia`: Sepolia (testnet). - - `lighthouse --network gnosis`: Gnosis chain + - `lighthouse --network chiado`: Chiado (testnet). + - `lighthouse --network gnosis`: Gnosis chain. > Note: Using the correct `--network` flag is very important; using the wrong flag can result in penalties, slashings or lost deposits. As a rule of thumb, *always* diff --git a/book/src/setup.md b/book/src/setup.md index 533e1d463d..87f431f9ba 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,12 +9,12 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: -- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to +- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. -- [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, +- [`java 17 runtime`](https://openjdk.java.net/projects/jdk/). 17 is the minimum, used by web3signer_tests. - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also know as `libpq-devel` on some systems. @@ -25,7 +25,8 @@ Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. We list some of these commands below so you can run them locally and avoid CI failures: -- `$ make cargo-fmt`: (fast) runs a Rust code linter. 
+- `$ make cargo-fmt`: (fast) runs a Rust code formatting check. +- `$ make lint`: (fast) runs a Rust code linter. - `$ make test`: (medium) runs unit tests across the whole project. - `$ make test-ef`: (medium) runs the Ethereum Foundation test vectors. - `$ make test-full`: (slow) runs the full test suite (including all previous diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index 98f3041391..1a5daa5441 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -9,23 +9,20 @@ following configuration screen. ## Connecting to the Clients -This allows you to enter the address and ports of the associated Lighthouse +Both the Beacon node and the Validator client need to have their HTTP APIs enabled. These ports should be accessible from the computer running Siren. This allows you to enter the address and ports of the associated Lighthouse Beacon node and Lighthouse Validator client. -> The Beacon Node must be run with the `--gui` flag set. +To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. -If you run Siren in the browser (by entering `localhost` in the browser), you will need to allow CORS in the HTTP API. This can be done by adding the flag `--http-allow-origin "*"` for both beacon node and validator client. If you would like to access Siren beyond the local computer, we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: -```bash -ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip -``` +> The Beacon Node must be run with the `--gui` flag set. 
-where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that with the `-N` option in an SSH session, you will not be able to execute commands in the CLI to avoid confusion with ordinary shell sessions. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser. +If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. -You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command: +> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. -```bash -ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip -``` +In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. + +If you run Siren in the browser (by entering `localhost` in the browser), you will need to allow CORS in the HTTP API. This can be done by adding the flag `--http-allow-origin "*"` for both beacon node and validator client. A green tick will appear once Siren is able to connect to both clients. You can specify different ports for each client by clicking on the advanced tab. @@ -37,7 +34,7 @@ The API Token is a secret key that allows you to connect to the validator client. 
The validator client's HTTP API is guarded by this key because it contains sensitive validator information and the ability to modify validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) -for further details. +for further details. Siren requires this token in order to connect to the Validator client. The token is located in the default data directory of the validator @@ -49,7 +46,7 @@ entered. ## Name -This is your name, it can be modified and is solely used for aesthetics. +This is your name, it can be modified and is solely used for aesthetics. ## Device diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index f4cbf1c40a..92e06270c2 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -1,7 +1,7 @@ # Frequently Asked Questions ## 1. Are there any requirements to run Siren? -Yes, Siren requires Lighthouse v3.5.1 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. +Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## 2. Where can I find my API token? The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). @@ -9,13 +9,41 @@ The required Api token may be found in the default data directory of the validat ## 3. How do I fix the Node Network Errors? If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). -## 4. How do I change my Beacon or Validator address after logging in? 
-Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. +## 4. How do I connect Siren to Lighthouse from a different computer on the same network? +The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. +Subsequently, by designating the host as `192.168.0.200`, you can seamlessly connect Siren to this specific beacon node and validator client pair from any computer situated within the same network. -## 5. Why doesn't my validator balance graph show any data? -If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. +## 5. How can I use Siren to monitor my validators remotely when I am not at home? -## 4. Does Siren support reverse proxy or DNS named addresses? +There are two primary methods to access your Beacon Node and Validator Client remotely: setting up a VPN or utilizing SSH-reverse tunneling. + +Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. 
The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). + +In the absence of a VPN, an alternative approach involves utilizing an SSH tunnel. To achieve this, you need remote SSH access to the computer hosting the Beacon Node and Validator Client pair (which necessitates a port forward in your router). In this context, while it is not obligatory to set a `--http-address` flag on the Beacon Node and Validator Client, you can configure an SSH tunnel to the local ports on the node and establish a connection through the tunnel. For instructions on setting up an SSH tunnel, refer to [`Connecting Siren via SSH tunnel`](./ui-faqs.md#6-how-do-i-connect-siren-to-lighthouse-via-a-ssh-tunnel) for detailed guidance. + +## 6. How do I connect Siren to Lighthouse via a ssh tunnel? +If you would like to access Siren beyond the local network (i.e., across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: + +```bash + +ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip + +``` + + +Where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that with the `-N` option in an SSH session, you will not be able to execute commands in the CLI to avoid confusion with ordinary shell sessions. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser. 
+ + +You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command: + + +```bash + +ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip + +``` + +## 7. Does Siren support reverse proxy or DNS named addresses? Yes, if you need to access your beacon or validator from an address such as `https://merp-server:9909/eth2-vc` you should follow the following steps for configuration: 1. Toggle `https` as your protocol 2. Add your address as `merp-server/eth2-vc` @@ -24,3 +52,10 @@ Yes, if you need to access your beacon or validator from an address such as `htt If you have configured it correctly you should see a green checkmark indicating Siren is now connected to your Validator Client and Beacon Node. If you have separate address setups for your Validator Client and Beacon Node respectively you should access the `Advance Settings` on the configuration and repeat the steps above for each address. + + +## 8. How do I change my Beacon or Validator address after logging in? +Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. + +## 9. Why doesn't my validator balance graph show any data? +If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. 
diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index ef81b2b751..cd31d78d62 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -8,7 +8,7 @@ These endpoints are not stable or included in the Ethereum consensus standard AP they are subject to change or removal without a change in major release version. -In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node or using the [/lighthouse/database/reconstruct API](./api-lighthouse.md#lighthousedatabasereconstruct). Once the state reconstruction process is completed, you can apply these APIs to any epoch. +In order to apply these APIs, you need to have historical states information in the database of your node. This means adding the flag `--reconstruct-historic-states` in the beacon node. Once the state reconstruction process is completed, you can apply these APIs to any epoch. ## Endpoints diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index 779c159276..0cec150dab 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -124,7 +124,6 @@ The command will create two files: The VC which will receive the validators needs to have the following flags at a minimum: - `--http` -- `--http-port 5062` - `--enable-doppelganger-protection` Therefore, the VC command might look like: @@ -133,7 +132,6 @@ Therefore, the VC command might look like: lighthouse \ vc \ --http \ - --http-port 5062 \ --enable-doppelganger-protection ``` diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md index 98932604d5..15089d65c5 100644 --- a/book/src/validator-manager-move.md +++ b/book/src/validator-manager-move.md @@ -69,7 +69,6 @@ In reality, many host configurations are possible. 
For example: The source VC needs to have the following flags at a minimum: - `--http` -- `--http-port 5062` - `--http-allow-keystore-export` Therefore, the source VC command might look like: @@ -78,7 +77,6 @@ Therefore, the source VC command might look like: lighthouse \ vc \ --http \ - --http-port 5062 \ --http-allow-keystore-export ``` @@ -87,7 +85,6 @@ lighthouse \ The destination VC needs to have the following flags at a minimum: - `--http` -- `--http-port 5062` - `--enable-doppelganger-protection` Therefore, the destination VC command might look like: @@ -96,7 +93,6 @@ Therefore, the destination VC command might look like: lighthouse \ vc \ --http \ - --http-port 5062 \ --enable-doppelganger-protection ``` @@ -167,6 +163,8 @@ At the same time, `lighthouse vc` will log: INFO Importing keystores via standard HTTP API, count: 1 INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore INFO Modified key_cache saved successfully +``` + Once the operation completes successfully, there is nothing else to be done. The validators have been removed from the `src-host` and enabled at the `dest-host`. If the `--enable-doppelganger-protection` flag was used it may take 2-3 epochs @@ -183,6 +181,7 @@ lighthouse \ --dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \ --validators 0x9096aab771e44da149bd7c9926d6f7bb96ef465c0eeb4918be5178cd23a1deb4aec232c61d85ff329b54ed4a3bdfff3a,0x90fc4f72d898a8f01ab71242e36f4545aaf87e3887be81632bb8ba4b2ae8fb70753a62f866344d7905e9a07f5a9cdda1 ``` + Any errors encountered during the operation should include information on how to proceed. Assistance is also available on our [Discord](https://discord.gg/cyAszAh). 
\ No newline at end of file diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index c3dd3bd193..553c8ab867 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,27 +1,26 @@ [package] name = "boot_node" -version = "4.3.0" +version = "4.5.0" authors = ["Sigma Prime "] -edition = "2021" +edition = { workspace = true } [dependencies] -beacon_node = { path = "../beacon_node" } -clap = "2.33.3" -clap_utils = { path = "../common/clap_utils" } -lighthouse_network = { path = "../beacon_node/lighthouse_network" } -types = { path = "../consensus/types" } -ethereum_ssz = "0.5.0" -slog = "2.5.2" -tokio = "1.14.0" -log = "0.4.11" -slog-term = "2.6.0" -logging = { path = "../common/logging" } -slog-async = "2.5.0" +beacon_node = { workspace = true } +clap = { workspace = true } +clap_utils = { workspace = true } +lighthouse_network = { workspace = true } +types = { workspace = true } +ethereum_ssz = { workspace = true } +slog = { workspace = true } +tokio = { workspace = true } +log = { workspace = true } +slog-term = { workspace = true } +logging = { workspace = true } +slog-async = { workspace = true } slog-scope = "4.3.0" slog-stdlog = "4.0.0" -hex = "0.4.2" -serde = "1.0.116" -serde_derive = "1.0.116" -serde_json = "1.0.66" -serde_yaml = "0.8.13" -eth2_network_config = { path = "../common/eth2_network_config" } +hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +eth2_network_config = { workspace = true } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index d006156bf9..6cfa8f4cf7 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -7,9 +7,10 @@ use lighthouse_network::{ discovery::{load_enr_from_disk, use_or_load_enr}, load_private_key, CombinedKeyExt, NetworkConfig, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use std::net::{SocketAddrV4, SocketAddrV6}; +use std::time::Duration; use 
std::{marker::PhantomData, path::PathBuf}; use types::EthSpec; @@ -24,7 +25,7 @@ pub struct BootNodeConfig { } impl BootNodeConfig { - pub fn new( + pub async fn new( matches: &ArgMatches<'_>, eth2_network_config: &Eth2NetworkConfig, ) -> Result { @@ -57,21 +58,27 @@ impl BootNodeConfig { set_network_config(&mut network_config, matches, &data_dir, &logger)?; - // Set the Enr UDP ports to the listening ports if not present. + // Set the Enr Discovery ports to the listening ports if not present. if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { - network_config.enr_udp4_port = Some( - network_config - .enr_udp4_port - .unwrap_or(listening_addr_v4.udp_port), - ) + if network_config.enr_udp4_port.is_none() { + network_config.enr_udp4_port = + Some(network_config.enr_udp4_port.unwrap_or( + listening_addr_v4.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { - network_config.enr_udp6_port = Some( - network_config - .enr_udp6_port - .unwrap_or(listening_addr_v6.udp_port), - ) + if network_config.enr_udp6_port.is_none() { + network_config.enr_udp6_port = + Some(network_config.enr_udp6_port.unwrap_or( + listening_addr_v6.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; // By default this is enabled. If it is not set, revert to false. 
@@ -90,8 +97,19 @@ impl BootNodeConfig { let enr_fork = { let spec = eth2_network_config.chain_spec::()?; - if eth2_network_config.beacon_state_is_known() { - let genesis_state = eth2_network_config.beacon_state::()?; + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + if eth2_network_config.genesis_state_is_known() { + let genesis_state = eth2_network_config + .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger).await? + .ok_or_else(|| { + "The genesis state for this network is not known, this is an unsupported mode" + .to_string() + })?; slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); let enr_fork = spec.enr_fork_id::( diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index 3d9dada0fd..d76e7906b2 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -7,7 +7,7 @@ mod cli; pub mod config; mod server; pub use cli::cli_app; -use config::{BootNodeConfig, BootNodeConfigSerialization}; +use config::BootNodeConfig; use types::{EthSpec, EthSpecId}; const LOG_CHANNEL_SIZE: usize = 2048; @@ -81,20 +81,13 @@ fn main( .build() .map_err(|e| format!("Failed to build runtime: {}", e))?; - // parse the CLI args into a useable config - let config: BootNodeConfig = BootNodeConfig::new(bn_matches, eth2_network_config)?; - - // Dump configs if `dump-config` or `dump-chain-config` flags are set - let config_sz = BootNodeConfigSerialization::from_config_ref(&config); - clap_utils::check_dump_configs::<_, T>( - lh_matches, - &config_sz, - ð2_network_config.chain_spec::()?, - )?; - // Run the boot node - if !lh_matches.is_present("immediate-shutdown") { - runtime.block_on(server::run(config, log)); - } + runtime.block_on(server::run::( + lh_matches, + bn_matches, + eth2_network_config, + log, + ))?; + Ok(()) } diff 
--git a/boot_node/src/server.rs b/boot_node/src/server.rs index 3823b28726..5a5729dc04 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -1,6 +1,9 @@ //! The main bootnode server execution. use super::BootNodeConfig; +use crate::config::BootNodeConfigSerialization; +use clap::ArgMatches; +use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::{ discv5::{enr::NodeId, Discv5, Discv5Event}, EnrExt, Eth2Enr, @@ -8,7 +11,27 @@ use lighthouse_network::{ use slog::info; use types::EthSpec; -pub async fn run(config: BootNodeConfig, log: slog::Logger) { +pub async fn run( + lh_matches: &ArgMatches<'_>, + bn_matches: &ArgMatches<'_>, + eth2_network_config: &Eth2NetworkConfig, + log: slog::Logger, +) -> Result<(), String> { + // parse the CLI args into a useable config + let config: BootNodeConfig = BootNodeConfig::new(bn_matches, eth2_network_config).await?; + + // Dump configs if `dump-config` or `dump-chain-config` flags are set + let config_sz = BootNodeConfigSerialization::from_config_ref(&config); + clap_utils::check_dump_configs::<_, T>( + lh_matches, + &config_sz, + ð2_network_config.chain_spec::()?, + )?; + + if lh_matches.is_present("immediate-shutdown") { + return Ok(()); + } + let BootNodeConfig { boot_nodes, local_enr, @@ -65,8 +88,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // start the server if let Err(e) = discv5.start().await { - slog::crit!(log, "Could not start discv5 server"; "error" => %e); - return; + return Err(format!("Could not start discv5 server: {e:?}")); } // if there are peers in the local routing table, establish a session by running a query @@ -82,8 +104,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { let mut event_stream = match discv5.event_stream().await { Ok(stream) => stream, Err(e) => { - slog::crit!(log, "Failed to obtain event stream"; "error" => %e); - return; + return Err(format!("Failed to obtain event stream: {e:?}")); } }; diff --git a/bors.toml b/bors.toml 
index 9e633d63f5..e821b89a81 100644 --- a/bors.toml +++ b/bors.toml @@ -1,5 +1,4 @@ status = [ - "cargo-fmt", "release-tests-ubuntu", "release-tests-windows", "debug-tests-ubuntu", @@ -9,20 +8,16 @@ status = [ "eth1-simulator-ubuntu", "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", - "check-benchmarks", - "clippy", - "arbitrary-check", - "cargo-audit", + "check-code", "cargo-udeps", "beacon-chain-tests", "op-pool-tests", "doppelganger-protection-test", "execution-engine-integration-ubuntu", - "cargo-vendor", "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "compile-with-beta-compiler" + "compile-with-beta-compiler", ] use_squash_merge = true timeout_sec = 10800 diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index ccff88ceef..e66bf14233 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -2,22 +2,21 @@ name = "account_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.8.5" -eth2_wallet = { path = "../../crypto/eth2_wallet" } -eth2_keystore = { path = "../../crypto/eth2_keystore" } -filesystem = { path = "../filesystem" } -zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -serde = "1.0.116" -serde_derive = "1.0.116" -serde_yaml = "0.8.13" -slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } -types = { path = "../../consensus/types" } -validator_dir = { path = "../validator_dir" } -regex = "1.5.5" +rand = { workspace = true } +eth2_wallet = { workspace = true } +eth2_keystore = { workspace = true } +filesystem = { workspace = true } +zeroize = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } +slog = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } +regex = { workspace = true } rpassword = 
"5.0.0" -directory = { path = "../directory" } +directory = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index e566d7cdda..8707ae531f 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -8,7 +8,7 @@ use eth2_wallet::{ }; use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::fs::{self, File}; use std::io; use std::io::prelude::*; diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index c91e717d11..8dc0888e6e 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -9,7 +9,7 @@ use crate::{ use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, Logger}; use std::collections::HashSet; use std::fs::{self, File}; diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index a882b7ce64..e4dfb2a556 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -2,18 +2,18 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33.3" -hex = "0.4.2" -dirs = "3.0.1" -eth2_network_config = { path = "../eth2_network_config" } -ethereum_ssz = "0.5.0" -ethereum-types = "0.14.1" -serde = "1.0.116" -serde_json = "1.0.59" -serde_yaml = "0.8.13" -types = { path = "../../consensus/types"} +clap = { workspace = true } +hex = { workspace = true } +dirs = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum-types = { workspace = true } 
+serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +types = { workspace = true } diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml index 58527b5711..9972ca75ca 100644 --- a/common/compare_fields/Cargo.toml +++ b/common/compare_fields/Cargo.toml @@ -2,10 +2,13 @@ name = "compare_fields" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } + +[dependencies] +itertools = { workspace = true } [dev-dependencies] -compare_fields_derive = { path = "../compare_fields_derive" } +compare_fields_derive = { workspace = true } [package.metadata.cargo-udeps.ignore] development = ["compare_fields_derive"] # used in doc-tests diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs index bc2f5446ad..27baf14806 100644 --- a/common/compare_fields/src/lib.rs +++ b/common/compare_fields/src/lib.rs @@ -81,11 +81,8 @@ //! } //! ]; //! assert_eq!(bar_a.compare_fields(&bar_b), bar_a_b); -//! -//! -//! -//! // TODO: //! 
``` +use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; #[derive(Debug, PartialEq, Clone)] @@ -112,13 +109,38 @@ impl Comparison { } pub fn from_slice>(field_name: String, a: &[T], b: &[T]) -> Self { - let mut children = vec![]; + Self::from_iter(field_name, a.iter(), b.iter()) + } - for i in 0..std::cmp::max(a.len(), b.len()) { - children.push(FieldComparison::new(format!("{i}"), &a.get(i), &b.get(i))); + pub fn from_into_iter<'a, T: Debug + PartialEq + 'a>( + field_name: String, + a: impl IntoIterator, + b: impl IntoIterator, + ) -> Self { + Self::from_iter(field_name, a.into_iter(), b.into_iter()) + } + + pub fn from_iter<'a, T: Debug + PartialEq + 'a>( + field_name: String, + a: impl Iterator, + b: impl Iterator, + ) -> Self { + let mut children = vec![]; + let mut all_equal = true; + + for (i, entry) in a.zip_longest(b).enumerate() { + let comparison = match entry { + EitherOrBoth::Both(x, y) => { + FieldComparison::new(format!("{i}"), &Some(x), &Some(y)) + } + EitherOrBoth::Left(x) => FieldComparison::new(format!("{i}"), &Some(x), &None), + EitherOrBoth::Right(y) => FieldComparison::new(format!("{i}"), &None, &Some(y)), + }; + all_equal = all_equal && comparison.equal(); + children.push(comparison); } - Self::parent(field_name, a == b, children) + Self::parent(field_name, all_equal, children) } pub fn retain_children(&mut self, f: F) diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index 7696d3606b..b4bbbaa436 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -2,11 +2,11 @@ name = "compare_fields_derive" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } [lib] proc-macro = true [dependencies] -syn = "1.0.42" -quote = "1.0.7" +syn = { workspace = true } +quote = { workspace = true } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index a8b92b3d54..01c5a8f6ef 
100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -4,10 +4,11 @@ use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; -fn is_slice(field: &syn::Field) -> bool { +fn is_iter(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { attr.path.is_ident("compare_fields") - && attr.tokens.to_string().replace(' ', "") == "(as_slice)" + && (attr.tokens.to_string().replace(' ', "") == "(as_slice)" + || attr.tokens.to_string().replace(' ', "") == "(as_iter)") }) } @@ -18,29 +19,26 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream { let name = &item.ident; let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("compare_fields_derive only supports structs."), + let syn::Data::Struct(struct_data) = &item.data else { + panic!("compare_fields_derive only supports structs."); }; let mut quotes = vec![]; for field in struct_data.fields.iter() { - let ident_a = match &field.ident { - Some(ref ident) => ident, - _ => panic!("compare_fields_derive only supports named struct fields."), + let Some(ident_a) = &field.ident else { + panic!("compare_fields_derive only supports named struct fields."); }; - let field_name = ident_a.to_string(); let ident_b = ident_a.clone(); - let quote = if is_slice(field) { + let quote = if is_iter(field) { quote! { - comparisons.push(compare_fields::Comparison::from_slice( + comparisons.push(compare_fields::Comparison::from_into_iter( #field_name.to_string(), &self.#ident_a, - &b.#ident_b) - ); + &b.#ident_b + )); } } else { quote! 
{ diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index aabc07fc52..a03ac2178f 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -2,18 +2,18 @@ name = "deposit_contract" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } build = "build.rs" [build-dependencies] -reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] } -serde_json = "1.0.58" -sha2 = "0.10" -hex = "0.4.2" +reqwest = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +hex = { workspace = true } [dependencies] -types = { path = "../../consensus/types"} -ethereum_ssz = "0.5.0" -tree_hash = "0.5.0" +types = { workspace = true } +ethereum_ssz = { workspace = true } +tree_hash = { workspace = true } ethabi = "16.0.0" diff --git a/common/directory/Cargo.toml b/common/directory/Cargo.toml index f7b77ab7b7..f724337261 100644 --- a/common/directory/Cargo.toml +++ b/common/directory/Cargo.toml @@ -2,11 +2,11 @@ name = "directory" version = "0.1.0" authors = ["pawan "] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33.3" -clap_utils = {path = "../clap_utils"} -eth2_network_config = { path = "../eth2_network_config" } +clap = { workspace = true } +clap_utils = { workspace = true } +eth2_network_config = { workspace = true } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d8e1a375fd..02460551a9 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -2,47 +2,43 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.58" -types = { path = 
"../../consensus/types" } -reqwest = { version = "0.11.0", features = ["json", "stream"] } -lighthouse_network = { path = "../../beacon_node/lighthouse_network" } -proto_array = { path = "../../consensus/proto_array", optional = true } -ethereum_serde_utils = "0.5.0" -eth2_keystore = { path = "../../crypto/eth2_keystore" } -libsecp256k1 = "0.7.0" -ring = "0.16.19" -bytes = "1.0.1" -account_utils = { path = "../../common/account_utils" } -sensitive_url = { path = "../../common/sensitive_url" } -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" +serde = { workspace = true } +serde_json = { workspace = true } +ssz_types = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +reqwest = { workspace = true } +lighthouse_network = { workspace = true } +proto_array = { workspace = true } +ethereum_serde_utils = { workspace = true } +eth2_keystore = { workspace = true } +libsecp256k1 = { workspace = true } +ring = { workspace = true } +bytes = { workspace = true } +account_utils = { workspace = true } +sensitive_url = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } futures-util = "0.3.8" -futures = "0.3.8" -store = { path = "../../beacon_node/store", optional = true } -slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } +futures = { workspace = true } +store = { workspace = true } +slashing_protection = { workspace = true } mediatype = "0.19.13" mime = "0.3.16" -pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } +pretty_reqwest_error = { workspace = true } [dev-dependencies] -tokio = { version = "1.14.0", features = ["full"] } +tokio = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } -procinfo = { version = "0.4.2", optional = true } +procfs = { version = "0.15.1", optional = true } [features] default = ["lighthouse"] -lighthouse = [ - "proto_array", - 
"psutil", - "procinfo", - "store", - "slashing_protection", -] +lighthouse = ["psutil", "procfs"] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 1fc65f9301..9df1f2f600 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -120,6 +120,7 @@ pub struct Timeouts { pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, + pub get_validator_block_ssz: Duration, } impl Timeouts { @@ -135,6 +136,7 @@ impl Timeouts { get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, + get_validator_block_ssz: timeout, } } } @@ -694,8 +696,8 @@ impl BeaconNodeHttpClient { /// /// Returns `Ok(None)` on a 404 error. pub async fn post_beacon_blocks>( - self, - block: &SignedBeaconBlock, + &self, + block_contents: &SignedBlockContents, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -704,7 +706,7 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blocks"); - self.post_with_timeout(path, block, self.timeouts.proposal) + self.post_with_timeout(path, block_contents, self.timeouts.proposal) .await?; Ok(()) @@ -715,7 +717,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. pub async fn post_beacon_blocks_ssz>( &self, - block: &SignedBeaconBlock, + block_contents: &SignedBlockContents, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -724,8 +726,12 @@ impl BeaconNodeHttpClient { .push("beacon") .push("blocks"); - self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal)) - .await?; + self.post_generic_with_ssz_body( + path, + block_contents.as_ssz_bytes(), + Some(self.timeouts.proposal), + ) + .await?; Ok(()) } @@ -735,7 +741,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. 
pub async fn post_beacon_blinded_blocks>( &self, - block: &SignedBeaconBlock, + block: &SignedBlockContents, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -755,7 +761,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(None)` on a 404 error. pub async fn post_beacon_blinded_blocks_ssz>( &self, - block: &SignedBeaconBlock, + block: &SignedBlockContents, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -809,14 +815,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blocks` pub async fn post_beacon_blocks_v2>( &self, - block: &SignedBeaconBlock, + block_contents: &SignedBlockContents, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blocks_v2_path(validation_level)?, - block, + block_contents, Some(self.timeouts.proposal), - block.message().body().fork_name(), + block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -826,14 +832,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blocks` pub async fn post_beacon_blocks_v2_ssz>( &self, - block: &SignedBeaconBlock, + block_contents: &SignedBlockContents, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blocks_v2_path(validation_level)?, - block.as_ssz_bytes(), + block_contents.as_ssz_bytes(), Some(self.timeouts.proposal), - block.message().body().fork_name(), + block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -841,16 +847,16 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blinded_blocks` - pub async fn post_beacon_blinded_blocks_v2( + pub async fn post_beacon_blinded_blocks_v2>( &self, - block: &SignedBlindedBeaconBlock, + block_contents: &SignedBlockContents, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block, + block_contents, Some(self.timeouts.proposal), - block.message().body().fork_name(), + 
block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -860,14 +866,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2_ssz( &self, - block: &SignedBlindedBeaconBlock, + block_contents: &SignedBlindedBlockContents, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block.as_ssz_bytes(), + block_contents.as_ssz_bytes(), Some(self.timeouts.proposal), - block.message().body().fork_name(), + block_contents.signed_block().message().body().fork_name(), ) .await?; @@ -885,6 +891,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/blob_sidecars/{block_id}` + pub fn get_blobs_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blob_sidecars") + .push(&block_id.to_string()); + Ok(path) + } + /// Path for `v1/beacon/blinded_blocks/{block_id}` pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; @@ -907,9 +924,23 @@ impl BeaconNodeHttpClient { Error, > { let path = self.get_beacon_blocks_path(block_id)?; - let response = match self.get_response(path, |b| b).await.optional()? { - Some(res) => res, - None => return Ok(None), + let Some(response) = self.get_response(path, |b| b).await.optional()? else { + return Ok(None); + }; + + Ok(Some(response.json().await?)) + } + + /// `GET v1/beacon/blob_sidecars/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_blobs( + &self, + block_id: BlockId, + ) -> Result>>, Error> { + let path = self.get_blobs_path(block_id)?; + let Some(response) = self.get_response(path, |b| b).await.optional()? 
else { + return Ok(None); }; Ok(Some(response.json().await?)) @@ -926,9 +957,8 @@ impl BeaconNodeHttpClient { Error, > { let path = self.get_beacon_blinded_blocks_path(block_id)?; - let response = match self.get_response(path, |b| b).await.optional()? { - Some(res) => res, - None => return Ok(None), + let Some(response) = self.get_response(path, |b| b).await.optional()? else { + return Ok(None); }; Ok(Some(response.json().await?)) @@ -1269,6 +1299,23 @@ impl BeaconNodeHttpClient { Ok(()) } + // GET builder/states/{state_id}/expected_withdrawals + pub async fn get_expected_withdrawals( + &self, + state_id: &StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("builder") + .push("states") + .push(&state_id.to_string()) + .push("expected_withdrawals"); + + self.get(path).await + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, @@ -1584,19 +1631,19 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No) .await } - /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + /// returns `GET v2/validator/blocks/{slot}` URL path + pub async fn get_validator_blocks_path>( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result { let mut path = self.eth_path(V2)?; path.path_segments_mut() @@ -1618,16 +1665,73 @@ impl BeaconNodeHttpClient { .append_pair("skip_randao_verification", ""); } + Ok(path) + } + + /// `GET v2/validator/blocks/{slot}` + pub async fn get_validator_blocks_modular>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + 
skip_randao_verification: SkipRandaoVerification, + ) -> Result>, Error> { + let path = self + .get_validator_blocks_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + self.get(path).await } + /// `GET v2/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_ssz>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + self.get_validator_blocks_modular_ssz::( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + ) + .await + } + + /// `GET v2/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_modular_ssz>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result>, Error> { + let path = self + .get_validator_blocks_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + .await + } + /// `GET v2/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blinded_blocks_modular( slot, randao_reveal, @@ -1637,17 +1741,14 @@ impl BeaconNodeHttpClient { .await } - /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular< - T: EthSpec, - Payload: AbstractExecPayload, - >( + /// returns `GET v1/validator/blinded_blocks/{slot}` URL path + pub async fn get_validator_blinded_blocks_path>( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1669,9 +1770,71 @@ impl BeaconNodeHttpClient { 
.append_key_only("skip_randao_verification"); } + Ok(path) + } + + /// `GET v1/validator/blinded_blocks/{slot}` + pub async fn get_validator_blinded_blocks_modular< + T: EthSpec, + Payload: AbstractExecPayload, + >( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result>, Error> { + let path = self + .get_validator_blinded_blocks_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + self.get(path).await } + /// `GET v2/validator/blinded_blocks/{slot}` in ssz format + pub async fn get_validator_blinded_blocks_ssz>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + self.get_validator_blinded_blocks_modular_ssz::( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + ) + .await + } + + pub async fn get_validator_blinded_blocks_modular_ssz< + T: EthSpec, + Payload: AbstractExecPayload, + >( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result>, Error> { + let path = self + .get_validator_blinded_blocks_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + .await + } + /// `GET validator/attestation_data?slot,committee_index` pub async fn get_validator_attestation_data( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 1b4bcc0e39..cd405386b8 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -20,7 +20,7 @@ use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; -use store::{AnchorInfo, Split, StoreConfig}; +use store::{AnchorInfo, BlobInfo, Split, StoreConfig}; pub use attestation_performance::{ 
AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, @@ -95,8 +95,8 @@ pub struct ValidatorInclusionData { #[cfg(target_os = "linux")] use { - procinfo::pid, psutil::cpu::os::linux::CpuTimesExt, - psutil::memory::os::linux::VirtualMemoryExt, psutil::process::Process, + psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, + psutil::process::Process, }; /// Reports on the health of the Lighthouse instance. @@ -238,7 +238,7 @@ pub struct ProcessHealth { /// The pid of this process. pub pid: u32, /// The number of threads used by this pid. - pub pid_num_threads: i32, + pub pid_num_threads: i64, /// The total resident memory used by this pid. pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. @@ -262,7 +262,12 @@ impl ProcessHealth { .memory_info() .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + let me = procfs::process::Process::myself() + .map_err(|e| format!("Unable to get process: {:?}", e))?; + let stat = me + .stat() + .map_err(|e| format!("Unable to get stat: {:?}", e))?; + let process_times = process .cpu_times() .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; @@ -359,6 +364,7 @@ pub struct DatabaseInfo { pub config: StoreConfig, pub split: Split, pub anchor: Option, + pub blob_info: BlobInfo, } impl BeaconNodeHttpClient { diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 7bf4cf5b19..b2d53c5e08 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -666,7 +666,7 @@ impl ValidatorClientHttpClient { &self, pubkey: &PublicKeyBytes, epoch: Option, - ) -> Result { + ) -> Result, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 
f451d3b8f2..e87f254f10 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -4,11 +4,17 @@ use crate::Error as ServerError; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; use mediatype::{names, MediaType, MediaTypeList}; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::Value; +use ssz::{Decode, DecodeError}; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; use std::time::Duration; +use tree_hash::TreeHash; +use types::beacon_block_body::KzgCommitments; +use types::builder_bid::BlindedBlobsBundle; pub use types::*; #[cfg(feature = "lighthouse")] @@ -581,6 +587,11 @@ pub struct SyncingData { pub sync_distance: Slot, } +#[derive(Serialize, Deserialize)] +pub struct ExpectedWithdrawalsQuery { + pub proposal_slot: Option, +} + #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] pub struct QueryVec { @@ -647,6 +658,13 @@ pub struct ValidatorBalancesQuery { pub id: Option>, } +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct BlobIndicesQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub indices: Option>, +} + #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); @@ -870,6 +888,28 @@ pub struct SseBlock { pub execution_optimistic: bool, } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseBlobSidecar { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub kzg_commitment: KzgCommitment, + pub versioned_hash: VersionedHash, +} + +impl SseBlobSidecar { + pub fn from_blob_sidecar(blob_sidecar: &BlobSidecar) -> SseBlobSidecar { + SseBlobSidecar { + block_root: blob_sidecar.block_root, + index: blob_sidecar.index, + 
slot: blob_sidecar.slot, + kzg_commitment: blob_sidecar.kzg_commitment, + versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(), + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseFinalizedCheckpoint { pub block: Hash256, @@ -918,7 +958,7 @@ pub struct SseLateHead { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)) )] #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] @@ -931,8 +971,10 @@ pub struct SsePayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2))] + #[superstruct(only(V2, V3))] pub withdrawals: Vec, + #[superstruct(only(V3), partial_getter(copy))] + pub parent_beacon_block_root: Hash256, } #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] @@ -962,6 +1004,9 @@ impl ForkVersionDeserialize for SsePayloadAttributes { ForkName::Capella => serde_json::from_value(value) .map(Self::V2) .map_err(serde::de::Error::custom), + ForkName::Deneb => serde_json::from_value(value) + .map(Self::V3) + .map_err(serde::de::Error::custom), ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( "SsePayloadAttributes deserialization for {fork_name} not implemented" ))), @@ -995,6 +1040,7 @@ impl ForkVersionDeserialize for SseExtendedPayloadAttributes { pub enum EventKind { Attestation(Box>), Block(SseBlock), + BlobSidecar(SseBlobSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), Head(SseHead), VoluntaryExit(SignedVoluntaryExit), @@ -1011,6 +1057,7 @@ impl EventKind { match self { EventKind::Head(_) => "head", EventKind::Block(_) => "block", + EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::Attestation(_) => "attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint", @@ -1048,6 +1095,9 @@ impl EventKind { "block" => 
Ok(EventKind::Block(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block: {:?}", e)), )?)), + "blob_sidecar" => Ok(EventKind::BlobSidecar(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Blob Sidecar: {:?}", e)), + )?)), "chain_reorg" => Ok(EventKind::ChainReorg(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Chain Reorg: {:?}", e)), )?)), @@ -1100,6 +1150,7 @@ pub struct EventQuery { pub enum EventTopic { Head, Block, + BlobSidecar, Attestation, VoluntaryExit, FinalizedCheckpoint, @@ -1118,6 +1169,7 @@ impl FromStr for EventTopic { match s { "head" => Ok(EventTopic::Head), "block" => Ok(EventTopic::Block), + "blob_sidecar" => Ok(EventTopic::BlobSidecar), "attestation" => Ok(EventTopic::Attestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), "finalized_checkpoint" => Ok(EventTopic::FinalizedCheckpoint), @@ -1137,6 +1189,7 @@ impl fmt::Display for EventTopic { match self { EventTopic::Head => write!(f, "head"), EventTopic::Block => write!(f, "block"), + EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::Attestation => write!(f, "attestation"), EventTopic::VoluntaryExit => write!(f, "voluntary_exit"), EventTopic::FinalizedCheckpoint => write!(f, "finalized_checkpoint"), @@ -1311,9 +1364,31 @@ pub struct BroadcastValidationQuery { pub broadcast_validation: BroadcastValidation, } +pub mod serde_status_code { + use crate::StatusCode; + use serde::{de::Error, Deserialize, Serialize}; + + pub fn serialize(status_code: &StatusCode, ser: S) -> Result + where + S: serde::Serializer, + { + status_code.as_u16().serialize(ser) + } + + pub fn deserialize<'de, D>(de: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + let status_code = u16::deserialize(de)?; + StatusCode::try_from(status_code).map_err(D::Error::custom) + } +} + #[cfg(test)] mod tests { use super::*; + use ssz::Encode; + use std::sync::Arc; #[test] fn query_vec() { @@ 
-1348,4 +1423,615 @@ mod tests { Accept::Any ); } + + #[test] + fn ssz_signed_block_contents_pre_deneb() { + type E = MainnetEthSpec; + let spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + + let block: SignedBlockContents> = SignedBeaconBlock::from_block( + BeaconBlock::::Capella(BeaconBlockCapella::empty(&spec)), + Signature::empty(), + ) + .try_into() + .expect("should convert into signed block contents"); + + let decoded: SignedBlockContents = + SignedBlockContents::from_ssz_bytes(&block.as_ssz_bytes(), &spec) + .expect("should decode Block"); + assert!(matches!(decoded, SignedBlockContents::Block(_))); + } + + #[test] + fn ssz_signed_block_contents_with_blobs() { + type E = MainnetEthSpec; + let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); + + let block = SignedBeaconBlock::from_block( + BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), + Signature::empty(), + ); + let blobs = SignedSidecarList::from(vec![SignedSidecar { + message: Arc::new(BlobSidecar::empty()), + signature: Signature::empty(), + _phantom: Default::default(), + }]); + let signed_block_contents = SignedBlockContents::new(block, Some(blobs)); + + let decoded: SignedBlockContents> = + SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) + .expect("should decode BlockAndBlobSidecars"); + assert!(matches!( + decoded, + SignedBlockContents::BlockAndBlobSidecars(_) + )); + } + + #[test] + fn ssz_signed_blinded_block_contents_with_blobs() { + type E = MainnetEthSpec; + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + + let blinded_block = SignedBeaconBlock::from_block( + BeaconBlock::>::Deneb(BeaconBlockDeneb::empty(&spec)), + Signature::empty(), + ); + let blinded_blobs = SignedSidecarList::from(vec![SignedSidecar { + message: 
Arc::new(BlindedBlobSidecar::empty()), + signature: Signature::empty(), + _phantom: Default::default(), + }]); + let signed_block_contents = SignedBlockContents::new(blinded_block, Some(blinded_blobs)); + + let decoded: SignedBlockContents> = + SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) + .expect("should decode BlindedBlockAndBlobSidecars"); + assert!(matches!( + decoded, + SignedBlockContents::BlindedBlockAndBlobSidecars(_) + )); + } +} + +/// A wrapper over a [`BeaconBlock`] or a [`BeaconBlockAndBlobSidecars`]. +#[derive(Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum BlockContents> { + BlockAndBlobSidecars(BeaconBlockAndBlobSidecars), + BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars), + Block(BeaconBlock), +} + +pub type BlockContentsTuple = ( + BeaconBlock, + Option>::Sidecar>>, +); + +impl> BlockContents { + pub fn new( + block: BeaconBlock, + blobs: Option>, + ) -> Self { + match (Payload::block_type(), blobs) { + (BlockType::Full, Some(blobs)) => { + Self::BlockAndBlobSidecars(BeaconBlockAndBlobSidecars { + block, + blob_sidecars: blobs, + }) + } + (BlockType::Blinded, Some(blobs)) => { + Self::BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars { + blinded_block: block, + blinded_blob_sidecars: blobs, + }) + } + (_, None) => Self::Block(block), + } + } + + /// SSZ decode with fork variant determined by slot. 
+ pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + let slot_len = ::ssz_fixed_len(); + let slot_bytes = bytes + .get(0..slot_len) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_len, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let fork_at_slot = spec.fork_name_at_slot::(slot); + + match fork_at_slot { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + BeaconBlock::from_ssz_bytes(bytes, spec).map(|block| BlockContents::Block(block)) + } + ForkName::Deneb => { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = + decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let blobs = decoder.decode_next()?; + Ok(BlockContents::new(block, Some(blobs))) + } + } + } + + pub fn block(&self) -> &BeaconBlock { + match self { + BlockContents::BlockAndBlobSidecars(block_and_sidecars) => &block_and_sidecars.block, + BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { + &block_and_sidecars.blinded_block + } + BlockContents::Block(block) => block, + } + } + + pub fn deconstruct(self) -> BlockContentsTuple { + match self { + BlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + block_and_sidecars.block, + Some(block_and_sidecars.blob_sidecars), + ), + BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( + block_and_sidecars.blinded_block, + Some(block_and_sidecars.blinded_blob_sidecars), + ), + BlockContents::Block(block) => (block, None), + } + } + + /// Signs `self`, producing a `SignedBlockContents`. 
+ pub fn sign( + self, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedBlockContents { + let (block, maybe_blobs) = self.deconstruct(); + let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec); + let signed_blobs = maybe_blobs.map(|blobs| { + blobs + .into_iter() + .map(|blob| blob.sign(secret_key, fork, genesis_validators_root, spec)) + .collect::>() + .into() + }); + SignedBlockContents::new(signed_block, signed_blobs) + } +} + +impl> ForkVersionDeserialize + for BlockContents +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Ok(BlockContents::Block(BeaconBlock::deserialize_by_fork::< + 'de, + D, + >(value, fork_name)?)) + } + ForkName::Deneb => { + let block_contents = match Payload::block_type() { + BlockType::Blinded => BlockContents::BlindedBlockAndBlobSidecars( + BlindedBeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( + value, fork_name, + )?, + ), + BlockType::Full => BlockContents::BlockAndBlobSidecars( + BeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( + value, fork_name, + )?, + ), + }; + Ok(block_contents) + } + } + } +} + +impl> Into> + for BlockContents +{ + fn into(self) -> BeaconBlock { + match self { + Self::BlockAndBlobSidecars(block_and_sidecars) => block_and_sidecars.block, + Self::BlindedBlockAndBlobSidecars(block_and_sidecars) => { + block_and_sidecars.blinded_block + } + Self::Block(block) => block, + } + } +} + +pub type SignedBlockContentsTuple = ( + SignedBeaconBlock, + Option>::Sidecar>>, +); + +pub type SignedBlindedBlockContents = SignedBlockContents>; + +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`]. 
+#[derive(Clone, Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum SignedBlockContents = FullPayload> { + BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars), + BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars), + Block(SignedBeaconBlock), +} + +impl> SignedBlockContents { + pub fn new( + block: SignedBeaconBlock, + blobs: Option>, + ) -> Self { + match (Payload::block_type(), blobs) { + (BlockType::Full, Some(blobs)) => { + Self::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars { + signed_block: block, + signed_blob_sidecars: blobs, + }) + } + (BlockType::Blinded, Some(blobs)) => { + Self::BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars { + signed_blinded_block: block, + signed_blinded_blob_sidecars: blobs, + }) + } + (_, None) => Self::Block(block), + } + } + + /// SSZ decode with fork variant determined by slot. + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + let slot_len = ::ssz_fixed_len(); + let slot_bytes = bytes + .get(0..slot_len) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_len, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let fork_at_slot = spec.fork_name_at_slot::(slot); + + match fork_at_slot { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + SignedBeaconBlock::from_ssz_bytes(bytes, spec) + .map(|block| SignedBlockContents::Block(block)) + } + ForkName::Deneb => { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + let block = decoder + .decode_next_with(|bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec))?; + let blobs = decoder.decode_next()?; + Ok(SignedBlockContents::new(block, Some(blobs))) + } + } + } + + pub fn signed_block(&self) -> &SignedBeaconBlock { + match self { + 
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { + &block_and_sidecars.signed_block + } + SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { + &block_and_sidecars.signed_blinded_block + } + SignedBlockContents::Block(block) => block, + } + } + + pub fn blobs_cloned(&self) -> Option> { + match self { + SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { + Some(block_and_sidecars.signed_blob_sidecars.clone()) + } + SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { + Some(block_and_sidecars.signed_blinded_blob_sidecars.clone()) + } + SignedBlockContents::Block(_block) => None, + } + } + + pub fn deconstruct(self) -> SignedBlockContentsTuple { + match self { + SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + block_and_sidecars.signed_block, + Some(block_and_sidecars.signed_blob_sidecars), + ), + SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( + block_and_sidecars.signed_blinded_block, + Some(block_and_sidecars.signed_blinded_blob_sidecars), + ), + SignedBlockContents::Block(block) => (block, None), + } + } +} + +impl SignedBlockContents> { + pub fn try_into_full_block_and_blobs( + self, + maybe_full_payload_contents: Option>, + ) -> Result>, String> { + match self { + SignedBlockContents::BlindedBlockAndBlobSidecars(blinded_block_and_blob_sidecars) => { + match maybe_full_payload_contents { + None | Some(FullPayloadContents::Payload(_)) => { + Err("Can't build full block contents without payload and blobs".to_string()) + } + Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { + let signed_block = blinded_block_and_blob_sidecars + .signed_blinded_block + .try_into_full_block(Some(payload_and_blobs.execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + let signed_blob_sidecars: SignedBlobSidecarList = + blinded_block_and_blob_sidecars + .signed_blinded_blob_sidecars + .into_iter() + 
.zip(payload_and_blobs.blobs_bundle.blobs) + .map(|(blinded_blob_sidecar, blob)| { + blinded_blob_sidecar.into_full_blob_sidecars(blob) + }) + .collect::>() + .into(); + + Ok(SignedBlockContents::new( + signed_block, + Some(signed_blob_sidecars), + )) + } + } + } + SignedBlockContents::Block(blinded_block) => { + let full_payload_opt = maybe_full_payload_contents.map(|o| o.deconstruct().0); + blinded_block + .try_into_full_block(full_payload_opt) + .map(SignedBlockContents::Block) + .ok_or("Can't build full block without payload".to_string()) + } + SignedBlockContents::BlockAndBlobSidecars(_) => Err( + "BlockAndBlobSidecars variant not expected when constructing full block" + .to_string(), + ), + } + } +} + +impl SignedBlockContents { + pub fn clone_as_blinded(&self) -> SignedBlindedBlockContents { + let blinded_blobs = self.blobs_cloned().map(|blob_sidecars| { + blob_sidecars + .into_iter() + .map(|blob| blob.into()) + .collect::>() + .into() + }); + SignedBlockContents::new(self.signed_block().clone_as_blinded(), blinded_blobs) + } +} + +impl> TryFrom> + for SignedBlockContents +{ + type Error = &'static str; + fn try_from(block: SignedBeaconBlock) -> Result { + match block { + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Merge(_) + | SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)), + SignedBeaconBlock::Deneb(_) => { + Err("deneb block contents cannot be fully constructed from just the signed block") + } + } + } +} + +impl> From> + for SignedBlockContents +{ + fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { + SignedBlockContents::new(block_contents_tuple.0, block_contents_tuple.1) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = "T: EthSpec")] +pub struct SignedBeaconBlockAndBlobSidecars> { + pub signed_block: SignedBeaconBlock, + pub signed_blob_sidecars: SignedSidecarList, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = 
"T: EthSpec, Payload: AbstractExecPayload")] +pub struct BeaconBlockAndBlobSidecars> { + pub block: BeaconBlock, + pub blob_sidecars: SidecarList, +} + +impl> ForkVersionDeserialize + for BeaconBlockAndBlobSidecars +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec, S: Sidecar")] + struct Helper> { + block: serde_json::Value, + blob_sidecars: SidecarList, + } + let helper: Helper = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, + blob_sidecars: helper.blob_sidecars, + }) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = "T: EthSpec")] +pub struct SignedBlindedBeaconBlockAndBlobSidecars< + T: EthSpec, + Payload: AbstractExecPayload = BlindedPayload, +> { + pub signed_blinded_block: SignedBeaconBlock, + pub signed_blinded_blob_sidecars: SignedSidecarList, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Encode)] +#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] +pub struct BlindedBeaconBlockAndBlobSidecars< + T: EthSpec, + Payload: AbstractExecPayload = BlindedPayload, +> { + pub blinded_block: BeaconBlock, + pub blinded_blob_sidecars: SidecarList, +} + +impl> ForkVersionDeserialize + for BlindedBeaconBlockAndBlobSidecars +{ + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "T: EthSpec, S: Sidecar")] + struct Helper> { + blinded_block: serde_json::Value, + blinded_blob_sidecars: SidecarList, + } + let helper: Helper = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + + Ok(Self { + blinded_block: BeaconBlock::deserialize_by_fork::<'de, D>( + helper.blinded_block, + fork_name, + )?, + blinded_blob_sidecars: 
helper.blinded_blob_sidecars, + }) + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode)] +#[serde(untagged)] +#[serde(bound = "E: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum FullPayloadContents { + Payload(ExecutionPayload), + PayloadAndBlobs(ExecutionPayloadAndBlobs), +} + +impl FullPayloadContents { + pub fn new( + execution_payload: ExecutionPayload, + maybe_blobs: Option>, + ) -> Self { + match maybe_blobs { + None => Self::Payload(execution_payload), + Some(blobs_bundle) => Self::PayloadAndBlobs(ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + }), + } + } + + pub fn payload_ref(&self) -> &ExecutionPayload { + match self { + FullPayloadContents::Payload(payload) => payload, + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => { + &payload_and_blobs.execution_payload + } + } + } + + pub fn block_hash(&self) -> ExecutionBlockHash { + self.payload_ref().block_hash() + } + + pub fn deconstruct(self) -> (ExecutionPayload, Option>) { + match self { + FullPayloadContents::Payload(payload) => (payload, None), + FullPayloadContents::PayloadAndBlobs(payload_and_blobs) => ( + payload_and_blobs.execution_payload, + Some(payload_and_blobs.blobs_bundle), + ), + } + } +} + +impl ForkVersionDeserialize for FullPayloadContents { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + match fork_name { + ForkName::Merge | ForkName::Capella => serde_json::from_value(value) + .map(Self::Payload) + .map_err(serde::de::Error::custom), + ForkName::Deneb => serde_json::from_value(value) + .map(Self::PayloadAndBlobs) + .map_err(serde::de::Error::custom), + ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( + "FullPayloadContents deserialization for {fork_name} not implemented" + ))), + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode)] +#[serde(bound = "E: EthSpec")] +pub struct ExecutionPayloadAndBlobs { + pub 
execution_payload: ExecutionPayload, + pub blobs_bundle: BlobsBundle, +} + +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[serde(bound = "E: EthSpec")] +pub struct BlobsBundle { + pub commitments: KzgCommitments, + pub proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, +} + +impl Into> for BlobsBundle { + fn into(self) -> BlindedBlobsBundle { + BlindedBlobsBundle { + commitments: self.commitments, + proofs: self.proofs, + blob_roots: self + .blobs + .into_iter() + .map(|blob| blob.tree_hash_root()) + .collect::>() + .into(), + } + } } diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 08f8c9a393..20c3b0b6f2 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -2,8 +2,8 @@ name = "eth2_config" version = "0.2.0" authors = ["Paul Hauner "] -edition = "2021" +edition = { workspace = true } [dependencies] -types = { path = "../../consensus/types" } -paste = "1.0.5" +types = { workspace = true } +paste = { workspace = true } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7e5506667f..bf707c4d17 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -23,6 +23,23 @@ pub const PREDEFINED_NETWORKS_DIR: &str = predefined_networks_dir!(); pub const GENESIS_FILE_NAME: &str = "genesis.ssz"; pub const GENESIS_ZIP_FILE_NAME: &str = "genesis.ssz.zip"; +const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + urls: &[ + // This is an AWS S3 bucket hosted by Sigma Prime. See Paul Hauner for + // more details. 
+ "https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/holesky/", + ], + checksum: "0xd750639607c337bbb192b15c27f447732267bf72d1650180a0e44c2d93a80741", + genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", +}; + +const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + // No default checkpoint sources are provided. + urls: &[], + checksum: "0xd4a039454c7429f1dfaa7e11e397ef3d0f50d2d5e4c0e4dc04919d153aa13af1", + genesis_validators_root: "0x9d642dac73058fbf39c0ae41ab1e34e4d889043cb199851ded7095bc99eb4c1e", +}; + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone)] pub struct Eth2Config { @@ -62,6 +79,32 @@ impl Eth2Config { } } +/// Describes how a genesis state may be obtained. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum GenesisStateSource { + /// The genesis state for this network is not yet known. + Unknown, + /// The genesis state for this network is included in the binary via + /// `include_bytes!` or by loading from a testnet dir. + IncludedBytes, + /// The genesis state for this network should be downloaded from a URL. + Url { + /// URLs to try to download the file from, in order. + urls: &'static [&'static str], + /// The SHA256 of the genesis state bytes. This is *not* a hash tree + /// root to simplify the types (i.e., to avoid getting EthSpec + /// involved). + /// + /// The format should be 0x-prefixed ASCII bytes. + checksum: &'static str, + /// The `genesis_validators_root` of the genesis state. Used to avoid + /// downloading the state for simple signing operations. + /// + /// The format should be 0x-prefixed ASCII bytes. + genesis_validators_root: &'static str, + }, +} + /// A directory that can be built by downloading files via HTTP. 
/// /// Used by the `eth2_network_config` crate to initialize the network directories during build and @@ -70,7 +113,7 @@ impl Eth2Config { pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, pub config_dir: &'a str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, } impl<'a> Eth2NetArchiveAndDirectory<'a> { @@ -89,15 +132,11 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { } } -/// Indicates that the `genesis.ssz.zip` file is present on the filesystem. This means that the -/// deposit ceremony has concluded and the final genesis `BeaconState` is known. -const GENESIS_STATE_IS_KNOWN: bool = true; - -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, pub config_dir: &'static str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, pub config: &'static [u8], pub deploy_block: &'static [u8], pub boot_enr: &'static [u8], @@ -109,7 +148,7 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_state_source: path) => { paste! { #[macro_use] pub mod $name_ident { @@ -118,7 +157,7 @@ macro_rules! define_archive { pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { name: stringify!($name_ident), config_dir: $config_dir, - genesis_is_known: $genesis_is_known, + genesis_state_source: $genesis_state_source, }; /// A wrapper around `std::include_bytes` which includes a file from a specific network @@ -151,7 +190,7 @@ macro_rules! 
define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, config_dir: ETH2_NET_DIR.config_dir, - genesis_is_known: ETH2_NET_DIR.genesis_is_known, + genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), @@ -199,9 +238,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! define_hardcoded_nets { - ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_state_source: path)),+) => { $( - define_archive!($name_ident, $config_dir, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_state_source); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -242,9 +281,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "mainnet", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -252,9 +290,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. 
- GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -264,9 +301,8 @@ define_hardcoded_nets!( // // The Goerli network is effectively an alias to Prater. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -274,9 +310,18 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "gnosis", + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes + ), + ( + // Network name (must be unique among all networks). + chiado, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "chiado", // Set to `true` if the genesis state can be found in the `built_in_network_configs` // directory. - GENESIS_STATE_IS_KNOWN + CHIADO_GENESIS_STATE_SOURCE ), ( // Network name (must be unique among all networks). @@ -284,8 +329,16 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "sepolia", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes + ), + ( + // Network name (must be unique among all networks). + holesky, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. 
+ "holesky", + // Describes how the genesis state can be obtained. + HOLESKY_GENESIS_STATE_SOURCE ) ); diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 7a376568eb..6f92acc84a 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -2,19 +2,18 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = "1.4.0" +lazy_static = { workspace = true } num-bigint = "0.4.2" -ethereum_hashing = "1.0.0-beta.2" -hex = "0.4.2" -serde_yaml = "0.8.13" -serde = "1.0.116" -serde_derive = "1.0.116" -bls = { path = "../../crypto/bls" } +ethereum_hashing = { workspace = true } +hex = { workspace = true } +serde_yaml = { workspace = true } +serde = { workspace = true } +bls = { workspace = true } [dev-dependencies] base64 = "0.13.0" diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index 7b5fa7a8e4..3d4ff02c38 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -22,7 +22,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; use ethereum_hashing::hash; use num_bigint::BigUint; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::convert::TryInto; use std::fs::File; use std::path::PathBuf; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 338a2d243b..3807c2e993 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -2,20 +2,30 @@ name = "eth2_network_config" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } build = "build.rs" [build-dependencies] -zip = "0.5.8" -eth2_config = 
{ path = "../eth2_config"} +zip = { workspace = true } +eth2_config = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } +tokio = { workspace = true } [dependencies] -serde_yaml = "0.8.13" -types = { path = "../../consensus/types"} -ethereum_ssz = "0.5.0" -eth2_config = { path = "../eth2_config"} -discv5 = "0.3.1" \ No newline at end of file +serde_yaml = { workspace = true } +serde_json = { workspace = true } +types = { workspace = true } +ethereum_ssz = { workspace = true } +eth2_config = { workspace = true } +discv5 = { workspace = true } +reqwest = { workspace = true } +pretty_reqwest_error = { workspace = true } +sha2 = { workspace = true } +url = { workspace = true } +sensitive_url = { workspace = true } +slog = { workspace = true } +logging = { workspace = true } +bytes = { workspace = true } diff --git a/common/eth2_network_config/build.rs b/common/eth2_network_config/build.rs index fa45fafa4e..3165930f4a 100644 --- a/common/eth2_network_config/build.rs +++ b/common/eth2_network_config/build.rs @@ -1,5 +1,7 @@ //! Extracts zipped genesis states on first run. 
-use eth2_config::{Eth2NetArchiveAndDirectory, ETH2_NET_DIRS, GENESIS_FILE_NAME}; +use eth2_config::{ + Eth2NetArchiveAndDirectory, GenesisStateSource, ETH2_NET_DIRS, GENESIS_FILE_NAME, +}; use std::fs::File; use std::io; use zip::ZipArchive; @@ -26,7 +28,7 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), return Ok(()); } - if network.genesis_is_known { + if network.genesis_state_source == GenesisStateSource::IncludedBytes { // Extract genesis state from genesis.ssz.zip let archive_path = network.genesis_state_archive(); let archive_file = File::open(&archive_path) @@ -46,7 +48,8 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), io::copy(&mut file, &mut outfile) .map_err(|e| format!("Error writing file {:?}: {}", genesis_ssz_path, e))?; } else { - // Create empty genesis.ssz if genesis is unknown + // Create empty genesis.ssz if genesis is unknown or to be downloaded via URL. + // This is a bit of a hack to make `include_bytes!` easier to deal with. 
File::create(genesis_ssz_path) .map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?; } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml new file mode 100644 index 0000000000..96baffde6f --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml @@ -0,0 +1,8 @@ +# chiado-teku-0 +- "enr:-Ly4QLYLNqrjvSxD3lpAPBUNlxa6cIbe79JqLZLFcZZjWoCjZcw-85agLUErHiygG2weRSCLnd5V460qTbLbwJQsfZkoh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhKq7mu-Jc2VjcDI1NmsxoQP900YAYa9kdvzlSKGjVo-F3XVzATjOYp3BsjLjSophO4hzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +# chiado-teku-1 +- "enr:-Ly4QCGeYvTCNOGKi0mKRUd45rLj96b4pH98qG7B9TCUGXGpHZALtaL2-XfjASQyhbCqENccI4PGXVqYTIehNT9KJMQgh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQrVSJc2VjcDI1NmsxoQP9iDchx2PGl3JyJ29B9fhLCvVMN6n23pPAIIeFV-sHOIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.71.132.231 +- "enr:-Ly4QAtr21x5Ps7HYhdZkIBRBgcBkvlIfEel1YNjtFWf4cV3au2LgBGICz9PtEs9-p2HUl_eME8m1WImxTxSB3AkCMwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANHhOeJc2VjcDI1NmsxoQNLp1QPV8-pyMCohOtj6xGtSBM_GtVTqzlbvNsCF4ezkYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.69.35.13 +- "enr:-Ly4QLgn8Bx6faigkKUGZQvd1HDToV2FAxZIiENK-lczruzQb90qJK-4E65ADly0s4__dQOW7IkLMW7ZAyJy2vtiLy8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANFIw2Jc2VjcDI1NmsxoQMa-fWEy9UJHfOl_lix3wdY5qust78sHAqZnWwEiyqKgYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml new file mode 100644 index 0000000000..fcebaa9bd8 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -0,0 +1,156 @@ +# Extends the mainnet preset +PRESET_BASE: gnosis +# needs to exist because of Prysm. 
Otherwise it conflicts with mainnet genesis +CONFIG_NAME: chiado + +# Genesis +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 6000 +# 10 October 2022 10:00:00 GMT+0000 +MIN_GENESIS_TIME: 1665396000 +GENESIS_DELAY: 300 + +# Projected time: 2022-11-04T15:00:00.000Z, block: 680928 +TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 + +# Deposit contract +# --------------------------------------------------------------- +# NOTE: Don't use a value too high, or Teku rejects it (4294906129 NOK) +DEPOSIT_CHAIN_ID: 10200 +DEPOSIT_NETWORK_ID: 10200 +DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 + +# Misc +# --------------------------------------------------------------- +# 2**6 (= 64) +MAX_COMMITTEES_PER_SLOT: 64 +# 2**7 (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# 2**12 (= 4096) +CHURN_LIMIT_QUOTIENT: 4096 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 +# 4 +HYSTERESIS_QUOTIENT: 4 +# 1 (minus 0.25) +HYSTERESIS_DOWNWARD_MULTIPLIER: 1 +# 5 (plus 1.25) +HYSTERESIS_UPWARD_MULTIPLIER: 5 +# Validator +# --------------------------------------------------------------- +# 2**10 (= 1024) ~1.4 hour +ETH1_FOLLOW_DISTANCE: 1024 +# 2**4 (= 16) +TARGET_AGGREGATORS_PER_COMMITTEE: 16 +# 2**0 (= 1) +RANDOM_SUBNETS_PER_VALIDATOR: 1 +# 2**8 (= 256) +EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 +# 6 (estimate from xDai mainnet) +SECONDS_PER_ETH1_BLOCK: 6 + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE: 32000000000 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 (= 1,000,000,000) Gwei +EFFECTIVE_BALANCE_INCREMENT: 1000000000 +# Initial values +# --------------------------------------------------------------- +# GBC 
area code +GENESIS_FORK_VERSION: 0x0000006f +BLS_WITHDRAWAL_PREFIX: 0x00 +# Time parameters +# --------------------------------------------------------------- +# 5 seconds +SECONDS_PER_SLOT: 5 +# 2**0 (= 1) slots 12 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 1 +# 2**4 (= 16) slots 1.87 minutes +SLOTS_PER_EPOCH: 16 +# 2**0 (= 1) epochs 1.87 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 (= 4) epochs 7.47 minutes +MAX_SEED_LOOKAHEAD: 4 +# 2**6 (= 64) epochs ~2 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 64 +# 2**13 (= 8,192) slots ~15.9 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 (= 256) epochs ~8 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~8 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**2 (= 4) epochs 7.47 minutes +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 + +# State vector lengths +# --------------------------------------------------------------- +# 2**16 (= 65,536) epochs ~85 days +EPOCHS_PER_HISTORICAL_VECTOR: 65536 +# 2**13 (= 8,192) epochs ~10.6 days +EPOCHS_PER_SLASHINGS_VECTOR: 8192 +# 2**24 (= 16,777,216) historical roots, ~15,243 years +HISTORICAL_ROOTS_LIMIT: 16777216 +# 2**40 (= 1,099,511,627,776) validator spots +VALIDATOR_REGISTRY_LIMIT: 1099511627776 +# Reward and penalty quotients +# --------------------------------------------------------------- +# 25 +BASE_REWARD_FACTOR: 25 +# 2**9 (= 512) +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +# 2**3 (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**26 (= 67,108,864) +INACTIVITY_PENALTY_QUOTIENT: 67108864 +# 2**7 (= 128) (lower safety margin at Phase 0 genesis) +MIN_SLASHING_PENALTY_QUOTIENT: 128 +# 1 (lower safety margin at Phase 0 genesis) +PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) +MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 (= 16) +MAX_DEPOSITS: 16 +# 2**4 (= 16) +MAX_VOLUNTARY_EXITS: 16 +# Signature domains +# 
--------------------------------------------------------------- +DOMAIN_BEACON_PROPOSER: 0x00000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 +DOMAIN_DEPOSIT: 0x03000000 +DOMAIN_VOLUNTARY_EXIT: 0x04000000 +DOMAIN_SELECTION_PROOF: 0x05000000 +DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +DOMAIN_SYNC_COMMITTEE: 0x07000000 +DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000 +DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 + +# Altair +ALTAIR_FORK_VERSION: 0x0100006f +ALTAIR_FORK_EPOCH: 90 # Mon Oct 10 2022 12:00:00 GMT+0000 +# Bellatrix +BELLATRIX_FORK_VERSION: 0x0200006f +BELLATRIX_FORK_EPOCH: 180 # Mon Oct 10 2022 14:00:00 GMT+0000 +# Capella +CAPELLA_FORK_VERSION: 0x0300006f +CAPELLA_FORK_EPOCH: 244224 # Wed May 24 2023 13:12:00 GMT+0000 + +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 8e7a9dd07a..940fad3615 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -39,6 +39,9 @@ BELLATRIX_FORK_EPOCH: 385536 # Capella CAPELLA_FORK_VERSION: 0x03000064 CAPELLA_FORK_EPOCH: 648704 +# Deneb +DENEB_FORK_VERSION: 0x04000064 +DENEB_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -71,6 +74,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**12 (= 4096) 
CHURN_LIMIT_QUOTIENT: 4096 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml new file mode 100644 index 0000000000..4dd2808102 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml @@ -0,0 +1,9 @@ +# EF +- enr:-Ku4QFo-9q73SspYI8cac_4kTX7yF800VXqJW4Lj3HkIkb5CMqFLxciNHePmMt4XdJzHvhrCC5ADI4D_GkAsxGJRLnQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAhnTT-AQFwAP__________gmlkgnY0gmlwhLKAiOmJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyk +- enr:-Ku4QPG7F72mbKx3gEQEx07wpYYusGDh-ni6SNkLvOS-hhN-BxIggN7tKlmalb0L5JPoAfqD-akTZ-gX06hFeBEz4WoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAhnTT-AQFwAP__________gmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyk +- enr:-LK4QPxe-mDiSOtEB_Y82ozvxn9aQM07Ui8A-vQHNgYGMMthfsfOabaaTHhhJHFCBQQVRjBww_A5bM1rf8MlkJU_l68Eh2F0dG5ldHOIAADAAAAAAACEZXRoMpBpt9l0BAFwAAABAAAAAAAAgmlkgnY0gmlwhLKAiOmJc2VjcDI1NmsxoQJu6T9pclPObAzEVQ53DpVQqjadmVxdTLL-J3h9NFoCeIN0Y3CCIyiDdWRwgiMo +- enr:-Ly4QGbOw4xNel5EhmDsJJ-QhC9XycWtsetnWoZ0uRy381GHdHsNHJiCwDTOkb3S1Ade0SFQkWJX_pgb3g8Jfh93rvMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBpt9l0BAFwAAABAAAAAAAAgmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQOxKv9sv3zKF8GDewgFGGHKP5HCZZpPpTrwl9eXKAWGxIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +# Teku +- enr:-LS4QG0uV4qvcpJ-HFDJRGBmnlD3TJo7yc4jwK8iP7iKaTlfQ5kZvIDspLMJhk7j9KapuL9yyHaZmwTEZqr10k9XumyCEcmHYXR0bmV0c4gAAAAABgAAAIRldGgykGm32XQEAXAAAAEAAAAAAACCaWSCdjSCaXCErK4j-YlzZWNwMjU2azGhAgfWRBEJlb7gAhXIB5ePmjj2b8io0UpEenq1Kl9cxStJg3RjcIIjKIN1ZHCCIyg +# Sigma Prime +- enr:-Le4QLoE1wFHSlGcm48a9ZESb_MRLqPPu6G0vHqu4MaUcQNDHS69tsy-zkN0K6pglyzX8m24mkb-LtBcbjAYdP1uxm4BhGV0aDKQabfZdAQBcAAAAQAAAAAAAIJpZIJ2NIJpcIQ5gR6Wg2lwNpAgAUHQBwEQAAAAAAAAADR-iXNlY3AyNTZrMaEDPMSNdcL92uNIyCsS177Z6KTXlbZakQqxv3aQcWawNXeDdWRwgiMohHVkcDaCI4I diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml 
b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml new file mode 100644 index 0000000000..9f4faeb27a --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -0,0 +1,115 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: holesky + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Sep-28-2023 11:55:00 +UTC +MIN_GENESIS_TIME: 1695902100 +GENESIS_FORK_VERSION: 0x01017000 +# Genesis delay 5 mins +GENESIS_DELAY: 300 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x02017000 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x03017000 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x04017000 +CAPELLA_FORK_EPOCH: 256 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 28,000,000,000 Gwei to ensure quicker ejection +EJECTION_BALANCE: 28000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# 2**16 (= 65,536) 
+CHURN_LIMIT_QUOTIENT: 65536 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 17000 +DEPOSIT_NETWORK_ID: 17000 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml 
b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 428a082cc0..3c6e1bad8a 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -9,12 +9,12 @@ - enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg - enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg # Teku team (Consensys) -- enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA -- enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA +- enr:-KG4QMOEswP62yzDjSwWS4YEjtTZ5PO6r65CPqYBkgTTkrpaedQ8uEUo1uMALtJIvb2w_WWEVmg5yt1UAuK1ftxUU7QDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaEDfol8oLr6XJ7FsdAYE7lpJhKMls4G_v6qQOGKJUWGb_uDdGNwgiMog3VkcIIjKA +- enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA # Prysm team (Prysmatic Labs) - enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg - 
enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA - enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg # Nimbus team - enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM \ No newline at end of file +- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 98984f3b7d..ed96df2913 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -39,6 +39,9 @@ BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +# Deneb +DENEB_FORK_VERSION: 0x04000000 +DENEB_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -71,6 +74,8 @@ 
INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index a0dd85fec0..d82a2c09b8 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -70,6 +70,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index e3674cf7df..b3dbb8a115 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -32,6 +32,10 @@ TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 CAPELLA_FORK_VERSION: 0x90000072 CAPELLA_FORK_EPOCH: 56832 +# Deneb +DENEB_FORK_VERSION: 0x03001020 +DENEB_FORK_EPOCH: 18446744073709551615 + # Sharding SHARDING_FORK_VERSION: 0x04001020 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -60,6 +64,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 diff --git a/common/eth2_network_config/built_in_network_configs/trusted_setup.json b/common/eth2_network_config/built_in_network_configs/trusted_setup.json new file mode 100644 index 0000000000..de2bf0ac59 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/trusted_setup.json @@ -0,0 +1 @@ 
+{"g1_lagrange":["0xa0413c0dcafec6dbc9f47d66785cf1e8c981044f7d13cfe3e4fcbb71b5408dfde6312493cb3c1d30516cb3ca88c03654","0x8b997fb25730d661918371bb41f2a6e899cac23f04fc5365800b75433c0a953250e15e7a98fb5ca5cc56a8cd34c20c57","0x83302852db89424d5699f3f157e79e91dc1380f8d5895c5a772bb4ea3a5928e7c26c07db6775203ce33e62a114adaa99","0xa759c48b7e4a685e735c01e5aa6ef9c248705001f470f9ad856cd87806983e917a8742a3bd5ee27db8d76080269b7c83","0x967f8dc45ebc3be14c8705f43249a30ff48e96205fb02ae28daeab47b72eb3f45df0625928582aa1eb4368381c33e127","0xa418eb1e9fb84cb32b370610f56f3cb470706a40ac5a47c411c464299c45c91f25b63ae3fcd623172aa0f273c0526c13","0x8f44e3f0387293bc7931e978165abbaed08f53acd72a0a23ac85f6da0091196b886233bcee5b4a194db02f3d5a9b3f78","0x97173434b336be73c89412a6d70d416e170ea355bf1956c32d464090b107c090ef2d4e1a467a5632fbc332eeb679bf2d","0xa24052ad8d55ad04bc5d951f78e14213435681594110fd18173482609d5019105b8045182d53ffce4fc29fc8810516c1","0xb950768136b260277590b5bec3f56bbc2f7a8bc383d44ce8600e85bf8cf19f479898bcc999d96dfbd2001ede01d94949","0x92ab8077871037bd3b57b95cbb9fb10eb11efde9191690dcac655356986fd02841d8fdb25396faa0feadfe3f50baf56d","0xa79b096dff98038ac30f91112dd14b78f8ad428268af36d20c292e2b3b6d9ed4fb28480bb04e465071cc67d05786b6d1","0xb9ff71461328f370ce68bf591aa7fb13027044f42a575517f3319e2be4aa4843fa281e756d0aa5645428d6dfa857cef2","0x8d765808c00b3543ff182e2d159c38ae174b12d1314da88ea08e13bd9d1c37184cb515e6bf6420531b5d41767987d7ce","0xb8c9a837d20c3b53e6f578e4a257bb7ef8fc43178614ec2a154915b267ad2be135981d01ed2ee1b5fbd9d9bb27f0800a","0xa9773d92cf23f65f98ef68f6cf95c72b53d0683af2f9bf886bb9036e4a38184b1131b26fd24397910b494fbef856f3aa","0xb41ebe38962d112da4a01bf101cb248d808fbd50aaf749fc7c151cf332032eb3e3bdbd716db899724b734d392f26c412","0x90fbb030167fb47dcc13d604a726c0339418567c1d287d1d87423fa0cb92eec3455fbb46bcbe2e697144a2d3972142e4","0xb11d298bd167464b35fb923520d14832bd9ed50ed841bf6d7618424fd6f3699190af21759e351b89142d355952149da1","0x8bc36066f69dc89f7c4d1e58d67497675050c6aa002244cebd9fc957ec5e3
64c46bab4735ea3db02b73b3ca43c96e019","0xab7ab92c5d4d773068e485aa5831941ebd63db7118674ca38089635f3b4186833af2455a6fb9ed2b745df53b3ce96727","0xaf191ca3089892cb943cd97cf11a51f38e38bd9be50844a4e8da99f27e305e876f9ed4ab0628e8ae3939066b7d34a15f","0xa3204c1747feabc2c11339a542195e7cb6628fd3964f846e71e2e3f2d6bb379a5e51700682ea1844eba12756adb13216","0x903a29883846b7c50c15968b20e30c471aeac07b872c40a4d19eb1a42da18b649d5bbfde4b4cf6225d215a461b0deb6d","0x8e6e9c15ffbf1e16e5865a5fef7ed751dc81957a9757b535cb38b649e1098cda25d42381dc4f776778573cdf90c3e6e0","0xa8f6dd26100b512a8c96c52e00715c4b2cb9ac457f17aed8ffe1cf1ea524068fe5a1ddf218149845fc1417b789ecfc98","0xa5b0ffc819451ea639cfd1c18cbc9365cc79368d3b2e736c0ae54eba2f0801e6eb0ee14a5f373f4a70ca463bdb696c09","0x879f91ccd56a1b9736fbfd20d8747354da743fb121f0e308a0d298ff0d9344431890e41da66b5009af3f442c636b4f43","0x81bf3a2d9755e206b515a508ac4d1109bf933c282a46a4ae4a1b4cb4a94e1d23642fad6bd452428845afa155742ade7e","0x8de778d4742f945df40004964e165592f9c6b1946263adcdd5a88b00244bda46c7bb49098c8eb6b3d97a0dd46148a8ca","0xb7a57b21d13121907ee28c5c1f80ee2e3e83a3135a8101e933cf57171209a96173ff5037f5af606e9fd6d066de6ed693","0xb0877d1963fd9200414a38753dffd9f23a10eb3198912790d7eddbc9f6b477019d52ddd4ebdcb9f60818db076938a5a9","0x88da2d7a6611bc16adc55fc1c377480c828aba4496c645e3efe0e1a67f333c05a0307f7f1d2df8ac013602c655c6e209","0x95719eb02e8a9dede1a888c656a778b1c69b7716fbe3d1538fe8afd4a1bc972183c7d32aa7d6073376f7701df80116d8","0x8e8a1ca971f2444b35af3376e85dccda3abb8e8e11d095d0a4c37628dfe5d3e043a377c3de68289ef142e4308e9941a0","0xb720caaff02f6d798ac84c4f527203e823ff685869e3943c979e388e1c34c3f77f5c242c6daa7e3b30e511aab917b866","0x86040d55809afeec10e315d1ad950d269d37cfee8c144cd8dd4126459e3b15a53b3e68df5981df3c2346d23c7b4baaf4","0x82d8cabf13ab853db0377504f0aec00dba3a5cd3119787e8ad378ddf2c40b022ecfc67c642b7acc8c1e3dd03ab50993e","0xb8d873927936719d2484cd03a6687d65697e17dcf4f0d5aed6f5e4750f52ef2133d4645894e7ebfc4ef6ce6788d404c8","0xb1235594dbb15b674a419ff2b2deb644ad2a93791
ca05af402823f87114483d6aa1689b7a9bea0f547ad12fe270e4344","0xa53fda86571b0651f5affb74312551a082fffc0385cfd24c1d779985b72a5b1cf7c78b42b4f7e51e77055f8e5e915b00","0xb579adcfd9c6ef916a5a999e77a0cb21d378c4ea67e13b7c58709d5da23a56c2e54218691fc4ac39a4a3d74f88cc31f7","0xab79e584011713e8a2f583e483a91a0c2a40771b77d91475825b5acbea82db4262132901cb3e4a108c46d7c9ee217a4e","0xa0fe58ea9eb982d7654c8aaf9366230578fc1362f6faae0594f8b9e659bcb405dff4aac0c7888bbe07f614ecf0d800a6","0x867e50e74281f28ecd4925560e2e7a6f8911b135557b688254623acce0dbc41e23ac3e706a184a45d54c586edc416eb0","0x89f81b61adda20ea9d0b387a36d0ab073dc7c7cbff518501962038be19867042f11fcc7ff78096e5d3b68c6d8dc04d9b","0xa58ee91bb556d43cf01f1398c5811f76dc0f11efdd569eed9ef178b3b0715e122060ec8f945b4dbf6eebfa2b90af6fa6","0xac460be540f4c840def2eef19fc754a9af34608d107cbadb53334cf194cc91138d53b9538fcd0ec970b5d4aa455b224a","0xb09b91f929de52c09d48ca0893be6eb44e2f5210a6c394689dc1f7729d4be4e11d0474b178e80cea8c2ac0d081f0e811","0x8d37a442a76b06a02a4e64c2504aea72c8b9b020ab7bcc94580fe2b9603c7c50d7b1e9d70d2a7daea19c68667e8f8c31","0xa9838d4c4e3f3a0075a952cf7dd623307ec633fcc81a7cf9e52e66c31780de33dbb3d74c320dc7f0a4b72f7a49949515","0xa44766b6251af458fe4f5f9ed1e02950f35703520b8656f09fc42d9a2d38a700c11a7c8a0436ac2e5e9f053d0bb8ff91","0xad78d9481c840f5202546bea0d13c776826feb8b1b7c72e83d99a947622f0bf38a4208551c4c41beb1270d7792075457","0xb619ffa8733b470039451e224b777845021e8dc1125f247a4ff2476cc774657d0ff9c5279da841fc1236047de9d81c60","0xaf760b0a30a1d6af3bc5cd6686f396bd41779aeeb6e0d70a09349bd5da17ca2e7965afc5c8ec22744198fbe3f02fb331","0xa0cc209abdb768b589fcb7b376b6e1cac07743288c95a1cf1a0354b47f0cf91fca78a75c1fcafa6f5926d6c379116608","0x864add673c89c41c754eeb3cd8dcff5cdde1d739fce65c30e474a082bb5d813cba6412e61154ce88fdb6c12c5d9be35b","0xb091443b0ce279327dc37cb484e9a5b69b257a714ce21895d67539172f95ffa326903747b64a3649e99aea7bb10d03f7","0xa8c452b8c4ca8e0a61942a8e08e28f17fb0ef4c5b018b4e6d1a64038280afa2bf1169202f05f14af24a06ca72f448ccd","0xa23c24721d18bc48d5dcf
70effcbef89a7ae24e67158d70ae1d8169ee75d9a051d34b14e9cf06488bac324fe58549f26","0x92a730e30eb5f3231feb85f6720489dbb1afd42c43f05a1610c6b3c67bb949ec8fde507e924498f4ffc646f7b07d9123","0x8dbe5abf4031ec9ba6bb06d1a47dd1121fb9e03b652804069250967fd5e9577d0039e233441b7f837a7c9d67ba18c28e","0xaa456bcfef6a21bb88181482b279df260297b3778e84594ebddbdf337e85d9e3d46ca1d0b516622fb0b103df8ec519b7","0xa3b31ae621bd210a2b767e0e6f22eb28fe3c4943498a7e91753225426168b9a26da0e02f1dc5264da53a5ad240d9f51b","0xaa8d66857127e6e71874ce2202923385a7d2818b84cb73a6c42d71afe70972a70c6bdd2aad1a6e8c5e4ca728382a8ea8","0xac7e8e7a82f439127a5e40558d90d17990f8229852d21c13d753c2e97facf077cf59582b603984c3dd3faebd80aff4f5","0x93a8bcf4159f455d1baa73d2ef2450dcd4100420de84169bbe28b8b7a5d1746273f870091a87a057e834f754f34204b1","0x89d0ebb287c3613cdcae7f5acc43f17f09c0213fc40c074660120b755d664109ffb9902ed981ede79e018ddb0c845698","0xa87ccbfad431406aadbee878d9cf7d91b13649d5f7e19938b7dfd32645a43b114eef64ff3a13201398bd9b0337832e5a","0x833c51d0d0048f70c3eefb4e70e4ff66d0809c41838e8d2c21c288dd3ae9d9dfaf26d1742bf4976dab83a2b381677011","0x8bcd6b1c3b02fffead432e8b1680bad0a1ac5a712d4225e220690ee18df3e7406e2769e1f309e2e803b850bc96f0e768","0xb61e3dbd88aaf4ff1401521781e2eea9ef8b66d1fac5387c83b1da9e65c2aa2a56c262dea9eceeb4ad86c90211672db0","0x866d3090db944ecf190dd0651abf67659caafd31ae861bab9992c1e3915cb0952da7c561cc7e203560a610f48fae633b","0xa5e8971543c14274a8dc892b0be188c1b4fbc75c692ed29f166e0ea80874bc5520c2791342b7c1d2fb5dd454b03b8a5b","0x8f2f9fc50471bae9ea87487ebd1bc8576ef844cc42d606af5c4c0969670fdf2189afd643e4de3145864e7773d215f37f","0xb1bb0f2527db6d51f42b9224383c0f96048bbc03d469bf01fe1383173ef8b1cc9455d9dd8ba04d46057f46949bfc92b5","0xaa7c99d906b4d7922296cfe2520473fc50137c03d68b7865c5bfb8adbc316b1034310ec4b5670c47295f4a80fb8d61e9","0xa5d1da4d6aba555919df44cbaa8ff79378a1c9e2cfdfbf9d39c63a4a00f284c5a5724e28ecbc2d9dba27fe4ee5018bd5","0xa8db53224f70af4d991b9aae4ffe92d2aa5b618ad9137784b55843e9f16cefbfd25ada355d308e9bbf55f6d2f7976fb3","0xb
6536c4232bb20e22af1a8bb12de76d5fec2ad9a3b48af1f38fa67e0f8504ef60f305a73d19385095bb6a9603fe29889","0x87f7e371a1817a63d6838a8cf4ab3a8473d19ce0d4f40fd013c03d5ddd5f4985df2956531cc9f187928ef54c68f4f9a9","0xae13530b1dbc5e4dced9d909ea61286ec09e25c12f37a1ed2f309b0eb99863d236c3b25ed3484acc8c076ad2fa8cd430","0x98928d850247c6f7606190e687d5c94a627550198dbdbea0161ef9515eacdb1a0f195cae3bb293112179082daccf8b35","0x918528bb8e6a055ad4db6230d3a405e9e55866da15c4721f5ddd1f1f37962d4904aad7a419218fe6d906fe191a991806","0xb71e31a06afe065773dd3f4a6e9ef81c3292e27a3b7fdfdd452d03e05af3b6dd654c355f7516b2a93553360c6681a73a","0x8870b83ab78a98820866f91ac643af9f3ff792a2b7fda34185a9456a63abdce42bfe8ad4dc67f08a6392f250d4062df4","0x91eea1b668e52f7a7a5087fabf1cab803b0316f78d9fff469fbfde2162f660c250e4336a9eea4cb0450bd30ac067bc8b","0x8b74990946de7b72a92147ceac1bd9d55999a8b576e8df68639e40ed5dc2062cfcd727903133de482b6dca19d0aaed82","0x8ebad537fece090ebbab662bdf2618e21ca30cf6329c50935e8346d1217dcbe3c1fe1ea28efca369c6003ce0a94703c1","0xa8640479556fb59ebd1c40c5f368fbd960932fdbb782665e4a0e24e2bdb598fc0164ce8c0726d7759cfc59e60a62e182","0xa9a52a6bf98ee4d749f6d38be2c60a6d54b64d5cbe4e67266633dc096cf28c97fe998596707d31968cbe2064b72256bf","0x847953c48a4ce6032780e9b39d0ed4384e0be202c2bbe2dfda3910f5d87aa5cd3c2ffbfcfae4dddce16d6ab657599b95","0xb6f6e1485d3ec2a06abaecd23028b200b2e4a0096c16144d07403e1720ff8f9ba9d919016b5eb8dc5103880a7a77a1d3","0x98dfc2065b1622f596dbe27131ea60bef7a193b12922cecb27f8c571404f483014f8014572e86ae2e341ab738e4887ef","0xacb0d205566bacc87bbe2e25d10793f63f7a1f27fd9e58f4f653ceae3ffeba511eaf658e068fad289eeb28f9edbeb35b","0xae4411ed5b263673cee894c11fe4abc72a4bf642d94022a5c0f3369380fcdfc1c21e277f2902972252503f91ada3029a","0xac4a7a27ba390a75d0a247d93d4a8ef1f0485f8d373a4af4e1139369ec274b91b3464d9738eeaceb19cd6f509e2f8262","0x87379c3bf231fdafcf6472a79e9e55a938d851d4dd662ab6e0d95fd47a478ed99e2ad1e6e39be3c0fc4f6d996a7dd833","0x81316904b035a8bcc2041199a789a2e6879486ba9fddcba0a82c745cc8dd8374a39e523b91792170cd
30be7aa3005b85","0xb8206809c6cd027ed019f472581b45f7e12288f89047928ba32b4856b6560ad30395830d71e5e30c556f6f182b1fe690","0x88d76c028f534a62e019b4a52967bb8642ede6becfa3807be68fdd36d366fc84a4ac8dc176e80a68bc59eb62caf5dff9","0x8c3b8be685b0f8aad131ee7544d0e12f223f08a6f8edaf464b385ac644e0ddc9eff7cc7cb5c1b50ab5d71ea0f41d2213","0x8d91410e004f76c50fdc05784157b4d839cb5090022c629c7c97a5e0c3536eeafee17a527b54b1165c3cd81774bb54ce","0xb25c2863bc28ec5281ce800ddf91a7e1a53f4c6d5da1e6c86ef4616e93bcf55ed49e297216d01379f5c6e7b3c1e46728","0x865f7b09ac3ca03f20be90c48f6975dd2588838c2536c7a3532a6aa5187ed0b709cd03d91ff4048061c10d0aa72b69ce","0xb3f7477c90c11596eb4f8bbf34adbcb832638c4ff3cdd090d4d477ee50472ac9ddaf5be9ad7eca3f148960d362bbd098","0x8db35fd53fca04faecd1c76a8227160b3ab46ac1af070f2492445a19d8ff7c25bbaef6c9fa0c8c088444561e9f7e4eb2","0xa478b6e9d058a2e01d2fc053b739092e113c23a6a2770a16afbef044a3709a9e32f425ace9ba7981325f02667c3f9609","0x98caa6bd38916c08cf221722a675a4f7577f33452623de801d2b3429595f988090907a7e99960fff7c076d6d8e877b31","0xb79aaaacefc49c3038a14d2ac468cfec8c2161e88bdae91798d63552cdbe39e0e02f9225717436b9b8a40a022c633c6e","0x845a31006c680ee6a0cc41d3dc6c0c95d833fcf426f2e7c573fa15b2c4c641fbd6fe5ebb0e23720cc3467d6ee1d80dc4","0xa1bc287e272cf8b74dbf6405b3a5190883195806aa351f1dc8e525aa342283f0a35ff687e3b434324dedee74946dd185","0xa4fd2dc8db75d3783a020856e2b3aa266dc6926e84f5c491ef739a3bddd46dc8e9e0fc1177937839ef1b18d062ffbb9e","0xacbf0d3c697f57c202bb8c5dc4f3fc341b8fc509a455d44bd86acc67cad2a04495d5537bcd3e98680185e8aa286f2587","0xa5caf423a917352e1b8e844f5968a6da4fdeae467d10c6f4bbd82b5eea46a660b82d2f5440d3641c717b2c3c9ed0be52","0x8a39d763c08b926599ab1233219c49c825368fad14d9afc7c0c039224d37c00d8743293fd21645bf0b91eaf579a99867","0xb2b53a496def0ba06e80b28f36530fbe0fb5d70a601a2f10722e59abee529369c1ae8fd0f2db9184dd4a2519bb832d94","0xa73980fcef053f1b60ebbb5d78ba6332a475e0b96a0c724741a3abf3b59dd344772527f07203cf4c9cb5155ebed81fa0","0xa070d20acce42518ece322c9db096f16aed620303a39d8d5735a0df6e70fbe
ceb940e8d9f5cc38f3314b2240394ec47b","0xa50cf591f522f19ca337b73089557f75929d9f645f3e57d4f241e14cdd1ea3fb48d84bcf05e4f0377afbb789fbdb5d20","0x82a5ffce451096aca8eeb0cd2ae9d83db3ed76da3f531a80d9a70a346359bf05d74863ce6a7c848522b526156a5e20cd","0x88e0e84d358cbb93755a906f329db1537c3894845f32b9b0b691c29cbb455373d9452fadd1e77e20a623f6eaf624de6f","0xaa07ac7b84a6d6838826e0b9e350d8ec75e398a52e9824e6b0da6ae4010e5943fec4f00239e96433f291fef9d1d1e609","0xac8887bf39366034bc63f6cc5db0c26fd27307cbc3d6cce47894a8a019c22dd51322fb5096edc018227edfafc053a8f6","0xb7d26c26c5b33f77422191dca94977588ab1d4b9ce7d0e19c4a3b4cd1c25211b78c328dbf81e755e78cd7d1d622ad23e","0x99a676d5af49f0ba44047009298d8474cabf2d5bca1a76ba21eff7ee3c4691a102fdefea27bc948ccad8894a658abd02","0xb0d09a91909ab3620c183bdf1d53d43d39eb750dc7a722c661c3de3a1a5d383ad221f71bae374f8a71867505958a3f76","0x84681a883de8e4b93d68ac10e91899c2bbb815ce2de74bb48a11a6113b2a3f4df8aceabda1f5f67bc5aacac8c9da7221","0x9470259957780fa9b43521fab3644f555f5343281c72582b56d2efd11991d897b3b481cafa48681c5aeb80c9663b68f7","0xab1b29f7ece686e6fa968a4815da1d64f3579fed3bc92e1f3e51cd13a3c076b6cf695ed269d373300a62463dc98a4234","0x8ab415bfcd5f1061f7687597024c96dd9c7cb4942b5989379a7a3b5742f7d394337886317659cbeacaf030234a24f972","0xb9b524aad924f9acc63d002d617488f31b0016e0f0548f050cada285ce7491b74a125621638f19e9c96eabb091d945be","0x8c4c373e79415061837dd0def4f28a2d5d74d21cb13a76c9049ad678ca40228405ab0c3941df49249847ecdefc1a5b78","0xa8edf4710b5ab2929d3db6c1c0e3e242261bbaa8bcec56908ddadd7d2dad2dca9d6eb9de630b960b122ebeea41040421","0x8d66bb3b50b9df8f373163629f9221b3d4b6980a05ea81dc3741bfe9519cf3ebba7ab98e98390bae475e8ede5821bd5c","0x8d3c21bae7f0cfb97c56952bb22084b58e7bb718890935b73103f33adf5e4d99cd262f929c6eeab96209814f0dbae50a","0xa5c66cfab3d9ebf733c4af24bebc97070e7989fe3c73e79ac85fb0e4d40ae44fb571e0fad4ad72560e13ed453900d14f","0x9362e6b50b43dbefbc3254471372297b5dcce809cd3b60bf74a1268ab68bdb50e46e462cbd78f0d6c056330e982846af","0x854630d08e3f0243d570cc2e856234cb4c1a158d9c
1883bf028a76525aaa34be897fe918d5f6da9764a3735fa9ebd24a","0x8c7d246985469ff252c3f4df6c7c9196fc79f05c1c66a609d84725c78001d0837c7a7049394ba5cf7e863e2d58af8417","0xae050271e01b528925302e71903f785b782f7bf4e4e7a7f537140219bc352dc7540c657ed03d3a297ad36798ecdb98cd","0x8d2ae9179fcf2b0c69850554580b52c1f4a5bd865af5f3028f222f4acad9c1ad69a8ef6c7dc7b03715ee5c506b74325e","0xb8ef8de6ce6369a8851cd36db0ccf00a85077e816c14c4e601f533330af9e3acf0743a95d28962ed8bfcfc2520ef3cfe","0xa6ecad6fdfb851b40356a8b1060f38235407a0f2706e7b8bb4a13465ca3f81d4f5b99466ac2565c60af15f022d26732e","0x819ff14cdea3ab89d98e133cd2d0379361e2e2c67ad94eeddcdb9232efd509f51d12f4f03ebd4dd953bd262a886281f7","0x8561cd0f7a6dbcddd83fcd7f472d7dbcba95b2d4fb98276f48fccf69f76d284e626d7e41314b633352df8e6333fd52a1","0xb42557ccce32d9a894d538c48712cb3e212d06ac05cd5e0527ccd2db1078ee6ae399bf6a601ffdab1f5913d35fc0b20c","0x89b4008d767aad3c6f93c349d3b956e28307311a5b1cec237e8d74bb0dee7e972c24f347fd56afd915a2342bd7bc32f0","0x877487384b207e53f5492f4e36c832c2227f92d1bb60542cfeb35e025a4a7afc2b885fae2528b33b40ab09510398f83e","0x8c411050b63c9053dd0cd81dacb48753c3d7f162028098e024d17cd6348482703a69df31ad6256e3d25a8bbf7783de39","0xa8506b54a88d17ac10fb1b0d1fe4aa40eae7553a064863d7f6b52ccc4236dd4b82d01dca6ba87da9a239e3069ba879fb","0xb1a24caef9df64750c1350789bb8d8a0db0f39474a1c74ea9ba064b1516db6923f00af8d57c632d58844fb8786c3d47a","0x959d6e255f212b0708c58a2f75cb1fe932248c9d93424612c1b8d1e640149656059737e4db2139afd5556bcdacf3eda2","0x84525af21a8d78748680b6535bbc9dc2f0cf9a1d1740d12f382f6ecb2e73811d6c1da2ad9956070b1a617c61fcff9fe5","0xb74417d84597a485d0a8e1be07bf78f17ebb2e7b3521b748f73935b9afbbd82f34b710fb7749e7d4ab55b0c7f9de127d","0xa4a9aecb19a6bab167af96d8b9d9aa5308eab19e6bfb78f5a580f9bf89bdf250a7b52a09b75f715d651cb73febd08e84","0x9777b30be2c5ffe7d29cc2803a562a32fb43b59d8c3f05a707ab60ec05b28293716230a7d264d7cd9dd358fc031cc13e","0x95dce7a3d4f23ac0050c510999f5fbf8042f771e8f8f94192e17bcbfa213470802ebdbe33a876cb621cf42e275cbfc8b","0xb0b963ebcbbee847ab8ae7
40478544350b3ac7e86887e4dfb2299ee5096247cd2b03c1de74c774d9bde94ae2ee2dcd59","0xa4ab20bafa316030264e13f7ef5891a2c3b29ab62e1668fcb5881f50a9acac6adbe3d706c07e62f2539715db768f6c43","0x901478a297669d608e406fe4989be75264b6c8be12169aa9e0ad5234f459ca377f78484ffd2099a2fe2db5e457826427","0x88c76e5c250810c057004a03408b85cd918e0c8903dc55a0dd8bb9b4fc2b25c87f9b8cf5943eb19fbbe99d36490050c5","0x91607322bbad4a4f03fc0012d0821eff5f8c516fda45d1ec1133bface6f858bf04b25547be24159cab931a7aa08344d4","0x843203e07fce3c6c81f84bc6dc5fb5e9d1c50c8811ace522dc66e8658433a0ef9784c947e6a62c11bf705307ef05212e","0x91dd8813a5d6dddcda7b0f87f672b83198cd0959d8311b2b26fb1fae745185c01f796fbd03aad9db9b58482483fdadd8","0x8d15911aacf76c8bcd7136e958febd6963104addcd751ce5c06b6c37213f9c4fb0ffd4e0d12c8e40c36d658999724bfd","0x8a36c5732d3f1b497ebe9250610605ee62a78eaa9e1a45f329d09aaa1061131cf1d9df00f3a7d0fe8ad614a1ff9caaae","0xa407d06affae03660881ce20dab5e2d2d6cddc23cd09b95502a9181c465e57597841144cb34d22889902aff23a76d049","0xb5fd856d0578620a7e25674d9503be7d97a2222900e1b4738c1d81ff6483b144e19e46802e91161e246271f90270e6cf","0x91b7708869cdb5a7317f88c0312d103f8ce90be14fb4f219c2e074045a2a83636fdc3e69e862049fc7c1ef000e832541","0xb64719cc5480709d1dae958f1d3082b32a43376da446c8f9f64cb02a301effc9c34d9102051733315a8179aed94d53cc","0x94347a9542ff9d18f7d9eaa2f4d9b832d0e535fe49d52aa2de08aa8192400eddabdb6444a2a78883e27c779eed7fdf5a","0x840ef44a733ff1376466698cd26f82cf56bb44811e196340467f932efa3ae1ef9958a0701b3b032f50fd9c1d2aed9ab5","0x90ab3f6f67688888a31ffc2a882bb37adab32d1a4b278951a21646f90d03385fc976715fc639a785d015751171016f10","0xb56f35d164c24b557dbcbc8a4bfa681ec916f8741ffcb27fb389c164f4e3ed2be325210ef5bdaeae7a172ca9599ab442","0xa7921a5a80d7cf6ae81ba9ee05e0579b18c20cd2852762c89d6496aa4c8ca9d1ca2434a67b2c16d333ea8e382cdab1e3","0xa506bcfbd7e7e5a92f68a1bd87d07ad5fe3b97aeee40af2bf2cae4efcd77fff03f872732c5b7883aa6584bee65d6f8cb","0xa8c46cff58931a1ce9cbe1501e1da90b174cddd6d50f3dfdfb759d1d4ad4673c0a8feed6c1f24c7af32865a7d6c984e5","0xb4
5686265a83bff69e312c5149db7bb70ac3ec790dc92e392b54d9c85a656e2bf58596ce269f014a906eafc97461aa5f","0x8d4009a75ccb2f29f54a5f16684b93202c570d7a56ec1a8b20173269c5f7115894f210c26b41e8d54d4072de2d1c75d0","0xaef8810af4fc676bf84a0d57b189760ddc3375c64e982539107422e3de2580b89bd27aa6da44e827b56db1b5555e4ee8","0x888f0e1e4a34f48eb9a18ef4de334c27564d72f2cf8073e3d46d881853ac1424d79e88d8ddb251914890588937c8f711","0xb64b0aa7b3a8f6e0d4b3499fe54e751b8c3e946377c0d5a6dbb677be23736b86a7e8a6be022411601dd75012012c3555","0x8d57776f519f0dd912ea14f79fbab53a30624e102f9575c0bad08d2dc754e6be54f39b11278c290977d9b9c7c0e1e0ad","0xa018fc00d532ceb2e4de908a15606db9b6e0665dd77190e2338da7c87a1713e6b9b61554e7c1462f0f6d4934b960b15c","0x8c932be83ace46f65c78e145b384f58e41546dc0395270c1397874d88626fdeda395c8a289d602b4c312fe98c1311856","0x89174838e21639d6bdd91a0621f04dc056907b88e305dd66e46a08f6d65f731dea72ae87ca5e3042d609e8de8de9aa26","0xb7b7f508bb74f7a827ac8189daa855598ff1d96fa3a02394891fd105d8f0816224cd50ac4bf2ed1cf469ace516c48184","0xb31877ad682583283baadd68dc1bebd83f5748b165aadd7fe9ef61a343773b88bcd3a022f36d6c92f339b7bfd72820a9","0xb79d77260b25daf9126dab7a193df2d7d30542786fa1733ffaf6261734770275d3ca8bae1d9915d1181a78510b3439db","0x91894fb94cd4c1dd2ceaf9c53a7020c5799ba1217cf2d251ea5bc91ed26e1159dd758e98282ebe35a0395ef9f1ed15a0","0xab59895cdafd33934ceedfc3f0d5d89880482cba6c99a6db93245f9e41987efd76e0640e80aef31782c9a8c7a83fccec","0xaa22ea63654315e033e09d4d4432331904a6fc5fb1732557987846e3c564668ca67c60a324b4af01663a23af11a9ce4b","0xb53ba3ef342601467e1f71aa280e100fbabbd38518fa0193e0099505036ee517c1ac78e96e9baeb549bb6879bb698fb0","0x943fd69fd656f37487cca3605dc7e5a215fddd811caf228595ec428751fc1de484a0cb84c667fe4d7c35599bfa0e5e34","0x9353128b5ebe0dddc555093cf3e5942754f938173541033e8788d7331fafc56f68d9f97b4131e37963ab7f1c8946f5f1","0xa76cd3c566691f65cfb86453b5b31dbaf3cab8f84fe1f795dd1e570784b9b01bdd5f0b3c1e233942b1b5838290e00598","0x983d84b2e53ffa4ae7f3ba29ef2345247ea2377686b74a10479a0ef105ecf90427bf53b74c96dfa346d
0f842b6ffb25b","0x92e0fe9063306894a2c6970c001781cff416c87e87cb5fbac927a3192655c3da4063e6fa93539f6ff58efac6adcc5514","0xb00a81f03c2b8703acd4e2e4c21e06973aba696415d0ea1a648ace2b0ea19b242fede10e4f9d7dcd61c546ab878bc8f9","0xb0d08d880f3b456a10bf65cff983f754f545c840c413aea90ce7101a66eb0a0b9b1549d6c4d57725315828607963f15a","0x90cb64d03534f913b411375cce88a9e8b1329ce67a9f89ca5df8a22b8c1c97707fec727dbcbb9737f20c4cf751359277","0x8327c2d42590dfcdb78477fc18dcf71608686ad66c49bce64d7ee874668be7e1c17cc1042a754bbc77c9daf50b2dae07","0x8532171ea13aa7e37178e51a6c775da469d2e26ec854eb16e60f3307db4acec110d2155832c202e9ba525fc99174e3b0","0x83ca44b15393d021de2a511fa5511c5bd4e0ac7d67259dce5a5328f38a3cce9c3a269405959a2486016bc27bb140f9ff","0xb1d36e8ca812be545505c8214943b36cabee48112cf0de369957afa796d37f86bf7249d9f36e8e990f26f1076f292b13","0x9803abf45be5271e2f3164c328d449efc4b8fc92dfc1225d38e09630909fe92e90a5c77618daa5f592d23fc3ad667094","0xb268ad68c7bf432a01039cd889afae815c3e120f57930d463aece10af4fd330b5bd7d8869ef1bcf6b2e78e4229922edc","0xa4c91a0d6f16b1553264592b4cbbbf3ca5da32ab053ffbdd3dbb1aed1afb650fb6e0dc5274f71a51d7160856477228db","0xad89d043c2f0f17806277ffdf3ecf007448e93968663f8a0b674254f36170447b7527d5906035e5e56f4146b89b5af56","0x8b6964f757a72a22a642e4d69102951897e20c21449184e44717bd0681d75f7c5bfa5ee5397f6e53febf85a1810d6ed1","0xb08f5cdaabec910856920cd6e836c830b863eb578423edf0b32529488f71fe8257d90aed4a127448204df498b6815d79","0xaf26bb3358be9d280d39b21d831bb53145c4527a642446073fee5a86215c4c89ff49a3877a7a549486262f6f57a0f476","0xb4010b37ec4d7c2af20800e272539200a6b623ae4636ecbd0e619484f4ab9240d02bc5541ace3a3fb955dc0a3d774212","0x82752ab52bdcc3cc2fc405cb05a2e694d3df4a3a68f2179ec0652536d067b43660b96f85f573f26fbd664a9ef899f650","0x96d392dde067473a81faf2d1fea55b6429126b88b160e39b4210d31d0a82833ffd3a80e07d24d495aea2d96be7251547","0xa76d8236d6671204d440c33ac5b8deb71fa389f6563d80e73be8b043ec77d4c9b06f9a586117c7f957f4af0331cbc871","0xb6c90961f68b5e385d85c9830ec765d22a425f506904c4d506b87d8944c2b2c
09615e740ed351df0f9321a7b93979cae","0xa6ec5ea80c7558403485b3b1869cdc63bde239bafdf936d9b62a37031628402a36a2cfa5cfbb8e26ac922cb0a209b3ba","0x8c3195bbdbf9bc0fc95fa7e3d7f739353c947f7767d1e3cb24d8c8602d8ea0a1790ac30b815be2a2ba26caa5227891e2","0xa7f8a63d809f1155722c57f375ea00412b00147776ae4444f342550279ef4415450d6f400000a326bf11fea6c77bf941","0x97fa404df48433a00c85793440e89bb1af44c7267588ae937a1f5d53e01e1c4d4fc8e4a6d517f3978bfdd6c2dfde012f","0xa984a0a3836de3d8d909c4629a2636aacb85393f6f214a2ef68860081e9db05ad608024762db0dc35e895dc00e2d4cdd","0x9526cf088ab90335add1db4d3a4ac631b58cbfbe88fa0845a877d33247d1cfeb85994522e1eb8f8874651bfb1df03e2a","0xac83443fd0afe99ad49de9bf8230158c118e2814c9c89db5ac951c240d6c2ce45e7677221279d9e97848ec466b99aafe","0xaeeefdbaba612e971697798ceaf63b247949dc823a0ad771ae5b988a5e882b338a98d3d0796230f49d533ec5ba411b39","0xae3f248b5a7b0f92b7820a6c5ae21e5bd8f4265d4f6e21a22512079b8ee9be06393fd3133ce8ebac0faf23f4f8517e36","0xa64a831b908eee784b8388b45447d2885ec0551b26b0c2b15e5f417d0a12c79e867fb7bd3d008d0af98b44336f8ec1ad","0xb242238cd8362b6e440ba21806905714dd55172db25ec7195f3fc4937b2aba146d5cbf3cf691a1384b4752dc3b54d627","0x819f97f337eea1ffb2a678cc25f556f1aab751c6b048993a1d430fe1a3ddd8bb411c152e12ca60ec6e057c190cd1db9a","0xb9d7d187407380df54ee9fef224c54eec1bfabf17dc8abf60765b7951f538f59aa26fffd5846cfe05546c35f59b573f4","0xaa6e3c14efa6a5962812e3f94f8ce673a433f4a82d07a67577285ea0eaa07f8be7115853122d12d6d4e1fdf64c504be1","0x82268bee9c1662d3ddb5fb785abfae6fb8b774190f30267f1d47091d2cd4b3874db4372625aa36c32f27b0eee986269b","0xb236459565b7b966166c4a35b2fa71030b40321821b8e96879d95f0e83a0baf33fa25721f30af4a631df209e25b96061","0x8708d752632d2435d2d5b1db4ad1fa2558d776a013655f88e9a3556d86b71976e7dfe5b8834fdec97682cd94560d0d0d","0xae1424a68ae2dbfb0f01211f11773732a50510b5585c1fb005cb892b2c6a58f4a55490b5c5b4483c6fce40e9d3236a52","0xb3f5f722af9dddb07293c871ce97abbccba0093ca98c8d74b1318fa21396fc1b45b69c15084f63d728f9908442024506","0x9606f3ce5e63886853ca476dc0949e7f1051889d529
365c0cb0296fdc02abd088f0f0318ecd2cf36740a3634132d36f6","0xb11a833a49fa138db46b25ff8cdda665295226595bc212c0931b4931d0a55c99da972c12b4ef753f7e37c6332356e350","0xafede34e7dab0a9e074bc19a7daddb27df65735581ca24ad70c891c98b1349fcebbcf3ba6b32c2617fe06a5818dabc2d","0x97993d456e459e66322d01f8eb13918979761c3e8590910453944bdff90b24091bb018ac6499792515c9923be289f99f","0x977e3e967eff19290a192cd11df3667d511b398fb3ac9a5114a0f3707e25a0edcb56105648b1b85a8b7519fc529fc6f6","0xb873a7c88bf58731fe1bf61ff6828bf114cf5228f254083304a4570e854e83748fc98683ddba62d978fff7909f2c5c47","0xad4b2691f6f19da1d123aaa23cca3e876247ed9a4ab23c599afdbc0d3aa49776442a7ceaa996ac550d0313d9b9a36cee","0xb9210713c78e19685608c6475bfa974b57ac276808a443f8b280945c5d5f9c39da43effa294bfb1a6c6f7b6b9f85bf6c","0xa65152f376113e61a0e468759de38d742caa260291b4753391ee408dea55927af08a4d4a9918600a3bdf1df462dffe76","0x8bf8c27ad5140dde7f3d2280fd4cc6b29ab76537e8d7aa7011a9d2796ee3e56e9a60c27b5c2da6c5e14fc866301dc195","0x92fde8effc9f61393a2771155812b863cff2a0c5423d7d40aa04d621d396b44af94ddd376c28e7d2f53c930aea947484","0x97a01d1dd9ee30553ce676011aea97fa93d55038ada95f0057d2362ae9437f3ed13de8290e2ff21e3167dd7ba10b9c3f","0x89affffaa63cb2df3490f76f0d1e1d6ca35c221dd34057176ba739fa18d492355e6d2a5a5ad93a136d3b1fed0bb8aa19","0x928b8e255a77e1f0495c86d3c63b83677b4561a5fcbbe5d3210f1e0fc947496e426d6bf3b49394a5df796c9f25673fc4","0x842a0af91799c9b533e79ee081efe2a634cac6c584c2f054fb7d1db67dde90ae36de36cbf712ec9cd1a0c7ee79e151ea","0xa65b946cf637e090baf2107c9a42f354b390e7316beb8913638130dbc67c918926eb87bec3b1fe92ef72bc77a170fa3b","0xaafc0f19bfd71ab5ae4a8510c7861458b70ad062a44107b1b1dbacbfa44ba3217028c2824bd7058e2fa32455f624040b","0x95269dc787653814e0be899c95dba8cfa384f575a25e671c0806fd80816ad6797dc819d30ae06e1d0ed9cb01c3950d47","0xa1e760f7fa5775a1b2964b719ff961a92083c5c617f637fc46e0c9c20ab233f8686f7f38c3cb27d825c54dd95e93a59b","0xac3b8a7c2317ea967f229eddc3e23e279427f665c4705c7532ed33443f1243d33453c1088f57088d2ab1e3df690a9cc9","0xb787beeddfbfe36dd51ec4e
fd9cf83e59e84d354c3353cc9c447be53ae53d366ed1c59b686e52a92f002142c8652bfe0","0xb7a64198300cb6716aa7ac6b25621f8bdec46ad5c07a27e165b3f774cdf65bcfdbf31e9bae0c16b44de4b00ada7a4244","0xb8ae9f1452909e0c412c7a7fe075027691ea8df1347f65a5507bc8848f1d2c833d69748076db1129e5b4fb912f65c86c","0x9682e41872456b9fa67def89e71f06d362d6c8ca85c9c48536615bc401442711e1c9803f10ab7f8ab5feaec0f9df20a6","0x88889ff4e271dc1c7e21989cc39f73cde2f0475acd98078281591ff6c944fadeb9954e72334319050205d745d4df73df","0x8f79b5b8159e7fd0d93b0645f3c416464f39aec353b57d99ecf24f96272df8a068ad67a6c90c78d82c63b40bb73989bb","0x838c01a009a3d8558a3f0bdd5e22de21af71ca1aefc8423c91dc577d50920e9516880e87dce3e6d086e11cd45c9052d9","0xb97f1c6eee8a78f137c840667cc288256e39294268a3009419298a04a1d0087c9c9077b33c917c65caf76637702dda8a","0x972284ce72f96a61c899260203dfa06fc3268981732bef74060641c1a5068ead723e3399431c247ca034b0dae861e8df","0x945a8d52d6d3db6663dbd3110c6587f9e9c44132045eeffba15621576d178315cb52870fa5861669f84f0bee646183fe","0xa0a547b5f0967b1c3e5ec6c6a9a99f0578521489180dfdfbb5561f4d166baac43a2f06f950f645ce991664e167537eed","0xa0592cda5cdddf1340033a745fd13a6eff2021f2e26587116c61c60edead067e0f217bc2bef4172a3c9839b0b978ab35","0xb9c223b65a3281587fa44ec829e609154b32f801fd1de6950e01eafb07a8324243b960d5735288d0f89f0078b2c42b5b","0x99ebfc3b8f9f98249f4d37a0023149ed85edd7a5abe062c8fb30c8c84555258b998bdcdd1d400bc0fa2a4aaa8b224466","0x955b68526e6cb3937b26843270f4e60f9c6c8ece2fa9308fe3e23afa433309c068c66a4bc16ee2cf04220f095e9afce4","0xb766caeafcc00378135ae53397f8a67ed586f5e30795462c4a35853de6681b1f17401a1c40958de32b197c083b7279c1","0x921bf87cad947c2c33fa596d819423c10337a76fe5a63813c0a9dc78a728207ae7b339407a402fc4d0f7cba3af6da6fc","0xa74ba1f3bc3e6c025db411308f49b347ec91da1c916bda9da61e510ec8d71d25e0ac0f124811b7860e5204f93099af27","0xa29b4d144e0bf17a7e8353f2824cef0ce85621396babe8a0b873ca1e8a5f8d508b87866cf86da348470649fceefd735c","0xa8040e12ffc3480dd83a349d06741d1572ef91932c46f5cf03aee8454254156ee95786fd013d5654725e674c920cec32","0x8c4
cf34ca60afd33923f219ffed054f90cd3f253ffeb2204a3b61b0183417e366c16c07fae860e362b0f2bfe3e1a1d35","0x8195eede4ddb1c950459df6c396b2e99d83059f282b420acc34220cadeed16ab65c856f2c52568d86d3c682818ed7b37","0x91fff19e54c15932260aa990c7fcb3c3c3da94845cc5aa8740ef56cf9f58d19b4c3c55596f8d6c877f9f4d22921d93aa","0xa3e0bf7e5d02a80b75cf75f2db7e66cb625250c45436e3c136d86297d652590ec97c2311bafe407ad357c79ab29d107b","0x81917ff87e5ed2ae4656b481a63ced9e6e5ff653b8aa6b7986911b8bc1ee5b8ef4f4d7882c3f250f2238e141b227e510","0x915fdbe5e7de09c66c0416ae14a8750db9412e11dc576cf6158755fdcaf67abdbf0fa79b554cac4fe91c4ec245be073f","0x8df27eafb5c3996ba4dc5773c1a45ca77e626b52e454dc1c4058aa94c2067c18332280630cc3d364821ee53bf2b8c130","0x934f8a17c5cbb827d7868f5c8ca00cb027728a841000a16a3428ab16aa28733f16b52f58c9c4fbf75ccc45df72d9c4df","0xb83f4da811f9183c25de8958bc73b504cf790e0f357cbe74ef696efa7aca97ad3b7ead1faf76e9f982c65b6a4d888fc2","0x87188213c8b5c268dc2b6da413f0501c95749e953791b727450af3e43714149c115b596b33b63a2f006a1a271b87efd0","0x83e9e888ab9c3e30761de635d9aabd31248cdd92f7675fc43e4b21fd96a03ec1dc4ad2ec94fec857ffb52683ac98e360","0xb4b9a1823fe2d983dc4ec4e3aaea297e581c3fc5ab4b4af5fa1370caa37af2d1cc7fc6bfc5e7da60ad8fdce27dfe4b24","0x856388bc78aef465dbcdd1f559252e028c9e9a2225c37d645c138e78f008f764124522705822a61326a6d1c79781e189","0xa6431b36db93c3b47353ba22e7c9592c9cdfb9cbdd052ecf2cc3793f5b60c1e89bc96e6bae117bfd047f2308da00dd2f","0xb619972d48e7e4291542dcde08f7a9cdc883c892986ded2f23ccb216e245cd8d9ad1d285347b0f9d7611d63bf4cee2bc","0x8845cca6ff8595955f37440232f8e61d5351500bd016dfadd182b9d39544db77a62f4e0102ff74dd4173ae2c181d24ef","0xb2f5f7fa26dcd3b6550879520172db2d64ee6aaa213cbef1a12befbce03f0973a22eb4e5d7b977f466ac2bf8323dcedd","0x858b7f7e2d44bdf5235841164aa8b4f3d33934e8cb122794d90e0c1cac726417b220529e4f896d7b77902ab0ccd35b3a","0x80b0408a092dae2b287a5e32ea1ad52b78b10e9c12f49282976cd738f5d834e03d1ad59b09c5ccaccc39818b87d06092","0xb996b0a9c6a2d14d984edcd6ab56bc941674102980d65b3ad9733455f49473d3f587c8cbf661228a7e12
5ddbe07e3198","0x90224fcebb36865293bd63af786e0c5ade6b67c4938d77eb0cbae730d514fdd0fe2d6632788e858afd29d46310cf86df","0xb71351fdfff7168b0a5ec48397ecc27ac36657a8033d9981e97002dcca0303e3715ce6dd3f39423bc8ef286fa2e9e669","0xae2a3f078b89fb753ce4ed87e0c1a58bb19b4f0cfb6586dedb9fcab99d097d659a489fb40e14651741e1375cfc4b6c5f","0x8ef476b118e0b868caed297c161f4231bbeb863cdfa5e2eaa0fc6b6669425ce7af50dc374abceac154c287de50c22307","0x92e46ab472c56cfc6458955270d3c72b7bde563bb32f7d4ab4d959db6f885764a3d864e1aa19802fefaa5e16b0cb0b54","0x96a3f68323d1c94e73d5938a18a377af31b782f56212de3f489d22bc289cf24793a95b37f1d6776edf88114b5c1fa695","0x962cc068cfce6faaa27213c4e43e44eeff0dfbb6d25b814e82c7da981fb81d7d91868fa2344f05fb552362f98cfd4a72","0x895d4e4c4ad670abf66d43d59675b1add7afad7438ada8f42a0360c704cee2060f9ac15b4d27e9b9d0996bb801276fe3","0xb3ad18d7ece71f89f2ef749b853c45dc56bf1c796250024b39a1e91ed11ca32713864049c9aaaea60cde309b47486bbf","0x8f05404e0c0258fdbae50e97ccb9b72ee17e0bd2400d9102c0dad981dac8c4c71585f03e9b5d50086d0a2d3334cb55d1","0x8bd877e9d4591d02c63c6f9fc9976c109de2d0d2df2bfa5f6a3232bab5b0b8b46e255679520480c2d7a318545efa1245","0x8d4c16b5d98957c9da13d3f36c46f176e64e5be879f22be3179a2c0e624fe4758a82bf8c8027410002f973a3b84cd55a","0x86e2a8dea86427b424fa8eada881bdff896907084a495546e66556cbdf070b78ba312bf441eb1be6a80006d25d5097a3","0x8608b0c117fd8652fdab0495b08fadbeba95d9c37068e570de6fddfef1ba4a1773b42ac2be212836141d1bdcdef11a17","0xa13d6febf5fb993ae76cae08423ca28da8b818d6ef0fde32976a4db57839cd45b085026b28ee5795f10a9a8e3098c683","0x8e261967fa6de96f00bc94a199d7f72896a6ad8a7bbb1d6187cca8fad824e522880e20f766620f4f7e191c53321d70f9","0x8b8e8972ac0218d7e3d922c734302803878ad508ca19f5f012bc047babd8a5c5a53deb5fe7c15a4c00fd6d1cb9b1dbd0","0xb5616b233fb3574a2717d125a434a2682ff68546dccf116dd8a3b750a096982f185614b9fb6c7678107ff40a451f56fa","0xaa6adf9b0c3334b0d0663f583a4914523b2ac2e7adffdb026ab9109295ff6af003ef8357026dbcf789896d2afded8d73","0xacb72df56a0b65496cd534448ed4f62950bb1e11e50873b6ed349c088ee36444
1821294ce0f7c61bd7d38105bea3b442","0xabae12df83e01ec947249fedd0115dc501d2b03ff7232092979eda531dbbca29ace1d46923427c7dde4c17bdf3fd7708","0x820b4fc2b63a9fda7964acf5caf19a2fc4965007cb6d6b511fcafcb1f71c3f673a1c0791d3f86e3a9a1eb6955b191cc0","0xaf277259d78c6b0f4f030a10c53577555df5e83319ddbad91afbd7c30bc58e7671c56d00d66ec3ab5ef56470cd910cee","0xad4a861c59f1f5ca1beedd488fb3d131dea924fffd8e038741a1a7371fad7370ca5cf80dc01f177fbb9576713bb9a5b3","0xb67a5162982ce6a55ccfb2f177b1ec26b110043cf18abd6a6c451cf140b5af2d634591eb4f28ad92177d8c7e5cd0a5e8","0x96176d0a83816330187798072d449cbfccff682561e668faf6b1220c9a6535b32a6e4f852e8abb00f79abb87493df16b","0xb0afe6e7cb672e18f0206e4423f51f8bd0017bf464c4b186d46332c5a5847647f89ff7fa4801a41c1b0b42f6135bcc92","0x8fc5e7a95ef20c1278c645892811f6fe3f15c431ebc998a32ec0da44e7213ea934ed2be65239f3f49b8ec471e9914160","0xb7793e41adda6c82ba1f2a31f656f6205f65bf8a3d50d836ee631bc7ce77c153345a2d0fc5c60edf8b37457c3729c4ec","0xa504dd7e4d6b2f4379f22cc867c65535079c75ccc575955f961677fa63ecb9f74026fa2f60c9fb6323c1699259e5e9c8","0xab899d00ae693649cc1afdf30fb80d728973d2177c006e428bf61c7be01e183866614e05410041bc82cb14a33330e69c","0x8a3bd8b0b1be570b65c4432a0f6dc42f48a2000e30ab089cf781d38f4090467b54f79c0d472fcbf18ef6a00df69cc6f3","0xb4d7028f7f76a96a3d7803fca7f507ae11a77c5346e9cdfccb120a833a59bda1f4264e425aa588e7a16f8e7638061d84","0xb9c7511a76ea5fb105de905d44b02edb17008335766ee357ed386b7b3cf19640a98b38785cb14603c1192bee5886c9b6","0x8563afb12e53aed71ac7103ab8602bfa8371ae095207cb0d59e8fd389b6ad1aff0641147e53cb6a7ca16c7f37c9c5e6b","0x8e108be614604e09974a9ed90960c28c4ea330a3d9a0cb4af6dd6f193f84ab282b243ecdf549b3131036bebc8905690c","0xb794d127fbedb9c5b58e31822361706ffac55ce023fbfe55716c3c48c2fd2f2c7660a67346864dfe588812d369cb50b6","0xb797a3442fc3b44f41baefd30346f9ac7f96e770d010d53c146ce74ce424c10fb62758b7e108b8abfdc5fafd89d745cb","0x993bb71e031e8096442e6205625e1bfddfe6dd6a83a81f3e2f84fafa9e5082ab4cad80a099f21eff2e81c83457c725c3","0x8711ab833fc03e37acf2e1e74cfd9133b101ff4144fe
30260654398ae48912ab46549d552eb9d15d2ea57760d35ac62e","0xb21321fd2a12083863a1576c5930e1aecb330391ef83326d9d92e1f6f0d066d1394519284ddab55b2cb77417d4b0292f","0x877d98f731ffe3ee94b0b5b72d127630fa8a96f6ca4f913d2aa581f67732df6709493693053b3e22b0181632ac6c1e3b","0xae391c12e0eb8c145103c62ea64f41345973311c3bf7281fa6bf9b7faafac87bcf0998e5649b9ef81e288c369c827e07","0xb83a2842f36998890492ab1cd5a088d9423d192681b9a3a90ec518d4c541bce63e6c5f4df0f734f31fbfdd87785a2463","0xa21b6a790011396e1569ec5b2a423857b9bec16f543e63af28024e116c1ea24a3b96e8e4c75c6537c3e4611fd265e896","0xb4251a9c4aab3a495da7a42e684ba4860dbcf940ad1da4b6d5ec46050cbe8dab0ab9ae6b63b5879de97b905723a41576","0x8222f70aebfe6ac037f8543a08498f4cadb3edaac00336fc00437eb09f2cba758f6c38e887cc634b4d5b7112b6334836","0x86f05038e060594c46b5d94621a1d9620aa8ba59a6995baf448734e21f58e23c1ea2993d3002ad5250d6edd5ba59b34f","0xa7c0c749baef811ab31b973c39ceb1d94750e2bc559c90dc5eeb20d8bb6b78586a2b363c599ba2107d6be65cd435f24e","0x861d46a5d70b38d6c1cd72817a2813803d9f34c00320c8b62f8b9deb67f5b5687bc0b37c16d28fd017367b92e05da9ca","0xb3365d3dab639bffbe38e35383686a435c8c88b397b717cd4aeced2772ea1053ceb670f811f883f4e02975e5f1c4ac58","0xa5750285f61ab8f64cd771f6466e2c0395e01b692fd878f2ef2d5c78bdd8212a73a3b1dfa5e4c8d9e1afda7c84857d3b","0x835a10809ccf939bc46cf950a33b36d71be418774f51861f1cd98a016ade30f289114a88225a2c11e771b8b346cbe6ef","0xa4f59473a037077181a0a62f1856ec271028546ca9452b45cedfcb229d0f4d1aabfc13062b07e536cc8a0d4b113156a2","0x95cd14802180b224d44a73cc1ed599d6c4ca62ddcaa503513ccdc80aaa8be050cc98bd4b4f3b639549beb4587ac6caf9","0x973b731992a3e69996253d7f36dd7a0af1982b5ed21624b77a7965d69e9a377b010d6dabf88a8a97eec2a476259859cc","0xaf8a1655d6f9c78c8eb9a95051aa3baaf9c811adf0ae8c944a8d3fcba87b15f61021f3baf6996fa0aa51c81b3cb69de1","0x835aad5c56872d2a2d6c252507b85dd742bf9b8c211ccb6b25b52d15c07245b6d89b2a40f722aeb5083a47cca159c947","0xabf4e970b02bef8a102df983e22e97e2541dd3650b46e26be9ee394a3ea8b577019331857241d3d12b41d4eacd29a3ac","0xa13c32449dbedf158721c13d
b9539ae076a6ce5aeaf68491e90e6ad4e20e20d1cdcc4a89ed9fd49cb8c0dd50c17633c1","0x8c8f78f88b7e22dd7e9150ab1c000f10c28e696e21d85d6469a6fe315254740f32e73d81ab1f3c1cf8f544c86df506e8","0xb4b77f2acfe945abf81f2605f906c10b88fb4d28628487fb4feb3a09f17f28e9780445dfcee4878349d4c6387a9d17d4","0x8d255c235f3812c6ecc646f855fa3832be5cb4dbb9c9e544989fafdf3f69f05bfd370732eaf954012f0044aa013fc9c6","0xb982efd3f34b47df37c910148ac56a84e8116647bea24145a49e34e0a6c0176e3284d838dae6230cb40d0be91c078b85","0x983f365aa09bd85df2a6a2ad8e4318996b1e27d02090755391d4486144e40d80b1fbfe1c798d626db92f52e33aa634da","0x95fd1981271f3ea3a41d654cf497e6696730d9ff7369f26bc4d7d15c7adb4823dd0c42e4a005a810af12d234065e5390","0xa9f5219bd4b913c186ef30c02f995a08f0f6f1462614ea5f236964e02bdaa33db9d9b816c4aee5829947840a9a07ba60","0x9210e6ceb05c09b46fd09d036287ca33c45124ab86315e5d6911ff89054f1101faaa3e83d123b7805056d388bcec6664","0x8ed9cbf69c6ff3a5c62dd9fe0d7264578c0f826a29e614bc2fb4d621d90c8c9992438accdd7a614b1dca5d1bb73dc315","0x85cf2a8cca93e00da459e3cecd22c342d697eee13c74d5851634844fc215f60053cf84b0e03c327cb395f48d1c71a8a4","0x8818a18e9a2ec90a271b784400c1903089ffb0e0b40bc5abbbe12fbebe0f731f91959d98c5519ef1694543e31e2016d4","0x8dabc130f296fa7a82870bf9a8405aaf542b222ed9276bba9bd3c3555a0f473acb97d655ee7280baff766a827a8993f0","0xac7952b84b0dc60c4d858f034093b4d322c35959605a3dad2b806af9813a4680cb038c6d7f4485b4d6b2ff502aaeca25","0xad65cb6d57b48a2602568d2ec8010baed0eb440eec7638c5ec8f02687d764e9de5b5d42ad5582934e592b48471c22d26","0xa02ab8bd4c3d114ea23aebdd880952f9495912817da8c0c08eabc4e6755439899d635034413d51134c72a6320f807f1c","0x8319567764b8295402ec1ebef4c2930a138480b37e6d7d01c8b4c9cd1f2fc3f6e9a44ae6e380a0c469b25b06db23305f","0xafec53b2301dc0caa8034cd9daef78c48905e6068d692ca23d589b84a6fa9ddc2ed24a39480597e19cb3e83eec213b3f","0xac0b4ffdb5ae08e586a9cdb98f9fe56f4712af3a97065e89e274feacfb52b53c839565aee93c4cfaaccfe51432c4fab0","0x8972cbf07a738549205b1094c5987818124144bf187bc0a85287c94fdb22ce038c0f11df1aa16ec5992e91b44d1af793","0xb726
7aa6f9e3de864179b7da30319f1d4cb2a3560f2ea980254775963f1523b44c680f917095879bebfa3dc2b603efcf","0x80f68f4bfc337952e29504ee5149f15093824ea7ab02507efd1317a670f6cbc3611201848560312e3e52e9d9af72eccf","0x8897fee93ce8fc1e1122e46b6d640bba309384dbd92e46e185e6364aa8210ebf5f9ee7e5e604b6ffba99aa80a10dd7d0","0xb58ea6c02f2360be60595223d692e82ee64874fda41a9f75930f7d28586f89be34b1083e03bbc1575bbfdda2d30db1ea","0x85a523a33d903280d70ac5938770453a58293480170c84926457ac2df45c10d5ff34322ab130ef4a38c916e70d81af53","0xa2cbf045e1bed38937492c1f2f93a5ba41875f1f262291914bc1fc40c60bd0740fb3fea428faf6da38b7c180fe8ac109","0x8c09328770ed8eb17afc6ac7ddd87bb476de18ed63cab80027234a605806895959990c47bd10d259d7f3e2ecb50074c9","0xb4b9e19edb4a33bde8b7289956568a5b6b6557404e0a34584b5721fe6f564821091013fbb158e2858c6d398293bb4b59","0x8a47377df61733a2aa5a0e945fce00267f8e950f37e109d4487d92d878fb8b573317bb382d902de515b544e9e233458d","0xb5804c9d97efeff5ca94f3689b8088c62422d92a1506fd1d8d3b1b30e8a866ad0d6dad4abfa051dfc4471250cac4c5d9","0x9084a6ee8ec22d4881e9dcc8a9eb3c2513523d8bc141942370fd191ad2601bf9537a0b1e84316f3209b3d8a54368051e","0x85447eea2fa26656a649f8519fa67279183044791d61cf8563d0783d46d747d96af31d0a93507bbb2242666aa87d3720","0x97566a84481027b60116c751aec552adfff2d9038e68d48c4db9811fb0cbfdb3f1d91fc176a0b0d988a765f8a020bce1","0xae87e5c1b9e86c49a23dceda4ecfd1dcf08567f1db8e5b6ec752ebd45433c11e7da4988573cdaebbb6f4135814fc059e","0xabee05cf9abdbc52897ac1ce9ed157f5466ed6c383d6497de28616238d60409e5e92619e528af8b62cc552bf09970dc2","0xae6d31cd7bf9599e5ee0828bab00ceb4856d829bba967278a73706b5f388465367aa8a6c7da24b5e5f1fdd3256ef8e63","0xac33e7b1ee47e1ee4af472e37ab9e9175260e506a4e5ce449788075da1b53c44cb035f3792d1eea2aa24b1f688cc6ed3","0x80f65b205666b0e089bb62152251c48c380a831e5f277f11f3ef4f0d52533f0851c1b612267042802f019ec900dc0e8f","0x858520ad7aa1c9fed738e3b583c84168f2927837ad0e1d326afe9935c26e9b473d7f8c382e82ef1fe37d2b39bb40a1ee","0xb842dd4af8befe00a97c2d0f0c33c93974761e2cb9e5ab8331b25170318ddd5e4bdbc02d8f90cbfdd5f34
8f4f371c1f7","0x8bf2cb79bc783cb57088aae7363320cbeaabd078ffdec9d41bc74ff49e0043d0dad0086a30e5112b689fd2f5a606365d","0x982eb03bbe563e8850847cd37e6a3306d298ab08c4d63ab6334e6b8c1fa13fce80cf2693b09714c7621d74261a0ff306","0xb143edb113dec9f1e5105d4a93fbe502b859e587640d3db2f628c09a17060e6aec9e900e2c8c411cda99bc301ff96625","0xaf472d9befa750dcebc5428fe1a024f18ec1c07bca0f95643ce6b5f4189892a910285afb03fd7ed7068fbe614e80d33c","0xa97e3bc57ede73ecd1bbf02de8f51b4e7c1a067da68a3cd719f4ba26a0156cbf1cef2169fd35a18c5a4cced50d475998","0xa862253c937cf3d75d7183e5f5be6a4385d526aeda5171c1c60a8381fea79f88f5f52a4fab244ecc70765d5765e6dfd5","0x90cb776f8e5a108f1719df4a355bebb04bf023349356382cae55991b31720f0fd03206b895fa10c56c98f52453be8778","0xa7614e8d0769dccd520ea4b46f7646e12489951efaef5176bc889e9eb65f6e31758df136b5bf1e9107e68472fa9b46ec","0xac3a9b80a3254c42e5ed3a090a0dd7aee2352f480de96ad187027a3bb6c791eddfc3074b6ffd74eea825188f107cda4d","0x82a01d0168238ef04180d4b6e0a0e39024c02c2d75b065017c2928039e154d093e1af4503f4d1f3d8a948917abb5d09f","0x8fab000a2b0eef851a483aec8d2dd85fe60504794411a2f73ed82e116960547ac58766cb73df71aea71079302630258d","0x872451a35c6db61c63e9b8bb9f16b217f985c20be4451c14282c814adb29d7fb13f201367c664435c7f1d4d9375d7a58","0x887d9ff54cc96b35d562df4a537ff972d7c4b3fd91ab06354969a4cfede0b9fc68bbffb61d0dbf1a58948dc701e54f5a","0x8cb5c2a6bd956875d88f41ae24574434f1308514d44057b55c9c70f13a3366ed054150eed0955a38fda3f757be73d55f","0x89ad0163cad93e24129d63f8e38422b7674632a8d0a9016ee8636184cab177659a676c4ee7efba3abe1a68807c656d60","0xb9ec01c7cab6d00359b5a0b4a1573467d09476e05ca51a9227cd16b589a9943d161eef62dcc73f0de2ec504d81f4d252","0x8031d17635d39dfe9705c485d2c94830b6fc9bc67b91300d9d2591b51e36a782e77ab5904662effa9382d9cca201f525","0x8be5a5f6bc8d680e5092d6f9a6585acbaaaa2ddc671da560dcf5cfa4472f4f184b9597b5b539438accd40dda885687cc","0xb1fc0f052fae038a2e3de3b3a96b0a1024b009de8457b8b3adb2d315ae68a89af905720108a30038e5ab8d0d97087785","0x8b8bdc77bd3a6bc7ca5492b6f8c614852c39a70d6c8a74916eaca0aeb4533b118
98b8820a4c2620a97bf35e275480029","0xaf35f4dc538d4ad5cdf710caa38fd1eb496c3fa890a047b6a659619c5ad3054158371d1e88e0894428282eed9f47f76b","0x8166454a7089cc07758ad78724654f4e7a1a13e305bbf88ddb86f1a4b2904c4fc8ab872d7da364cdd6a6c0365239e2ad","0xab287c7d3addce74ce40491871c768abe01daaa0833481276ff2e56926b38a7c6d2681ffe837d2cc323045ad1a4414f9","0xb90317f4505793094d89365beb35537f55a6b5618904236258dd04ca61f21476837624a2f45fef8168acf732cab65579","0x98ae5ea27448e236b6657ab5ef7b1cccb5372f92ab25f5fa651fbac97d08353a1dae1b280b1cd42b17d2c6a70a63ab9d","0xadcf54e752d32cbaa6cb98fbca48d8cd087b1db1d131d465705a0d8042c8393c8f4d26b59006eb50129b21e6240f0c06","0xb591a3e4db18a7345fa935a8dd7994bbac5cc270b8ebd84c8304c44484c7a74afb45471fdbe4ab22156a30fae1149b40","0x806b53ac049a42f1dcc1d6335505371da0bf27c614f441b03bbf2e356be7b2fb4eed7117eabcce9e427a542eaa2bf7d8","0x800482e7a772d49210b81c4a907f5ce97f270b959e745621ee293cf8c71e8989363d61f66a98f2d16914439544ca84c7","0x99de9eafdad3617445312341644f2bb888680ff01ce95ca9276b1d2e5ef83fa02dab5e948ebf66c17df0752f1bd37b70","0x961ee30810aa4c93ae157fbe9009b8e443c082192bd36a73a6764ff9b2ad8b0948fe9a73344556e01399dd77badb4257","0xae0a361067c52efbe56c8adf982c00432cd478929459fc7f74052c8ee9531cd031fe1335418fde53f7c2ef34254eb7ac","0xa3503d16b6b27eb20c1b177bcf90d13706169220523a6271b85b2ce35a9a2b9c5bed088540031c0a4ebfdae3a4c6ab04","0x909420122c3e723289ca4e7b81c2df5aff312972a2203f4c45821b176e7c862bf9cac7f7df3adf1d59278f02694d06e7","0x989f42380ae904b982f85d0c6186c1aef5d6bcba29bcfbb658e811b587eb2749c65c6e4a8cc6409c229a107499a4f5d7","0x8037a6337195c8e26a27ea4ef218c6e7d79a9720aaab43932d343192abc2320fe72955f5e431c109093bda074103330a","0xb312e168663842099b88445e940249cc508f080ab0c94331f672e7760258dbd86be5267e4cf25ea25facb80bff82a7e9","0xaaa3ff8639496864fcdbfdda1ac97edc4f08e3c9288b768f6c8073038c9fbbf7e1c4bea169b4d45c31935cdf0680d45e","0x97dbd3df37f0b481a311dfc5f40e59227720f367912200d71908ef6650f32cc985cb05b981e3eea38958f7e48d10a15d","0xa89d49d1e267bb452d6cb621b9a90826fe55e9b489c04
27b94442d02a16f390eed758e209991687f73f6b5a032321f42","0x9530dea4e0e19d6496f536f2e75cf7d814d65fde567055eb20db48fd8d20d501cd2a22fb506db566b94c9ee10f413d43","0x81a7009b9e67f1965fa7da6a57591c307de91bf0cd35ab4348dc4a98a4961e096d004d7e7ad318000011dc4342c1b809","0x83440a9402b766045d7aca61a58bba2aa29cac1cf718199e472ba086f5d48093d9dda4d135292ba51d049a23964eceae","0xa06c9ce5e802df14f6b064a3d1a0735d429b452f0e2e276042800b0a4f16df988fd94cf3945921d5dd3802ab2636f867","0xb1359e358b89936dee9e678a187aad3e9ab14ac40e96a0a68f70ee2583cdcf467ae03bef4215e92893f4e12f902adec8","0x835304f8619188b4d14674d803103d5a3fa594d48e96d9699e653115dd05fdc2dda6ba3641cf7ad53994d448da155f02","0x8327cba5a9ff0d3f5cd0ae55e77167448926d5fcf76550c0ad978092a14122723090c51c415e88e42a2b62eb07cc3981","0xb373dcdaea85f85ce9978b1426a7ef4945f65f2d3467a9f1cc551a99766aac95df4a09e2251d3f89ca8c9d1a7cfd7b0e","0xab1422dc41af2a227b973a6fd124dfcb2367e2a11a21faa1d381d404f51b7257e5bc82e9cf20cd7fe37d7ae761a2ab37","0xa93774a03519d2f20fdf2ef46547b0a5b77c137d6a3434b48d56a2cbef9e77120d1b85d0092cf8842909213826699477","0x8eb967a495a38130ea28711580b7e61bcd1d051cd9e4f2dbf62f1380bd86e0d60e978d72f6f31e909eb97b3b9a2b867c","0xae8213378da1287ba1fe4242e1acaec19b877b6fe872400013c6eac1084b8d03156792fa3020201725b08228a1e80f49","0xb143daf6893d674d607772b3b02d8ac48f294237e2f2c87963c0d4e26d9227d94a2a13512457c3d5883544bbc259f0ef","0xb343bd2aca8973888e42542218924e2dda2e938fd1150d06878af76f777546213912b7c7a34a0f94186817d80ffa185c","0xb188ebc6a8c3007001aa347ae72cc0b15d09bc6c19a80e386ee4b334734ec0cc2fe8b493c2422f38d1e6d133cc3db6fe","0xb795f6a8b9b826aaeee18ccd6baf6c5adeeec85f95eb5b6d19450085ec7217e95a2d9e221d77f583b297d0872073ba0e","0xb1c7dbd998ad32ae57bfa95deafa147024afd57389e98992c36b6e52df915d3d5a39db585141ec2423173e85d212fed8","0x812bcdeb9fe5f12d0e1df9964798056e1f1c3de3b17b6bd2919b6356c4b86d8e763c01933efbe0224c86a96d5198a4be","0xb19ebeda61c23d255cbf472ef0b8a441f4c55b70f0d8ed47078c248b1d3c7c62e076b43b95c00a958ec8b16d5a7cb0d7","0xb02adc9aaa20e0368a989c2af
14ff48b67233d28ebee44ff3418bb0473592e6b681af1cc45450bd4b175df9051df63d9","0x8d87f0714acee522eb58cec00360e762adc411901dba46adc9227124fa70ee679f9a47e91a6306d6030dd4eb8de2f3c1","0x8be54cec21e74bcc71de29dc621444263737db15f16d0bb13670f64e42f818154e04b484593d19ef95f2ee17e4b3fe21","0xab8e20546c1db38d31493b5d5f535758afb17e459645c1b70813b1cf7d242fd5d1f4354a7c929e8f7259f6a25302e351","0x89f035a1ed8a1e302ac893349ba8ddf967580fcb6e73d44af09e3929cde445e97ff60c87dafe489e2c0ab9c9986cfa00","0x8b2b0851a795c19191a692af55f7e72ad2474efdc5401bc3733cfdd910e34c918aaebe69d5ea951bdddf3c01cabbfc67","0xa4edb52c2b51495ccd1ee6450fc14b7b3ede8b3d106808929d02fb31475bacb403e112ba9c818d2857651e508b3a7dd1","0x9569341fded45d19f00bcf3cbf3f20eb2b4d82ef92aba3c8abd95866398438a2387437e580d8b646f17cf6fde8c5af23","0xaa4b671c6d20f72f2f18a939a6ff21cc37e0084b44b4a717f1be859a80b39fb1be026b3205adec2a66a608ec2bcd578f","0x94902e980de23c4de394ad8aec91b46f888d18f045753541492bfbb92c59d3daa8de37ae755a6853744af8472ba7b72b","0xaf651ef1b2a0d30a7884557edfad95b6b5d445a7561caebdc46a485aedd25932c62c0798465c340a76f6feaa196dd712","0xb7b669b8e5a763452128846dd46b530dca4893ace5cc5881c7ddcd3d45969d7e73fbebdb0e78aa81686e5f7b22ec5759","0x82507fd4ebe9fa656a7f2e084d64a1fa6777a2b0bc106d686e2d9d2edafc58997e58cb6bfd0453b2bf415704aa82ae62","0xb40bce2b42b88678400ecd52955bbdadd15f8b9e1b3751a1a3375dc0efb5ca3ee258cf201e1140b3c09ad41217d1d49e","0xb0210d0cbb3fbf3b8cdb39e862f036b0ff941cd838e7aaf3a8354e24246e64778d22f3de34572e6b2a580614fb6425be","0x876693cba4301b251523c7d034108831df3ce133d8be5a514e7a2ca494c268ca0556fa2ad8310a1d92a16b55bcd99ea9","0x8660281406d22a4950f5ef050bf71dd3090edb16eff27fa29ef600cdea628315e2054211ed2cc6eaf8f2a1771ef689fd","0xa610e7e41e41ab66955b809ba4ade0330b8e9057d8efc9144753caed81995edeb1a42a53f93ce93540feca1fae708dac","0xa49e2c176a350251daef1218efaccc07a1e06203386ede59c136699d25ca5cb2ac1b800c25b28dd05678f14e78e51891","0x83e0915aa2b09359604566080d411874af8c993beba97d4547782fdbe1a68e59324b800ff1f07b8db30c71adcbd102a8","0xa19e8
4e3541fb6498e9bb8a099c495cbfcad113330e0262a7e4c6544495bb8a754b2208d0c2d895c93463558013a5a32","0x87f2bd49859a364912023aca7b19a592c60214b8d6239e2be887ae80b69ebdeb59742bdebcfa73a586ab23b2c945586c","0xb8e8fdddae934a14b57bc274b8dcd0d45ebb95ddbaabef4454e0f6ce7d3a5a61c86181929546b3d60c447a15134d08e1","0x87e0c31dcb736ea4604727e92dc1d9a3cf00adcff79df3546e02108355260f3dd171531c3c0f57be78d8b28058fcc8c0","0x9617d74e8f808a4165a8ac2e30878c349e1c3d40972006f0787b31ea62d248c2d9f3fc3da83181c6e57e95feedfd0e8c","0x8949e2cee582a2f8db86e89785a6e46bc1565c2d8627d5b6bf43ba71ffadfab7e3c5710f88dcb5fb2fc6edf6f4fae216","0xad3fa7b0edceb83118972a2935a09f409d09a8db3869f30be3a76f67aa9fb379cabb3a3aff805ba023a331cad7d7eb64","0x8c95718a4112512c4efbd496be38bf3ca6cdcaad8a0d128f32a3f9aae57f3a57bdf295a3b372a8c549fda8f4707cffed","0x88f3261d1e28a58b2dee3fcc799777ad1c0eb68b3560f9b4410d134672d9533532a91ea7be28a041784872632d3c9d80","0xb47472a41d72dd2e8b72f5c4f8ad626737dde3717f63d6bc776639ab299e564cbad0a2ad5452a07f02ff49a359c437e5","0x9896d21dc2e8aad87b76d6df1654f10cd7bceed4884159d50a818bea391f8e473e01e14684814c7780235f28e69dca6e","0x82d47c332bbd31bbe83b5eb44a23da76d4a7a06c45d7f80f395035822bc27f62f59281d5174e6f8e77cc9b5c3193d6f0","0x95c74cd46206e7f70c9766117c34c0ec45c2b0f927a15ea167901a160e1530d8522943c29b61e03568aa0f9c55926c53","0xa89d7757825ae73a6e81829ff788ea7b3d7409857b378ebccd7df73fdbe62c8d9073741cf038314971b39af6c29c9030","0x8c1cd212d0b010905d560688cfc036ae6535bc334fa8b812519d810b7e7dcf1bb7c5f43deaa40f097158358987324a7f","0xb86993c383c015ed8d847c6b795164114dd3e9efd25143f509da318bfba89389ea72a420699e339423afd68b6512fafb","0x8d06bd379c6d87c6ed841d8c6e9d2d0de21653a073725ff74be1934301cc3a79b81ef6dd0aad4e7a9dc6eac9b73019bc","0x81af4d2d87219985b9b1202d724fe39ef988f14fef07dfe3c3b11714e90ffba2a97250838e8535eb63f107abfe645e96","0x8c5e0af6330a8becb787e4b502f34f528ef5756e298a77dc0c7467433454347f3a2e0bd2641fbc2a45b95e231c6e1c02","0x8e2a8f0f04562820dc8e7da681d5cad9fe2e85dd11c785fb6fba6786c57a857e0b3bd838fb849b0376c34c
e1665e4837","0xa39be8269449bfdfc61b1f62077033649f18dae9bef7c6163b9314ca8923691fb832f42776f0160b9e8abd4d143aa4e1","0x8c154e665706355e1cc98e0a4cabf294ab019545ba9c4c399d666e6ec5c869ca9e1faf8fb06cd9c0a5c2f51a7d51b70a","0xa046a7d4de879d3ebd4284f08f24398e9e3bf006cd4e25b5c67273ade248689c69affff92ae810c07941e4904296a563","0xafd94c1cb48758e5917804df03fb38a6da0e48cd9b6262413ea13b26973f9e266690a1b7d9d24bbaf7e82718e0e594b0","0x859e21080310c8d6a38e12e2ac9f90a156578cdeb4bb2e324700e97d9a5511cd6045dc39d1d0de3f94aeed043a24119d","0xa219fb0303c379d0ab50893264919f598e753aac9065e1f23ef2949abc992577ab43c636a1d2c089203ec9ddb941e27d","0xb0fdb639d449588a2ca730afcba59334e7c387342d56defdfb7ef79c493f7fd0e5277eff18e7203e756c7bdda5803047","0x87f9c3b7ed01f54368aca6dbcf2f6e06bff96e183c4b2c65f8baa23b377988863a0a125d5cdd41a072da8462ced4c070","0x99ef7a5d5ac2f1c567160e1f8c95f2f38d41881850f30c461a205f7b1b9fb181277311333839b13fb3ae203447e17727","0xaeaca9b1c2afd24e443326cc68de67b4d9cedb22ad7b501a799d30d39c85bb2ea910d4672673e39e154d699e12d9b3dc","0xa11675a1721a4ba24dd3d0e4c3c33a6edf4cd1b9f6b471070b4386c61f77452266eae6e3f566a40cfc885eada9a29f23","0xb228334445e37b9b49cb4f2cc56b454575e92173ddb01370a553bba665adadd52df353ad74470d512561c2c3473c7bb9","0xa18177087c996572d76f81178d18ed1ceebc8362a396348ce289f1d8bd708b9e99539be6fccd4acb1112381cfc5749b4","0x8e7b8bf460f0d3c99abb19803b9e43422e91507a1c0c22b29ee8b2c52d1a384da4b87c292e28eff040db5be7b1f8641f","0xb03d038d813e29688b6e6f444eb56fec3abba64c3d6f890a6bcf2e916507091cdb2b9d2c7484617be6b26552ed1c56cb","0xa1c88ccd30e934adfc5494b72655f8afe1865a84196abfb376968f22ddc07761210b6a9fb7638f1413d1b4073d430290","0x961b714faebf172ad2dbc11902461e286e4f24a99a939152a53406117767682a571057044decbeb3d3feef81f4488497","0xa03dc4059b46effdd786a0a03cc17cfee8585683faa35bb07936ded3fa3f3a097f518c0b8e2db92fd700149db1937789","0xadf60180c99ca574191cbcc23e8d025b2f931f98ca7dfcebfc380226239b6329347100fcb8b0fcb12db108c6ad101c07","0x805d4f5ef24d46911cbf942f62cb84b0346e5e712284f82b0db223db26d51aabf4
3204755eb19519b00e665c7719fcaa","0x8dea7243e9c139662a7fe3526c6c601eee72fd8847c54c8e1f2ad93ef7f9e1826b170afe58817dac212427164a88e87f","0xa2ba42356606d651b077983de1ad643650997bb2babb188c9a3b27245bb65d2036e46667c37d4ce02cb1be5ae8547abe","0xaf2ae50b392bdc013db2d12ce2544883472d72424fc767d3f5cb0ca2d973fc7d1f425880101e61970e1a988d0670c81b","0x98e6bec0568d3939b31d00eb1040e9b8b2a35db46ddf4369bdaee41bbb63cc84423d29ee510a170fb5b0e2df434ba589","0x822ff3cd12fbef4f508f3ca813c04a2e0b9b799c99848e5ad3563265979e753ee61a48f6adc2984a850f1b46c1a43d35","0x891e8b8b92a394f36653d55725ef514bd2e2a46840a0a2975c76c2a935577f85289026aaa74384da0afe26775cbddfb9","0xb2a3131a5d2fe7c8967047aa66e4524babae941d90552171cc109527f345f42aa0df06dcbb2fa01b33d0043917bbed69","0x80c869469900431f3eeefafdbe07b8afd8cee7739e659e6d0109b397cacff85a88247698f87dc4e2fe39a592f250ac64","0x9091594f488b38f9d2bb5df49fd8b4f8829d9c2f11a197dd1431ed5abbc5c954bbde3387088f9ee3a5a834beb7619bce","0xb472e241e6956146cca57b97a8a204668d050423b4e76f857bad5b47f43b203a04c8391ba9d9c3e95093c071f9d376a1","0xb7dd2de0284844392f7dfb56fe7ca3ede41e27519753ffc579a0a8d2d65ceb8108d06b6b0d4c3c1a2588951297bd1a1e","0x902116ce70d0a079ac190321c1f48701318c05f8e69ee09694754885d33a835a849cafe56f499a2f49f6cda413ddf9a7","0xb18105cc736787fafaf7c3c11c448bce9466e683159dff52723b7951dff429565e466e4841d982e3aaa9ee2066838666","0x97ab9911f3f659691762d568ae0b7faa1047b0aed1009c319fa79d15d0db8db9f808fc385dc9a68fa388c10224985379","0xb2a2cba65f5b927e64d2904ba412e2bac1cf18c9c3eda9c72fb70262497ecf505b640827e2afebecf10eebbcf48ccd3e","0xb36a3fd677baa0d3ef0dac4f1548ff50a1730286b8c99d276a0a45d576e17b39b3cbadd2fe55e003796d370d4be43ce3","0xa5dfec96ca3c272566e89dc453a458909247e3895d3e44831528130bc47cc9d0a0dac78dd3cad680a4351d399d241967","0x8029382113909af6340959c3e61db27392531d62d90f92370a432aec3eb1e4c36ae1d4ef2ba8ec6edb4d7320c7a453f6","0x971d85121ea108e6769d54f9c51299b0381ece8b51d46d49c89f65bedc123bab4d5a8bc14d6f67f4f680077529cbae4c","0x98ff6afc01d0bec80a278f25912e1b1ebff80117adae72
e31d5b9fa4d9624db4ba2065b444df49b489b0607c45e26c4c","0x8fa29be10fb3ab30ce25920fec0187e6e91e458947009dabb869aade7136c8ba23602682b71e390c251f3743164cbdaa","0xb3345c89eb1653418fe3940cf3e56a9a9c66526389b98f45ca02dd62bfb37baa69a4baaa7132d7320695f8ea6ad1fd94","0xb72c7f5541c9ac6b60a7ec9f5415e7fb14da03f7164ea529952a29399f3a071576608dbbcc0d45994f21f92ddbeb1e19","0xaa3450bb155a5f9043d0ef95f546a2e6ade167280bfb75c9f09c6f9cdb1fffb7ce8181436161a538433afa3681c7a141","0x92a18fecaded7854b349f441e7102b638ababa75b1b0281dd0bded6541abe7aa37d96693595be0b01fe0a2e2133d50f9","0x980756ddf9d2253cfe6c94960b516c94889d09e612810935150892627d2ecee9a2517e04968eea295d0106850c04ca44","0xae68c6ccc454318cdd92f32b11d89116a3b8350207a36d22a0f626718cad671d960090e054c0c77ac3162ae180ecfd4b","0x99f31f66eaaa551749ad91d48a0d4e3ff4d82ef0e8b28f3184c54e852422ba1bdafd53b1e753f3a070f3b55f3c23b6a2","0xa44eaeaa6589206069e9c0a45ff9fc51c68da38d4edff1d15529b7932e6f403d12b9387019c44a1488a5d5f27782a51f","0xb80b5d54d4b344840e45b79e621bd77a3f83fb4ce6d8796b7d6915107b3f3c34d2e7d95bdafd120f285669e5acf2437a","0xb36c069ec085a612b5908314d6b84c00a83031780261d1c77a0384c406867c9847d5b0845deddfa512cc04a8df2046fb","0xb09dbe501583220f640d201acea7ee3e39bf9eda8b91aa07b5c50b7641d86d71acb619b38d27835ce97c3759787f08e9","0x87403d46a2bf63170fff0b857acacf42ee801afe9ccba8e5b4aea967b68eac73a499a65ca46906c2eb4c8f27bc739faa","0x82b93669f42a0a2aa5e250ffe6097269da06a9c02fcd1801abbad415a7729a64f830754bafc702e64600ba47671c2208","0x8e3a3029be7edb8dd3ab1f8216664c8dc50d395f603736061d802cef77627db7b859ef287ed850382c13b4d22d6a2d80","0x968e9ec7194ff424409d182ce0259acd950c384c163c04463bc8700a40b79beba6146d22b7fa7016875a249b7b31c602","0x8b42c984bbe4996e0c20862059167c6bdc5164b1ffcd928f29512664459212d263e89f0f0e30eed4e672ffa5ed0b01b5","0x96bac54062110dada905363211133f1f15dc7e4fd80a4c6e4a83bc9a0bcbbaba11cd2c7a13debcf0985e1a954c1da66b","0xa16dc8a653d67a7cd7ae90b2fffac0bf1ca587005430fe5ba9403edd70ca33e38ba5661d2ed6e9d2864400d997626a62","0xa68ab11a570a27853c8d67e491
591dcba746bfbee08a2e75ae0790399130d027ed387f41ef1d7de8df38b472df309161","0x92532b74886874447c0300d07eda9bbe4b41ed25349a3da2e072a93fe32c89d280f740d8ff70d5816793d7f2b97373cc","0x88e35711b471e89218fd5f4d0eadea8a29405af1cd81974427bc4a5fb26ed60798daaf94f726c96e779b403a2cd82820","0xb5c72aa4147c19f8c4f3a0a62d32315b0f4606e0a7025edc5445571eaf4daff64f4b7a585464821574dd50dbe1b49d08","0x9305d9b4095258e79744338683fd93f9e657367b3ab32d78080e51d54eec331edbc224fad5093ebf8ee4bd4286757eb8","0xb2a17abb3f6a05bcb14dc7b98321fa8b46d299626c73d7c6eb12140bf4c3f8e1795250870947af817834f033c88a59d6","0xb3477004837dbd8ba594e4296f960fc91ab3f13551458445e6c232eb04b326da803c4d93e2e8dcd268b4413305ff84da","0x924b4b2ebaafdcfdfedb2829a8bf46cd32e1407d8d725a5bd28bdc821f1bafb3614f030ea4352c671076a63494275a3f","0x8b81b9ef6125c82a9bece6fdcb9888a767ac16e70527753428cc87c56a1236e437da8be4f7ecfe57b9296dc3ae7ba807","0x906e19ec8b8edd58bdf9ae05610a86e4ea2282b1bbc1e8b00b7021d093194e0837d74cf27ac9916bdb8ec308b00da3da","0xb41c5185869071760ac786078a57a2ab4e2af60a890037ac0c0c28d6826f15c2cf028fddd42a9b6de632c3d550bfbc14","0xa646e5dec1b713ae9dfdf7bdc6cd474d5731a320403c7dfcfd666ffc9ae0cff4b5a79530e8df3f4aa9cb80568cb138e9","0xb0efad22827e562bd3c3e925acbd0d9425d19057868608d78c2209a531cccd0f2c43dc5673acf9822247428ffa2bb821","0xa94c19468d14b6f99002fc52ac06bbe59e5c472e4a0cdb225144a62f8870b3f10593749df7a2de0bd3c9476ce682e148","0x803864a91162f0273d49271dafaab632d93d494d1af935aefa522768af058fce52165018512e8d6774976d52bd797e22","0xa08711c2f7d45c68fb340ac23597332e1bcaec9198f72967b9921204b9d48a7843561ff318f87908c05a44fc35e3cc9d","0x91c3cad94a11a3197ae4f9461faab91a669e0dddb0371d3cab3ed9aeb1267badc797d8375181130e461eadd05099b2a2","0x81bdaaf48aae4f7b480fc13f1e7f4dd3023a41439ba231760409ce9292c11128ab2b0bdbbf28b98af4f97b3551f363af","0x8d60f9df9fd303f625af90e8272c4ecb95bb94e6efc5da17b8ab663ee3b3f673e9f6420d890ccc94acf4d2cae7a860d8","0xa7b75901520c06e9495ab983f70b61483504c7ff2a0980c51115d11e0744683ce022d76e3e09f4e99e698cbd21432a0d","0x829560
72df0586562fda7e7738226f694e1c73518dd86e0799d2e820d7f79233667192c9236dcb27637e4c65ef19d493","0xa586beb9b6ffd06ad200957490803a7cd8c9bf76e782734e0f55e04a3dc38949de75dc607822ec405736c576cf83bca3","0xa179a30d00def9b34a7e85607a447eea0401e32ab5abeee1a281f2acd1cf6ec81a178020666f641d9492b1bdf66f05a3","0x83e129705c538787ed8e0fdc1275e6466a3f4ee21a1e6abedd239393b1df72244723b92f9d9d9339a0cab6ebf28f5a16","0x811bd8d1e3722b64cd2f5b431167e7f91456e8bba2cc669d3fbbce7d553e29c3c19f629fcedd2498bc26d33a24891d17","0xa243c030c858f1f60cccd26b45b024698cc6d9d9e6198c1ed4964a235d9f8d0baf9cde10c8e63dfaa47f8e74e51a6e85","0xab839eb82e23ca52663281f863b55b0a3d6d4425c33ffb4eeb1d7979488ab068bf99e2a60e82cea4dc42c56c26cbfebe","0x8b896f9bb21d49343e67aec6ad175b58c0c81a3ca73d44d113ae4354a0065d98eb1a5cafedaf232a2bb9cdc62152f309","0xaf6230340cc0b66f5bf845540ed4fc3e7d6077f361d60762e488d57834c3e7eb7eacc1b0ed73a7d134f174a01410e50c","0x88975e1b1af678d1b5179f72300a30900736af580dd748fd9461ef7afccc91ccd9bed33f9da55c8711a7635b800e831f","0xa97486bb9047391661718a54b8dd5a5e363964e495eae6c692730264478c927cf3e66dd3602413189a3699fbeae26e15","0xa5973c161ab38732885d1d2785fd74bf156ba34881980cba27fe239caef06b24a533ffe6dbbbeca5e6566682cc00300a","0xa24776e9a840afda0003fa73b415d5bd6ecd9b5c2cc842b643ee51b8c6087f4eead4d0bfbd987eb174c489a7b952ff2a","0xa8a6ee06e3af053b705a12b59777267c546f33ba8a0f49493af8e6df4e15cf8dd2d4fb4daf7e84c6b5d3a7363118ff03","0xa28e59ce6ad02c2ce725067c0123117e12ac5a52c8f5af13eec75f4a9efc4f696777db18a374fa33bcae82e0734ebd16","0x86dfc3b78e841c708aff677baa8ee654c808e5d257158715097c1025d46ece94993efe12c9d188252ad98a1e0e331fec","0xa88d0275510f242eab11fdb0410ff6e1b9d7a3cbd3658333539815f1b450a84816e6613d15aa8a8eb15d87cdad4b27a2","0x8440acea2931118a5b481268ff9f180ee4ede85d14a52c026adc882410825b8275caa44aff0b50c2b88d39f21b1a0696","0xa7c3182eab25bd6785bacf12079d0afb0a9b165d6ed327814e2177148539f249eb9b5b2554538f54f3c882d37c0a8abe","0x85291fbe10538d7da38efdd55a7acebf03b1848428a2f664c3ce55367aece60039f4f320b1771c9c89a3594
1797f717c","0xa2c6414eeb1234728ab0de94aa98fc06433a58efa646ca3fcbd97dbfb8d98ae59f7ce6d528f669c8149e1e13266f69c9","0x840c8462785591ee93aee2538d9f1ec44ba2ca61a569ab51d335ac873f5d48099ae8d7a7efa0725d9ff8f9475bfa4f56","0xa7065a9d02fb3673acf7702a488fbc01aa69580964932f6f40b6c2d1c386b19e50b0e104fcac24ea26c4e723611d0238","0xb72db6d141267438279e032c95e6106c2ccb3164b842ba857a2018f3a35f4b040da92680881eb17cd61d0920d5b8f006","0xa8005d6c5960e090374747307ef0be2871a7a43fa4e76a16c35d2baab808e9777b496e9f57a4218b23390887c33a0b55","0x8e152cea1e00a451ca47c20a1e8875873419700af15a5f38ee2268d3fbc974d4bd5f4be38008fa6f404dbdedd6e6e710","0xa3391aed1fcd68761f06a7d1008ec62a09b1cb3d0203cd04e300a0c91adfed1812d8bc1e4a3fd7976dc0aae0e99f52f1","0x967eb57bf2aa503ee0c6e67438098149eac305089c155f1762cf5e84e31f0fbf27c34a9af05621e34645c1ec96afaec8","0x88af97ddc4937a95ec0dcd25e4173127260f91c8db2f6eac84afb789b363705fb3196235af631c70cafd09411d233589","0xa32df75b3f2c921b8767638fd289bcfc61e08597170186637a7128ffedd52c798c434485ac2c7de07014f9e895c2c3d8","0xb0a783832153650aa0d766a3a73ec208b6ce5caeb40b87177ffc035ab03c7705ecdd1090b6456a29f5fb7e90e2fa8930","0xb59c8e803b4c3486777d15fc2311b97f9ded1602fa570c7b0200bada36a49ee9ef4d4c1474265af8e1c38a93eb66b18b","0x982f2c85f83e852022998ff91bafbb6ff093ef22cf9d5063e083a48b29175ccbd51b9c6557151409e439096300981a6c","0x939e3b5989fefebb9d272a954659a4eb125b98c9da6953f5e628d26266bd0525ec38304b8d56f08d65abc4d6da4a8dbb","0x8898212fe05bc8de7d18503cb84a1c1337cc2c09d1eeef2b475aa79185b7322bf1f8e065f1bf871c0c927dd19faf1f6d","0x94b0393a41cd00f724aee2d4bc72103d626a5aecb4b5486dd1ef8ac27528398edf56df9db5c3d238d8579af368afeb09","0x96ac564450d998e7445dd2ea8e3fc7974d575508fa19e1c60c308d83b645864c029f2f6b7396d4ff4c1b24e92e3bac37","0x8adf6638e18aff3eb3b47617da696eb6c4bdfbecbbc3c45d3d0ab0b12cbad00e462fdfbe0c35780d21aa973fc150285e","0xb53f94612f818571b5565bbb295e74bada9b5f9794b3b91125915e44d6ddcc4da25510eab718e251a09c99534d6042d9","0x8b96462508d77ee083c376cd90807aebad8de96bca43983c84a4a6f196d5faf6619
a2351f43bfeec101864c3bf255519","0xaeadf34657083fc71df33bd44af73bf5281c9ca6d906b9c745536e1819ea90b56107c55e2178ebad08f3ba75b3f81c86","0x9784ba29b2f0057b5af1d3ab2796d439b8753f1f749c73e791037461bdfc3f7097394283105b8ab01788ea5255a96710","0x8756241bda159d4a33bf74faba0d4594d963c370fb6a18431f279b4a865b070b0547a6d1613cf45b8cfb5f9236bbf831","0xb03ebfd6b71421dfd49a30460f9f57063eebfe31b9ceaa2a05c37c61522b35bdc09d7db3ad75c76c253c00ba282d3cd2","0xb34e7e6341fa9d854b2d3153bdda0c4ae2b2f442ab7af6f99a0975d45725aa48e36ae5f7011edd249862e91f499687d4","0xb462ee09dc3963a14354244313e3444de5cc37ea5ccfbf14cd9aca8027b59c4cb2a949bc30474497cab8123e768460e6","0xaea753290e51e2f6a21a9a0ee67d3a2713f95c2a5c17fe41116c87d3aa77b1683761264d704df1ac34f8b873bc88ef7b","0x98430592afd414394f98ddfff9f280fcb1c322dbe3510f45e1e9c4bb8ee306b3e0cf0282c0ee73ebb8ba087d4d9e0858","0xb95d3b5aaf54ffca11f4be8d57f76e14afdb20afc859dc7c7471e0b42031e8f3d461b726ecb979bdb2f353498dfe95ea","0x984d17f9b11a683132e0b5a9ee5945e3ff7054c2d5c716be73b29078db1d36f54c6e652fd2f52a19da313112e97ade07","0xab232f756b3fff3262be418a1af61a7e0c95ceebbc775389622a8e10610508cd6784ab7960441917a83cc191c58829ea","0xa28f41678d6e60de76b0e36ab10e4516e53e02e9c77d2b5af3cfeee3ce94cfa30c5797bd1daab20c98e1cad83ad0f633","0xb55395fca84dd3ccc05dd480cb9b430bf8631ff06e24cb51d54519703d667268c2f8afcde4ba4ed16bece8cc7bc8c6e0","0x8a8a5392a0e2ea3c7a8c51328fab11156004e84a9c63483b64e8f8ebf18a58b6ffa8fe8b9d95af0a2f655f601d096396","0xab480000fe194d23f08a7a9ec1c392334e9c687e06851f083845121ce502c06b54dda8c43092bcc1035df45cc752fe9b","0xb265644c29f628d1c7e8e25a5e845cabb21799371814730a41a363e1bda8a7be50fee7c3996a365b7fcba4642add10db","0xb8a915a3c685c2d4728f6931c4d29487cad764c5ce23c25e64b1a3259ac27235e41b23bfe7ae982921b4cb84463097df","0x8efa7338442a4b6318145a5440fc213b97869647eeae41b9aa3c0a27ee51285b73e3ae3b4a9423df255e6add58864aa9","0x9106d65444f74d217f4187dfc8fcf3810b916d1e4275f94f6a86d1c4f3565b131fd6cde1fa708bc05fe183c49f14941a","0x948252dac8026bbbdb0a06b3c9d66ec4cf9532163bab680
76fda1bd2357b69e4b514729c15aaa83b5618b1977bbc60c4","0xae6596ccfdf5cbbc5782efe3bb0b101bb132dbe1d568854ca24cacc0b2e0e9fabcb2ca7ab42aecec412efd15cf8cb7a2","0x84a0b6c198ff64fd7958dfd1b40eac9638e8e0b2c4cd8cf5d8cdf80419baee76a05184bce6c5b635f6bf2d30055476a7","0x8893118be4a055c2b3da593dbca51b1ae2ea2469911acfb27ee42faf3e6c3ad0693d3914c508c0b05b36a88c8b312b76","0xb097479e967504deb6734785db7e60d1d8034d6ca5ba9552887e937f5e17bb413fccac2c1d1082154ed76609127860ad","0xa0294e6b9958f244d29943debf24b00b538b3da1116269b6e452bb12dc742226712fd1a15b9c88195afeb5d2415f505c","0xb3cc15f635080bc038f61b615f62b5b5c6f2870586191f59476e8368a73641d6ac2f7d0c1f54621982defdb318020230","0x99856f49b9fe1604d917c94d09cc0ed753d13d015d30587a94e6631ffd964b214e607deb8a69a8b5e349a7edf4309206","0xa8571e113ea22b4b4fce41a094da8c70de37830ae32e62c65c2fa5ad06a9bc29e884b945e73d448c72b176d6ecebfb58","0xa9e9c6e52beb0013273c29844956b3ce291023678107cdc785f7b44eff5003462841ad8780761b86aefc6b734adde7cf","0x80a784b0b27edb51ef2bad3aee80e51778dcaa0f3f5d3dcb5dc5d4f4b2cf7ae35b08de6680ea9dac53f8438b92eb09ef","0x827b543e609ea328e97e373f70ad72d4915a2d1daae0c60d44ac637231070e164c43a2a58db80a64df1c624a042b38f9","0xb449c65e8195202efdcb9bdb4e869a437313b118fef8b510cbbf8b79a4e99376adb749b37e9c20b51b31ed3310169e27","0x8ea3028f4548a79a94c717e1ed28ad4d8725b8d6ab18b021063ce46f665c79da3c49440c6577319dab2d036b7e08f387","0x897798431cfb17fe39f08f5f854005dc37b1c1ec1edba6c24bc8acb3b88838d0534a75475325a5ea98b326ad47dbad75","0x89cf232e6303b0751561960fd4dea5754a28c594daf930326b4541274ffb03c7dd75938e411eb9a375006a70ce38097f","0x9727c6ae7f0840f0b6c8bfb3a1a5582ceee705e0b5c59b97def7a7a2283edd4d3f47b7971e902a3a2079e40b53ff69b8","0xb76ed72b122c48679d221072efc0eeea063cb205cbf5f9ef0101fd10cb1075b8628166c83577cced654e1c001c7882f7","0xae908c42d208759da5ee9b405df85a6532ea35c6f0f6a1288d22870f59d98edc896841b8ac890a538e6c8d1e8b02d359","0x809d12fe4039a0ec80dc9be6a89acaab7797e5f7f9b163378f52f9a75a1d73b2e9ae6e3dd49e32ced439783c1cabbef5","0xa4149530b7f85d1098ba534d695
48c6c612c416e8d35992fc1f64f4deeb41e09e49c6cf7aadbed7e846b91299358fe2d","0xa49342eacd1ec1148b8df1e253b1c015f603c39de11fa0a364ccb86ea32d69c34fd7aa6980a1fadcd8e785a57fa46f60","0x87d43eff5a006dc4dddcf76cc96c656a1f3a68f19f124181feab86c6cc9a52cb9189cdbb423414defdd9bb0ca8ff1ddc","0x861367e87a9aa2f0f68296ba50aa5dbc5713008d260cc2c7e62d407c2063064749324c4e8156dc21b749656cfebce26b","0xb5303c2f72e84e170e66ae1b0fbd51b8c7a6f27476eaf5694b64e8737d5c84b51fe90100b256465a4c4156dd873cddb0","0xb62849a4f891415d74f434cdc1d23c4a69074487659ca96e1762466b2b7a5d8525b056b891d0feea6fe6845cba8bc7fb","0x923dd9e0d6590a9307e8c4c23f13bae3306b580e297a937711a8b13e8de85e41a61462f25b7d352b682e8437bf2b4ab3","0x9147379860cd713cd46c94b8cdf75125d36c37517fbecf81ace9680b98ce6291cd1c3e472f84249cc3b2b445e314b1b6","0xa808a4f17ac21e3fb5cfef404e61fae3693ca3e688d375f99b6116779696059a146c27b06de3ac36da349b0649befd56","0x87787e9322e1b75e66c1f0d9ea0915722a232770930c2d2a95e9478c4b950d15ab767e30cea128f9ed65893bfc2d0743","0x9036a6ee2577223be105defe1081c48ea7319e112fff9110eb9f61110c319da25a6cea0464ce65e858635b079691ef1f","0xaf5548c7c24e1088c23b57ee14d26c12a83484c9fd9296edf1012d8dcf88243f20039b43c8c548c265ef9a1ffe9c1c88","0xa0fff520045e14065965fb8accd17e878d3fcaf9e0af2962c8954e50be6683d31fa0bf4816ab68f08630dbac6bfce52a","0xb4c1b249e079f6ae1781af1d97a60b15855f49864c50496c09c91fe1946266915b799f0406084d7783f5b1039116dd8b","0x8b0ffa5e7c498cb3879dddca34743b41eee8e2dea3d4317a6e961b58adb699ef0c92400c068d5228881a2b08121226bf","0x852ae8b19a1d80aa8ae5382e7ee5c8e7670ceb16640871c56b20b96b66b3b60e00015a3dde039446972e57b49a999ddd","0xa49942f04234a7d8492169da232cfff8051df86e8e1ba3db46aede02422c689c87dc1d99699c25f96cb763f5ca0983e5","0xb04b597b7760cf5dcf411ef896d1661e6d5b0db3257ac2cf64b20b60c6cc18fa10523bb958a48d010b55bac7b02ab3b1","0xa494591b51ea8285daecc194b5e5bd45ae35767d0246ac94fae204d674ee180c8e97ff15f71f28b7aeb175b8aea59710","0x97d2624919e78406e7460730680dea8e71c8571cf988e11441aeea54512b95bd820e78562c99372d535d96f7e200d20d","0xac693dd
b00e48f76e667243b9b6a7008424043fb779e4f2252330285232c3fccac4da25cbd6d95fe9ad959ff305a91f6","0x8d20ca0a71a64a3f702a0825bb46bd810d03bebfb227683680d474a52f965716ff99e19a165ebaf6567987f4f9ee3c94","0xa5c516a438f916d1d68ca76996404792e0a66e97b7f18fc54c917bf10cf3211b62387932756e39e67e47b0bd6e88385a","0xb089614d830abc0afa435034cec7f851f2f095d479cacf1a3fb57272da826c499a52e7dcbc0eb85f4166fb94778e18e9","0xa8dacc943765d930848288192f4c69e2461c4b9bc6e79e30eeef9a543318cf9ae9569d6986c65c5668a89d49993f8e07","0xab5a9361fa339eec8c621bdad0a58078983abd8942d4282b22835d7a3a47e132d42414b7c359694986f7db39386c2e19","0x94230517fb57bd8eb26c6f64129b8b2abd0282323bf7b94b8bac7fab27b4ecc2c4290c294275e1a759de19f2216134f3","0xb8f158ea5006bc3b90b285246625faaa6ac9b5f5030dc69701b12f3b79a53ec7e92eeb5a63bbd1f9509a0a3469ff3ffc","0x8b6944fd8cb8540957a91a142fdcda827762aa777a31e8810ca6d026e50370ee1636fc351724767e817ca38804ebe005","0x82d1ee40fe1569c29644f79fa6c4033b7ed45cd2c3b343881f6eb0de2e79548fded4787fae19bed6ee76ed76ff9f2f11","0xa8924c7035e99eaed244ca165607e7e568b6c8085510dcdbaf6ebdbed405af2e6c14ee27d94ffef10d30aa52a60bf66d","0x956f82a6c2ae044635e85812581e4866c5fa2f427b01942047d81f6d79a14192f66fbbe77c9ffeaef4e6147097fdd2b5","0xb1100255a1bcf5e05b6aff1dfeb6e1d55b5d68d43a7457ba10cc76b61885f67f4d0d5179abda786e037ae95deb8eea45","0x99510799025e3e5e8fbf06dedb14c060c6548ba2bda824f687d3999dc395e794b1fb6514b9013f3892b6cf65cb0d65aa","0x8f9091cebf5e9c809aab415942172258f894e66e625d7388a05289183f01b8d994d52e05a8e69f784fba41db9ea357f0","0xa13d2eeb0776bdee9820ecb6693536720232848c51936bb4ef4fe65588d3f920d08a21907e1fdb881c1ad70b3725e726","0xa68b8f18922d550284c5e5dc2dda771f24c21965a6a4d5e7a71678178f46df4d8a421497aad8fcb4c7e241aba26378a0","0x8b7601f0a3c6ad27f03f2d23e785c81c1460d60100f91ea9d1cab978aa03b523150206c6d52ce7c7769c71d2c8228e9e","0xa8e02926430813caa851bb2b46de7f0420f0a64eb5f6b805401c11c9091d3b6d67d841b5674fa2b1dce0867714124cd8","0xb7968ecba568b8193b3058400af02c183f0a6df995a744450b3f7e0af7a772454677c3857f99c140bbdb2a09
e832e8e0","0x8f20b1e9ba87d0a3f35309b985f3c18d2e8800f1ca7f0c52cadef773f1496b6070c936eea48c4a1cae83fd2524e9d233","0x88aef260042db0d641a51f40639dbeeefa9e9811df30bee695f3791f88a2f84d318f04e8926b7f47bf25956cb9e3754f","0x9725345893b647e9ba4e6a29e12f96751f1ae25fcaec2173e9a259921a1a7edb7a47159b3c8767e44d9e2689f5aa0f72","0x8c281e6f72752cb11e239e4df9341c45106eb7993c160e54423c2bffe10bc39d42624b45a1f673936ef2e1a02fc92f1a","0x90aba2f68bddb2fcce6c51430dacdfeec43ea8dc379660c99095df11017691ccf5faa27665cf4b9f0eea7728ae53c327","0xb7022695c16521c5704f49b7ddbdbec9b5f57ce0ceebe537bc0ebb0906d8196cc855a9afeb8950a1710f6a654464d93f","0x8fe1b9dd3c6a258116415d36e08374e094b22f0afb104385a5da48be17123e86fb8327baacc4f0d9ebae923d55d99bb5","0x817e85d8e3d19a4cbc1dec31597142c2daa4871bda89c2177fa719c00eda3344eb08b82eb92d4aa91a9eaacb3fc09783","0xb59053e1081d2603f1ca0ba553804d6fa696e1fd996631db8f62087b26a40dfef02098b0326bb75f99ec83b9267ca738","0x990a173d857d3ba81ff3789b931bfc9f5609cde0169b7f055fa3cb56451748d593d62d46ba33f80f9cafffe02b68dd14","0xb0c538dbba4954b809ab26f9f94a3cf1dcb77ce289eaec1d19f556c0ae4be1fa03af4a9b7057837541c3cc0a80538736","0xac3ba42f5f44f9e1fc453ce49c4ab79d0e1d5c42d3b30b1e098f3ab3f414c4c262fa12fb2be249f52d4aaf3c5224beb9","0xaf47467eb152e59870e21f0d4da2f43e093daf40180ab01438030684b114d025326928eaab12c41b81a066d94fce8436","0x98d1b58ba22e7289b1c45c79a24624f19b1d89e00f778eef327ec4856a9a897278e6f1a9a7e673844b31dde949153000","0x97ccb15dfadc7c59dca08cfe0d22df2e52c684cf97de1d94bc00d7ba24e020025130b0a39c0f4d46e4fc872771ee7875","0xb699e4ed9a000ff96ca296b2f09dce278832bc8ac96851ff3cff99ed3f6f752cfc0fea8571be28cd9b5a7ec36f1a08ee","0xb9f49f0edb7941cc296435ff0a912e3ad16848ee8765ab5f60a050b280d6ea585e5b34051b15f6b8934ef01ceb85f648","0xac3893df7b4ceab23c6b9054e48e8ba40d6e5beda8fbe90b814f992f52494186969b35d8c4cdc3c99890a222c9c09008","0xa41293ad22fae81dea94467bc1488c3707f3d4765059173980be93995fa4fcc3c9340796e3eed0beeb0ba0d9bb4fa3aa","0xa0543e77acd2aeecde13d18d258aeb2c7397b77f17c35a1992e8666ea7abcd8a38ec
6c2741bd929abba2f766138618cc","0x92e79b22bc40e69f6527c969500ca543899105837b6b1075fa1796755c723462059b3d1b028e0b3df2559fa440e09175","0xa1fa1eac8f41a5197a6fb4aa1eae1a031c89f9c13ff9448338b222780cf9022e0b0925d930c37501a0ef7b2b00fdaf83","0xb3cb29ff73229f0637335f28a08ad8c5f166066f27c6c175164d0f26766a927f843b987ee9b309ed71cbf0a65d483831","0x84d4ab787f0ac00f104f4a734dc693d62d48c2aeb03913153da62c2ae2c27d11b1110dcef8980368dd84682ea2c1a308","0xab6a8e4bbc78d4a7b291ad3e9a8fe2d65f640524ba3181123b09d2d18a9e300e2509ccf7000fe47e75b65f3e992a2e7e","0xb7805ebe4f1a4df414003dc10bca805f2ab86ca75820012653e8f9b79c405196b0e2cab099f2ab953d67f0d60d31a0f9","0xb12c582454148338ea605d22bd00a754109063e22617f1f8ac8ddf5502c22a181c50c216c3617b9852aa5f26af56b323","0x86333ad9f898947e31ce747728dc8c887479e18d36ff3013f69ebef807d82c6981543b5c3788af93c4d912ba084d3cba","0xb514efa310dc4ad1258add138891e540d8c87142a881b5f46563cc58ecd1488e6d3a2fca54c0b72a929f3364ca8c333e","0xaa0a30f92843cf2f484066a783a1d75a7aa6f41f00b421d4baf20a6ac7886c468d0eea7ca8b17dd22f4f74631b62b640","0xb3b7dc63baec9a752e8433c0cdee4d0f9bc41f66f2b8d132faf925eef9cf89aae756fc132c45910f057122462605dc10","0xb9b8190dac5bfdeb59fd44f4da41a57e7f1e7d2c21faba9da91fa45cbeca06dcf299c9ae22f0c89ece11ac46352d619f","0x89f8cf36501ad8bdfeab863752a9090e3bfda57cf8fdeca2944864dc05925f501e252c048221bcc57136ab09a64b64b2","0xb0cbfaf317f05f97be47fc9d69eda2dd82500e00d42612f271a1fe24626408c28881f171e855bd5bd67409f9847502b4","0xa7c21a8fcede581bfd9847b6835eda62ba250bea81f1bb17372c800a19c732abe03064e64a2f865d974fb636cab4b859","0x95f9df524ba7a4667351696c4176b505d8ea3659f5ff2701173064acc624af69a0fad4970963736383b979830cb32260","0x856a74fe8b37a2e3afeac858c8632200485d438422a16ae3b29f359e470e8244995c63ad79c7e007ed063f178d0306fd","0xb37faa4d78fdc0bb9d403674dbea0176c2014a171c7be8527b54f7d1a32a76883d3422a3e7a5f5fcc5e9b31b57822eeb","0x8d37234d8594ec3fe75670b5c9cc1ec3537564d4739b2682a75b18b08401869a4264c0f264354219d8d896cded715db4","0xb5289ee5737f0e0bde485d32096d23387d68dab8f01f4782
1ab4f06cc79a967afe7355e72dc0c751d96b2747b26f6255","0x9085e1fdf9f813e9c3b8232d3c8863cd84ab30d45e8e0d3d6a0abd9ebc6fd70cdf749ff4d04390000e14c7d8c6655fc7","0x93a388c83630331eca4da37ea4a97b3b453238af474817cc0a0727fd3138dcb4a22de38c04783ec829c22cb459cb4e8e","0xa5377116027c5d061dbe24c240b891c08cdd8cd3f0899e848d682c873aff5b8132c1e7cfe76d2e5ed97ee0eb1d42cb68","0xa274c84b04338ed28d74683e2a7519c2591a3ce37c294d6f6e678f7d628be2db8eff253ede21823e2df7183e6552f622","0x8bc201147a842453a50bec3ac97671397bc086d6dfc9377fa38c2124cdc286abda69b7324f47d64da094ae011d98d9d9","0x9842d0c066c524592b76fbec5132bc628e5e1d21c424bec4555efca8619cc1fd8ea3161febcb8b9e8ab54702f4e815e2","0xa19191b713a07efe85c266f839d14e25660ee74452e6c691cd9997d85ae4f732052d802d3deb018bdd847caa298a894b","0xa24f71fc0db504da4e287dd118a4a74301cbcd16033937ba2abc8417956fcb4ae19b8e63b931795544a978137eff51cb","0xa90eec4a6a3a4b8f9a5b93d978b5026fcf812fe65585b008d7e08c4aaf21195a1d0699f12fc16f79b6a18a369af45771","0x8b551cf89737d7d06d9b3b9c4c1c73b41f2ea0af4540999c70b82dabff8580797cf0a3caf34c86c59a7069eb2e38f087","0xb8d312e6c635e7a216a1cda075ae77ba3e1d2fd501dc31e83496e6e81ed5d9c7799f8e578869c2e0e256fb29f5de10a7","0x8d144bdb8cae0b2cdb5b33d44bbc96984a5925202506a8cc65eb67ac904b466f5a7fe3e1cbf04aa785bbb7348c4bb73c","0xa101b3d58b7a98659244b88de0b478b3fb87dc5fc6031f6e689b99edf498abd43e151fd32bd4bbd240e0b3e59c440359","0x907453abca7d8e7151a05cc3d506c988007692fe7401395dc93177d0d07d114ab6cca0cc658eb94c0223fe8658295cad","0x825329ffbe2147ddb68f63a0a67f32d7f309657b8e5d9ab5bb34b3730bfa2c77a23eaaadb05def7d9f94a9e08fdc1e96","0x88ee923c95c1dac99ae7ed6067906d734d793c5dc5d26339c1bb3314abe201c5dccb33b9007351885eb2754e9a8ea06c","0x98bc9798543f5f1adc9f2cfcfa72331989420e9c3f6598c45269f0dc9b7c8607bbeaf03faa0aea2ddde2b8f17fdceff5","0x8ee87877702a79aef923ab970db6fa81561b3c07d5bf1a072af0a7bad765b4cbaec910afe1a91703feacc7822fa38a94","0x8060b9584aa294fe8adc2b22f67e988bc6da768eae91e429dcc43ddc53cfcc5d6753fdc1b420b268c7eb2fb50736a970","0xb344a5524d80a2f051870c7001f7
4fcf348a70fcf78dbd20c6ff9ca85d81567d2318c8b8089f2c4f195d6aec9fc15fa6","0x8f5a5d893e1936ed062149d20eb73d98b62b7f50ab5d93a6429c03656b36688d1c80cb5010e4977491e51fa0d7dd35d5","0x86fa32ebbf97328c5f5f15564e1238297e289ec3219b9a741724e9f3ae8d5c15277008f555863a478b247ba5dc601d44","0x9557e55377e279f4b6b5e0ffe01eca037cc13aac242d67dfcd0374a1e775c5ed5cb30c25fe21143fee54e3302d34a3ea","0x8cb6bcbc39372d23464a416ea7039f57ba8413cf3f00d9a7a5b356ab20dcb8ed11b3561f7bce372b8534d2870c7ee270","0xb5d59075cb5abde5391f64b6c3b8b50adc6e1f654e2a580b6d6d6eff3f4fbdd8fffc92e06809c393f5c8eab37f774c4b","0xafcfb6903ef13e493a1f7308675582f15af0403b6553e8c37afb8b2808ad21b88b347dc139464367dc260df075fea1ad","0x810fbbe808375735dd22d5bc7fc3828dc49fdd22cc2d7661604e7ac9c4535c1df578780affb3b895a0831640a945bcad","0x8056b0c678803b416f924e09a6299a33cf9ad7da6fe1ad7accefe95c179e0077da36815fde3716711c394e2c5ea7127f","0x8b67403702d06979be19f1d6dc3ec73cc2e81254d6b7d0cc49cd4fdda8cd51ab0835c1d2d26fc0ecab5df90585c2f351","0x87f97f9e6d4be07e8db250e5dd2bffdf1390665bc5709f2b631a6fa69a7fca958f19bd7cc617183da1f50ee63e9352b5","0xae151310985940471e6803fcf37600d7fa98830613e381e00dab943aec32c14162d51c4598e8847148148000d6e5af5c","0x81eb537b35b7602c45441cfc61b27fa9a30d3998fad35a064e05bc9479e9f10b62eba2b234b348219eea3cadcaac64bb","0x8a441434934180ab6f5bc541f86ebd06eadbee01f438836d797e930fa803a51510e005c9248cecc231a775b74d12b5e9","0x81f3c250a27ba14d8496a5092b145629eb2c2e6a5298438670375363f57e2798207832c8027c3e9238ad94ecdadfc4df","0xa6217c311f2f3db02ceaa5b6096849fe92b6f4b6f1491535ef8525f6ccee6130bed2809e625073ecbaddd4a3eb3df186","0x82d1c396f0388b942cf22b119d7ef1ad03d3dad49a74d9d01649ee284f377c8daddd095d596871669e16160299a210db","0xa40ddf7043c5d72a7246bd727b07f7fff1549f0e443d611de6f9976c37448b21664c5089c57f20105102d935ab82f27b","0xb6c03c1c97adf0c4bf4447ec71366c6c1bff401ba46236cd4a33d39291e7a1f0bb34bd078ba3a18d15c98993b153a279","0x8a94f5f632068399c359c4b3a3653cb6df2b207379b3d0cdace51afdf70d6d5cce6b89a2b0fee66744eba86c98fb21c2","0xb2f19e78
ee85073f680c3bba1f07fd31b057c00b97040357d97855b54a0b5accb0d3b05b2a294568fcd6a4be6f266950","0xa74632d13bbe2d64b51d7a9c3ae0a5a971c19f51cf7596a807cea053e6a0f3719700976d4e394b356c0329a2dced9aa2","0xafef616d341a9bc94393b8dfba68ff0581436aa3a3adb7c26a1bbf2cf19fa877066191681f71f17f3cd6f9cf6bf70b5a","0x8ce96d93ae217408acf7eb0f9cbb9563363e5c7002e19bbe1e80760bc9d449daee2118f3878b955163ed664516b97294","0x8414f79b496176bc8b8e25f8e4cfee28f4f1c2ddab099d63d2aca1b6403d26a571152fc3edb97794767a7c4686ad557c","0xb6c61d01fd8ce087ef9f079bf25bf10090db483dd4f88c4a786d31c1bdf52065651c1f5523f20c21e75cea17df69ab73","0xa5790fd629be70545093631efadddc136661f63b65ec682609c38ef7d3d7fa4e56bdf94f06e263bc055b90cb1c6bcefe","0xb515a767e95704fb7597bca9e46f1753abacdc0e56e867ee3c6f4cd382643c2a28e65312c05ad040eaa3a8cbe7217a65","0x8135806a02ead6aa92e9adb6fefb91349837ab73105aaa7be488ef966aa8dfaafdfa64bbae30fcbfa55dd135a036a863","0x8f22435702716d76b1369750694540742d909d5e72b54d0878245fab7c269953b1c6f2b29c66f08d5e0263ca3a731771","0x8e0f8a8e8753e077dac95848212aeffd51c23d9b6d611df8b102f654089401954413ecbedc6367561ca599512ae5dda7","0x815a9084e3e2345f24c5fa559deec21ee1352fb60f4025c0779be65057f2d528a3d91593bd30d3a185f5ec53a9950676","0x967e6555ccba395b2cc1605f8484c5112c7b263f41ce8439a99fd1c71c5ed14ad02684d6f636364199ca48afbbde13be","0x8cd0ccf17682950b34c796a41e2ea7dd5367aba5e80a907e01f4cdc611e4a411918215e5aebf4292f8b24765d73314a6","0xa58bf1bbb377e4b3915df6f058a0f53b8fb8130fdec8c391f6bc82065694d0be59bb67ffb540e6c42cc8b380c6e36359","0x92af3151d9e6bfb3383d85433e953c0160859f759b0988431ec5893542ba40288f65db43c78a904325ef8d324988f09d","0x8011bbb05705167afb47d4425065630f54cb86cd462095e83b81dfebf348f846e4d8fbcf1c13208f5de1931f81da40b9","0x81c743c104fc3cb047885c9fa0fb9705c3a83ee24f690f539f4985509c3dafd507af3f6a2128276f45d5939ef70c167f","0xa2c9679b151c041aaf5efeac5a737a8f70d1631d931609fca16be1905682f35e291292874cb3b03f14994f98573c6f44","0xa4949b86c4e5b1d5c82a337e5ce6b2718b1f7c215148c8bfb7e7c44ec86c5c9476048fc5c01f57cb092087647
8c41ad6","0x86c2495088bd1772152e527a1da0ef473f924ea9ab0e5b8077df859c28078f73c4e22e3a906b507fdf217c3c80808b5c","0x892e0a910dcf162bcea379763c3e2349349e4cda9402949255ac4a78dd5a47e0bf42f5bd0913951576b1d206dc1e536a","0xa7009b2c6b396138afe4754b7cc10dee557c51c7f1a357a11486b3253818531f781ea8107360c8d4c3b1cd96282353c0","0x911763ef439c086065cc7b4e57484ed6d693ea44acee4b18c9fd998116da55fbe7dcb8d2a0f0f9b32132fca82d73dff6","0xa722000b95a4a2d40bed81870793f15ba2af633f9892df507f2842e52452e02b5ea8dea6a043c2b2611d82376e33742a","0x9387ac49477bd719c2f92240d0bdfcf9767aad247ca93dc51e56106463206bc343a8ec855eb803471629a66fffb565d6","0x92819a1fa48ab4902939bb72a0a4e6143c058ea42b42f9bc6cea5df45f49724e2530daf3fc4f097cceefa2a8b9db0076","0x98eac7b04537653bc0f4941aae732e4b1f84bd276c992c64a219b8715eb1fb829b5cbd997d57feb15c7694c468f95f70","0xb275e7ba848ce21bf7996e12dbeb8dadb5d0e4f1cb5a0248a4f8f9c9fe6c74e3c93f4b61edbcb0a51af5a141e1c14bc7","0x97243189285aba4d49c53770c242f2faf5fd3914451da4931472e3290164f7663c726cf86020f8f181e568c72fd172d1","0x839b0b3c25dd412bee3dc24653b873cc65454f8f16186bb707bcd58259c0b6765fa4c195403209179192a4455c95f3b8","0x8689d1a870514568a074a38232e2ceb4d7df30fabeb76cff0aed5b42bf7f02baea12c5fadf69f4713464dbd52aafa55f","0x8958ae7b290f0b00d17c3e9fdb4dbf168432b457c7676829299dd428984aba892de1966fc106cfc58a772862ecce3976","0xa422bc6bd68b8870cfa5bc4ce71781fd7f4368b564d7f1e0917f6013c8bbb5b240a257f89ecfdbecb40fe0f3aa31d310","0xaa61f78130cebe09bc9a2c0a37f0dd57ed2d702962e37d38b1df7f17dc554b1d4b7a39a44182a452ce4c5eb31fa4cfcc","0xb7918bd114f37869bf1a459023386825821bfadce545201929d13ac3256d92a431e34f690a55d944f77d0b652cefeffc","0x819bba35fb6ace1510920d4dcff30aa682a3c9af9022e287751a6a6649b00c5402f14b6309f0aeef8fce312a0402915e","0x8b7c9ad446c6f63c11e1c24e24014bd570862b65d53684e107ba9ad381e81a2eaa96731b4b33536efd55e0f055071274","0x8fe79b53f06d33386c0ec7d6d521183c13199498594a46d44a8a716932c3ec480c60be398650bbfa044fa791c4e99b65","0x9558e10fb81250b9844c99648cf38fa05ec1e65d0ccbb18aa17f2d1f503144baf59d8
02c25be8cc0879fff82ed5034ad","0xb538a7b97fbd702ba84645ca0a63725be1e2891c784b1d599e54e3480e4670d0025526674ef5cf2f87dddf2290ba09f0","0x92eafe2e869a3dd8519bbbceb630585c6eb21712b2f31e1b63067c0acb5f9bdbbcbdb612db4ea7f9cc4e7be83d31973f","0xb40d21390bb813ab7b70a010dff64c57178418c62685761784e37d327ba3cb9ef62df87ecb84277c325a637fe3709732","0xb349e6fbf778c4af35fbed33130bd8a7216ed3ba0a79163ebb556e8eb8e1a7dad3456ddd700dad9d08d202491c51b939","0xa8fdaedecb251f892b66c669e34137f2650509ade5d38fbe8a05d9b9184bb3b2d416186a3640429bd1f3e4b903c159dd","0xac6167ebfee1dbab338eff7642f5e785fc21ef0b4ddd6660333fe398068cbd6c42585f62e81e4edbb72161ce852a1a4f","0x874b1fbf2ebe140c683bd7e4e0ab017afa5d4ad38055aaa83ee6bbef77dbc88a6ce8eb0dcc48f0155244af6f86f34c2d","0x903c58e57ddd9c446afab8256a6bb6c911121e6ccfb4f9b4ed3e2ed922a0e500a5cb7fa379d5285bc16e11dac90d1fda","0x8dae7a0cffa2fd166859cd1bf10ff82dd1932e488af377366b7efc0d5dec85f85fe5e8150ff86a79a39cefc29631733a","0xaa047857a47cc4dfc08585f28640420fcf105b881fd59a6cf7890a36516af0644d143b73f3515ab48faaa621168f8c31","0x864508f7077c266cc0cb3f7f001cb6e27125ebfe79ab57a123a8195f2e27d3799ff98413e8483c533b46a816a3557f1f","0x8bcd45ab1f9cbab36937a27e724af819838f66dfeb15923f8113654ff877bd8667c54f6307aaf0c35027ca11b6229bfd","0xb21aa34da9ab0a48fcfdd291df224697ce0c1ebc0e9b022fdee8750a1a4b5ba421c419541ed5c98b461eecf363047471","0xa9a18a2ab2fae14542dc336269fe612e9c1af6cf0c9ac933679a2f2cb77d3c304114f4d219ca66fe288adde30716775b","0xb5205989b92c58bdda71817f9a897e84100b5c4e708de1fced5c286f7a6f01ae96b1c8d845f3a320d77c8e2703c0e8b1","0xa364059412bbcc17b8907d43ac8e5df90bc87fd1724b5f99832d0d24559fae6fa76a74cff1d1eac8cbac6ec80b44af20","0xae709f2c339886b31450834cf29a38b26eb3b0779bd77c9ac269a8a925d1d78ea3837876c654b61a8fe834b3b6940808","0x8802581bba66e1952ac4dab36af371f66778958f4612901d95e5cac17f59165e6064371d02de8fb6fccf89c6dc8bd118","0xa313252df653e29c672cbcfd2d4f775089cb77be1077381cf4dc9533790e88af6cedc8a119158e7da5bf6806ad9b91a1","0x992a065b4152c7ef11515cd54ba9d191fda44032a01aed954
acff3443377ee16680c7248d530b746b8c6dee2d634e68c","0xb627b683ee2b32c1ab4ccd27b9f6cce2fe097d96386fa0e5c182ad997c4c422ab8dfc03870cd830b8c774feb66537282","0xb823cf8a9aee03dadd013eb9efe40a201b4b57ef67efaae9f99683005f5d1bf55e950bf4af0774f50859d743642d3fea","0xb8a7449ffac0a3f206677097baf7ce00ca07a4d2bd9b5356fbcb83f3649b0fda07cfebad220c1066afba89e5a52abf4b","0xb2dd1a2f986395bb4e3e960fbbe823dbb154f823284ebc9068502c19a7609790ec0073d08bfa63f71e30c7161b6ef966","0x98e5236de4281245234f5d40a25b503505af140b503a035fc25a26159a9074ec81512b28f324c56ea2c9a5aa7ce90805","0x89070847dc8bbf5bc4ed073aa2e2a1f699cf0c2ca226f185a0671cecc54e7d3e14cd475c7752314a7a8e7476829da4bc","0xa9402dc9117fdb39c4734c0688254f23aed3dce94f5f53f5b7ef2b4bf1b71a67f85ab1a38ec224a59691f3bee050aeb3","0x957288f9866a4bf56a4204218ccc583f717d7ce45c01ea27142a7e245ad04a07f289cc044f8cf1f21d35e67e39299e9c","0xb2fb31ccb4e69113763d7247d0fc8edaae69b550c5c56aecacfd780c7217dc672f9fb7496edf4aba65dacf3361268e5b","0xb44a4526b2f1d6eb2aa8dba23bfa385ff7634572ab2afddd0546c3beb630fbfe85a32f42dd287a7fec069041411537f7","0x8db5a6660c3ac7fd7a093573940f068ee79a82bc17312af900b51c8c439336bc86ca646c6b7ab13aaaa008a24ca508ab","0x8f9899a6d7e8eb4367beb5c060a1f8e94d8a21099033ae582118477265155ba9e72176a67f7f25d7bad75a152b56e21a","0xa67de0e91ade8d69a0e00c9ff33ee2909b8a609357095fa12319e6158570c232e5b6f4647522efb7345ce0052aa9d489","0x82eb2414898e9c3023d57907a2b17de8e7eea5269029d05a94bfd7bf5685ac4a799110fbb375eb5e0e2bd16acf6458ae","0x94451fc7fea3c5a89ba701004a9693bab555cb622caf0896b678faba040409fdfd14a978979038b2a81e8f0abc4994d2","0xac879a5bb433998e289809a4a966bd02b4bf6a9c1cc276454e39c886efcf4fc68baebed575826bde577ab5aa71d735a9","0x880c0f8f49c875dfd62b4ddedde0f5c8b19f5687e693717f7e5c031bc580e58e13ab497d48b4874130a18743c59fdce3","0xb582af8d8ff0bf76f0a3934775e0b54c0e8fed893245d7d89cae65b03c8125b7237edc29dc45b4fe1a3fe6db45d280ee","0x89f337882ed3ae060aaee98efa20d79b6822bde9708c1c5fcee365d0ec9297f694cae37d38fd8e3d49717c1e86f078e7","0x826d2c1faea54061848b484e288a5
f4de0d221258178cf87f72e14baaa4acc21322f8c9eab5dde612ef497f2d2e1d60b","0xa5333d4f227543e9cd741ccf3b81db79f2f03ca9e649e40d6a6e8ff9073e06da83683566d3b3c8d7b258c62970fb24d1","0xa28f08c473db06aaf4c043a2fae82b3c8cfaa160bce793a4c208e4e168fb1c65115ff8139dea06453c5963d95e922b94","0x8162546135cc5e124e9683bdfaa45833c18553ff06a0861c887dc84a5b12ae8cd4697f6794c7ef6230492c32faba7014","0xb23f0d05b74c08d6a7df1760792be83a761b36e3f8ae360f3c363fb196e2a9dd2de2e492e49d36561366e14daa77155c","0xb6f70d6c546722d3907c708d630dbe289771d2c8bf059c2e32b77f224696d750b4dda9b3a014debda38e7d02c9a77585","0x83bf4c4a9f3ca022c631017e7a30ea205ba97f7f5927cba8fc8489a4646eac6712cb821c5668c9ffe94d69d524374a27","0xb0371475425a8076d0dd5f733f55aabbe42d20a7c8ea7da352e736d4d35a327b2beb370dfcb05284e22cfd69c5f6c4cc","0xa0031ba7522c79211416c2cca3aa5450f96f8fee711552a30889910970ba13608646538781a2c08b834b140aadd7166f","0x99d273c80c7f2dc6045d4ed355d9fc6f74e93549d961f4a3b73cd38683f905934d359058cd1fc4da8083c7d75070487f","0xb0e4b0efa3237793e9dcce86d75aafe9879c5fa23f0d628649aef2130454dcf72578f9bf227b9d2b9e05617468e82588","0xa5ab076fa2e1c5c51f3ae101afdd596ad9d106bba7882b359c43d8548b64f528af19afa76cd6f40da1e6c5fca4def3fa","0x8ce2299e570331d60f6a6eff1b271097cd5f1c0e1113fc69b89c6a0f685dabea3e5bc2ac6bd789aa492ab189f89be494","0x91b829068874d911a310a5f9dee001021f97471307b5a3de9ec336870ec597413e1d92010ce320b619f38bed7c4f7910","0xb14fe91f4b07bf33b046e9285b66cb07927f3a8da0af548ac2569b4c4fb1309d3ced76d733051a20814e90dd5b75ffd1","0xabaab92ea6152d40f82940277c725aa768a631ee0b37f5961667f82fb990fc11e6d3a6a2752b0c6f94563ed9bb28265c","0xb7fe28543eca2a716859a76ab9092f135337e28109544f6bd2727728d0a7650428af5713171ea60bfc273d1c821d992c","0x8a4917b2ab749fc7343fc64bdf51b6c0698ff15d740cc7baf248c030475c097097d5a473bcc00d8c25817563fe0447b4","0xaa96156d1379553256350a0a3250166add75948fb9cde62aa555a0a9dc0a9cb7f2f7b8428aff66097bf6bfedaf14bbe2","0xae4ffeb9bdc76830d3eca2b705f30c1bdede6412fa064260a21562c8850c7fb611ec62bc68479fe48f692833e6f66d8d","0xb96543caa
ba9d051600a14997765d49e4ab10b07c7a92cccf0c90b309e6da334fdd6d18c96806cbb67a7801024fbd3c7","0x97b2b9ad76f19f500fcc94ca8e434176249f542ac66e5881a3dccd07354bdab6a2157018b19f8459437a68d8b86ba8e0","0xa8d206f6c5a14c80005849474fde44b1e7bcf0b2d52068f5f97504c3c035b09e65e56d1cf4b5322791ae2c2fdbd61859","0x936bad397ad577a70cf99bf9056584a61bd7f02d2d5a6cf219c05d770ae30a5cd902ba38366ce636067fc1dd10108d31","0xa77e30195ee402b84f3882e2286bf5380c0ed374a112dbd11e16cef6b6b61ab209d4635e6f35cdaaa72c1a1981d5dabe","0xa46ba4d3947188590a43c180757886a453a0503f79cc435322d92490446f37419c7b999fdf868a023601078070e03346","0x80d8d4c5542f223d48240b445d4d8cf6a75d120b060bc08c45e99a13028b809d910b534d2ac47fb7068930c54efd8da9","0x803be9c68c91b42b68e1f55e58917a477a9a6265e679ca44ee30d3eb92453f8c89c64eafc04c970d6831edd33d066902","0xb14b2b3d0dfe2bb57cee4cd72765b60ac33c1056580950be005790176543826c1d4fbd737f6cfeada6c735543244ab57","0xa9e480188bba1b8fb7105ff12215706665fd35bf1117bacfb6ab6985f4dbc181229873b82e5e18323c2b8f5de03258e0","0xa66a0f0779436a9a3999996d1e6d3000f22c2cac8e0b29cddef9636393c7f1457fb188a293b6c875b05d68d138a7cc4a","0x848397366300ab40c52d0dbbdafbafef6cd3dadf1503bb14b430f52bb9724188928ac26f6292a2412bc7d7aa620763c8","0x95466cc1a78c9f33a9aaa3829a4c8a690af074916b56f43ae46a67a12bb537a5ac6dbe61590344a25b44e8512355a4a7","0x8b5f7a959f818e3baf0887f140f4575cac093d0aece27e23b823cf421f34d6e4ff4bb8384426e33e8ec7b5eed51f6b5c","0x8d5e1368ec7e3c65640d216bcc5d076f3d9845924c734a34f3558ac0f16e40597c1a775a25bf38b187213fbdba17c93b","0xb4647c1b823516880f60d20c5cc38c7f80b363c19d191e8992226799718ee26b522a12ecb66556ed3d483aa4824f3326","0xac3abaea9cd283eb347efda4ed9086ea3acf495043e08d0d19945876329e8675224b685612a6badf8fd72fb6274902b1","0x8eae1ce292d317aaa71bcf6e77e654914edd5090e2e1ebab78b18bb41b9b1bc2e697439f54a44c0c8aa0d436ebe6e1a9","0x94dc7d1aec2c28eb43d93b111fa59aaa0d77d5a09501220bd411768c3e52208806abf973c6a452fd8292ff6490e0c9e2","0x8fd8967f8e506fef27d17b435d6b86b232ec71c1036351f12e6fb8a2e12daf01d0ee04451fb944d0f1bf7fd20e
714d02","0x824e6865be55d43032f0fec65b3480ea89b0a2bf860872237a19a54bc186a85d2f8f9989cc837fbb325b7c72d9babe2c","0x8bd361f5adb27fd6f4e3f5de866e2befda6a8454efeb704aacc606f528c03f0faae888f60310e49440496abd84083ce2","0xb098a3c49f2aaa28b6b3e85bc40ce6a9cdd02134ee522ae73771e667ad7629c8d82c393fba9f27f5416986af4c261438","0xb385f5ca285ff2cfe64dcaa32dcde869c28996ed091542600a0b46f65f3f5a38428cca46029ede72b6cf43e12279e3d3","0x8196b03d011e5be5288196ef7d47137d6f9237a635ab913acdf9c595fa521d9e2df722090ec7eb0203544ee88178fc5f","0x8ed1270211ef928db18e502271b7edf24d0bbd11d97f2786aee772d70c2029e28095cf8f650b0328cc8a4c38d045316d","0xa52ab60e28d69b333d597a445884d44fd2a7e1923dd60f763951e1e45f83e27a4dac745f3b9eff75977b3280e132c15d","0x91e9fe78cdac578f4a4687f71b800b35da54b824b1886dafec073a3c977ce7a25038a2f3a5b1e35c2c8c9d1a7312417c","0xa42832173f9d9491c7bd93b21497fbfa4121687cd4d2ab572e80753d7edcbb42cfa49f460026fbde52f420786751a138","0x97b947126d84dcc70c97be3c04b3de3f239b1c4914342fa643b1a4bb8c4fe45c0fcb585700d13a7ed50784790c54bef9","0x860e407d353eac070e2418ef6cb80b96fc5f6661d6333e634f6f306779651588037be4c2419562c89c61f9aa2c4947f5","0xb2c9d93c3ba4e511b0560b55d3501bf28a510745fd666b3cb532db051e6a8617841ea2f071dda6c9f15619c7bfd2737f","0x8596f4d239aeeac78311207904d1bd863ef68e769629cc379db60e019aaf05a9d5cd31dc8e630b31e106a3a93e47cbc5","0x8b26e14e2e136b65c5e9e5c2022cee8c255834ea427552f780a6ca130a6446102f2a6f334c3f9a0308c53df09e3dba7e","0xb54724354eb515a3c8bed0d0677ff1db94ac0a07043459b4358cb90e3e1aa38ac23f2caa3072cf9647275d7cd61d0e80","0xb7ce9fe0e515e7a6b2d7ddcb92bc0196416ff04199326aea57996eef8c5b1548bd8569012210da317f7c0074691d01b7","0xa1a13549c82c877253ddefa36a29ea6a23695ee401fdd48e65f6f61e5ebd956d5e0edeff99484e9075cb35071fec41e2","0x838ba0c1e5bd1a6da05611ff1822b8622457ebd019cb065ece36a2d176bd2d889511328120b8a357e44569e7f640c1e6","0xb916eccff2a95519400bbf76b5f576cbe53cf200410370a19d77734dc04c05b585cfe382e8864e67142d548cd3c4c2f4","0xa610447cb7ca6eea53a6ff1f5fe562377dcb7f4aaa7300f755a4f5e8eba61e863c51dc
2aa9a29b35525b550fbc32a0fe","0x9620e8f0f0ee9a4719aa9685eeb1049c5c77659ba6149ec4c158f999cfd09514794b23388879931fe26fea03fa471fd3","0xa9dcf8b679e276583cf5b9360702a185470d09aea463dc474ee9c8aee91ef089dacb073e334e47fbc78ec5417c90465c","0x8c9adee8410bdd99e5b285744cee61e2593b6300ff31a8a83b0ec28da59475a5c6fb9346fe43aadea2e6c3dad2a8e30a","0x97d5afe9b3897d7b8bb628b7220cf02d8ee4e9d0b78f5000d500aaf4c1df9251aaaabfd1601626519f9d66f00a821d4e","0x8a382418157b601ce4c3501d3b8409ca98136a4ef6abcbf62885e16e215b76b035c94d149cc41ff92e42ccd7c43b9b3d","0xb64b8d11fb3b01abb2646ac99fdb9c02b804ce15d98f9fe0fbf1c9df8440c71417487feb6cdf51e3e81d37104b19e012","0x849d7d044f9d8f0aab346a9374f0b3a5d14a9d1faa83dbacccbdc629ad1ef903a990940255564770537f8567521d17f0","0x829dbb0c76b996c2a91b4cbbe93ba455ca0d5729755e5f0c92aaee37dff7f36fcdc06f33aca41f1b609c784127b67d88","0x85a7c0069047b978422d264d831ab816435f63938015d2e977222b6b5746066c0071b7f89267027f8a975206ed25c1b0","0x84b9fbc1cfb302df1acdcf3dc5d66fd1edfe7839f7a3b2fb3a0d5548656249dd556104d7c32b73967bccf0f5bdcf9e3b","0x972220ac5b807f53eac37dccfc2ad355d8b21ea6a9c9b011c09fe440ddcdf7513e0b43d7692c09ded80d7040e26aa28f","0x855885ed0b21350baeca890811f344c553cf9c21024649c722453138ba29193c6b02c4b4994cd414035486f923472e28","0x841874783ae6d9d0e59daea03e96a01cbbe4ecaced91ae4f2c8386e0d87b3128e6d893c98d17c59e4de1098e1ad519dd","0x827e50fc9ce56f97a4c3f2f4cbaf0b22f1c3ce6f844ff0ef93a9c57a09b8bf91ebfbd2ba9c7f83c442920bffdaf288cc","0xa441f9136c7aa4c08d5b3534921b730e41ee91ab506313e1ba5f7c6f19fd2d2e1594e88c219834e92e6fb95356385aa7","0x97d75b144471bf580099dd6842b823ec0e6c1fb86dd0da0db195e65524129ea8b6fd4a7a9bbf37146269e938a6956596","0xa4b6fa87f09d5a29252efb2b3aaab6b3b6ea9fab343132a651630206254a25378e3e9d6c96c3d14c150d01817d375a8e","0xa31a671876d5d1e95fe2b8858dc69967231190880529d57d3cab7f9f4a2b9b458ac9ee5bdaa3289158141bf18f559efb","0x90bee6fff4338ba825974021b3b2a84e36d617e53857321f13d2b3d4a28954e6de3b3c0e629d61823d18a9763313b3bf","0x96b622a63153f393bb419bfcf88272ea8b3560dbd46b0aa07a
da3a6223990d0abdd6c2adb356ef4be5641688c8d83941","0x84c202adeaff9293698022bc0381adba2cd959f9a35a4e8472288fd68f96f6de8be9da314c526d88e291c96b1f3d6db9","0x8ca01a143b8d13809e5a8024d03e6bc9492e22226073ef6e327edf1328ef4aff82d0bcccee92cb8e212831fa35fe1204","0xb2f970dbad15bfbefb38903c9bcc043d1367055c55dc1100a850f5eb816a4252c8c194b3132c929105511e14ea10a67d","0xa5e36556472a95ad57eb90c3b6623671b03eafd842238f01a081997ffc6e2401f76e781d049bb4aa94d899313577a9cf","0x8d1057071051772f7c8bedce53a862af6fd530dd56ae6321eaf2b9fc6a68beff5ed745e1c429ad09d5a118650bfd420a","0x8aadc4f70ace4fcb8d93a78610779748dcffc36182d45b932c226dc90e48238ea5daa91f137c65ed532352c4c4d57416","0xa2ea05ae37e673b4343232ae685ee14e6b88b867aef6dfac35db3589cbcd76f99540fed5c2641d5bb5a4a9f808e9bf0d","0x947f1abad982d65648ae4978e094332b4ecb90f482c9be5741d5d1cf5a28acf4680f1977bf6e49dd2174c37f11e01296","0xa27b144f1565e4047ba0e3f4840ef19b5095d1e281eaa463c5358f932114cbd018aa6dcf97546465cf2946d014d8e6d6","0x8574e1fc3acade47cd4539df578ce9205e745e161b91e59e4d088711a7ab5aa3b410d517d7304b92109924d9e2af8895","0xa48ee6b86b88015d6f0d282c1ae01d2a5b9e8c7aa3d0c18b35943dceb1af580d08a65f54dc6903cde82fd0d73ce94722","0x8875650cec543a7bf02ea4f2848a61d167a66c91ffaefe31a9e38dc8511c6a25bde431007eefe27a62af3655aca208dc","0x999b0a6e040372e61937bf0d68374e230346b654b5a0f591a59d33a4f95bdb2f3581db7c7ccb420cd7699ed709c50713","0x878c9e56c7100c5e47bbe77dc8da5c5fe706cec94d37fa729633bca63cace7c40102eee780fcdabb655f5fa47a99600e","0x865006fb5b475ada5e935f27b96f9425fc2d5449a3c106aa366e55ebed3b4ee42adc3c3f0ac19fd129b40bc7d6bc4f63","0xb7a7da847f1202e7bc1672553e68904715e84fd897d529243e3ecda59faa4e17ba99c649a802d53f6b8dfdd51f01fb74","0x8b2fb4432c05653303d8c8436473682933a5cb604da10c118ecfcd2c8a0e3132e125afef562bdbcc3df936164e5ce4f2","0x808d95762d33ddfa5d0ee3d7d9f327de21a994d681a5f372e2e3632963ea974da7f1f9e5bac8ccce24293509d1f54d27","0x932946532e3c397990a1df0e94c90e1e45133e347a39b6714c695be21aeb2d309504cb6b1dde7228ff6f6353f73e1ca2","0x9705e7c93f0cdfaa3fa96821f830fe
53402ad0806036cd1b48adc2f022d8e781c1fbdab60215ce85c653203d98426da3","0xaa180819531c3ec1feb829d789cb2092964c069974ae4faad60e04a6afcce5c3a59aec9f11291e6d110a788d22532bc6","0x88f755097f7e25cb7dd3c449520c89b83ae9e119778efabb54fbd5c5714b6f37c5f9e0346c58c6ab09c1aef2483f895d","0x99fc03ab7810e94104c494f7e40b900f475fde65bdec853e60807ffd3f531d74de43335c3b2646b5b8c26804a7448898","0xaf2dea9683086bed1a179110efb227c9c00e76cd00a2015b089ccbcee46d1134aa18bda5d6cab6f82ae4c5cd2461ac21","0xa500f87ba9744787fdbb8e750702a3fd229de6b8817594348dec9a723b3c4240ddfa066262d002844b9e38240ce55658","0x924d0e45c780f5bc1c1f35d15dfc3da28036bdb59e4c5440606750ecc991b85be18bc9a240b6c983bc5430baa4c68287","0x865b11e0157b8bf4c5f336024b016a0162fc093069d44ac494723f56648bc4ded13dfb3896e924959ea11c96321afefc","0x93672d8607d4143a8f7894f1dcca83fb84906dc8d6dd7dd063bb0049cfc20c1efd933e06ca7bd03ea4cb5a5037990bfe","0x826891efbdff0360446825a61cd1fa04326dd90dae8c33dfb1ed97b045e165766dd070bd7105560994d0b2044bdea418","0x93c4a4a8bcbc8b190485cc3bc04175b7c0ed002c28c98a540919effd6ed908e540e6594f6db95cd65823017258fb3b1c","0xaeb2a0af2d2239fda9aa6b8234b019708e8f792834ff0dd9c487fa09d29800ddceddd6d7929faa9a3edcb9e1b3aa0d6b","0x87f11de7236d387863ec660d2b04db9ac08143a9a2c4dfff87727c95b4b1477e3bc473a91e5797313c58754905079643","0x80dc1db20067a844fe8baceca77f80db171a5ca967acb24e2d480eae9ceb91a3343c31ad1c95b721f390829084f0eae6","0x9825c31f1c18da0de3fa84399c8b40f8002c3cae211fb6a0623c76b097b4d39f5c50058f57a16362f7a575909d0a44a2","0xa99fc8de0c38dbf7b9e946de83943a6b46a762167bafe2a603fb9b86f094da30d6de7ed55d639aafc91936923ee414b3","0xad594678b407db5d6ea2e90528121f84f2b96a4113a252a30d359a721429857c204c1c1c4ff71d8bb5768c833f82e80e","0xb33d985e847b54510b9b007e31053732c8a495e43be158bd2ffcea25c6765bcbc7ca815f7c60b36ad088b955dd6e9350","0x815f8dfc6f90b3342ca3fbd968c67f324dae8f74245cbf8bc3bef10e9440c65d3a2151f951e8d18959ba01c1b50b0ec1","0x94c608a362dd732a1abc56e338637c900d59013db8668e49398b3c7a0cae3f7e2f1d1bf94c0299eeafe6af7f76c88618","0x8ebd8446b2
3e5adfcc393adc5c52fe172f030a73e63cd2d515245ca0dd02782ceed5bcdd9ccd9c1b4c5953dfac9c340c","0x820437f3f6f9ad0f5d7502815b221b83755eb8dc56cd92c29e9535eb0b48fb8d08c9e4fcc26945f9c8cca60d89c44710","0x8910e4e8a56bf4be9cc3bbf0bf6b1182a2f48837a2ed3c2aaec7099bfd7f0c83e14e608876b17893a98021ff4ab2f20d","0x9633918fde348573eec15ce0ad53ac7e1823aac86429710a376ad661002ae6d049ded879383faaa139435122f64047c6","0xa1f5e3fa558a9e89318ca87978492f0fb4f6e54a9735c1b8d2ecfb1d1c57194ded6e0dd82d077b2d54251f3bee1279e1","0xb208e22d04896abfd515a95c429ff318e87ff81a5d534c8ac2c33c052d6ffb73ef1dccd39c0bbe0734b596c384014766","0x986d5d7d2b5bde6d16336f378bd13d0e671ad23a8ec8a10b3fc09036faeeb069f60662138d7a6df3dfb8e0d36180f770","0xa2d4e6c5f5569e9cef1cddb569515d4b6ace38c8aed594f06da7434ba6b24477392cc67ba867c2b079545ca0c625c457","0xb5ac32b1d231957d91c8b7fc43115ce3c5c0d8c13ca633374402fa8000b6d9fb19499f9181844f0c10b47357f3f757ce","0x96b8bf2504b4d28fa34a4ec378e0e0b684890c5f44b7a6bb6e19d7b3db2ab27b1e2686389d1de9fbd981962833a313ea","0x953bfd7f6c3a0469ad432072b9679a25486f5f4828092401eff494cfb46656c958641a4e6d0d97d400bc59d92dba0030","0x876ab3cea7484bbfd0db621ec085b9ac885d94ab55c4bb671168d82b92e609754b86aaf472c55df3d81421d768fd108a","0x885ff4e67d9ece646d02dd425aa5a087e485c3f280c3471b77532b0db6145b69b0fbefb18aa2e3fa5b64928b43a94e57","0xb91931d93f806d0b0e6cc62a53c718c099526140f50f45d94b8bbb57d71e78647e06ee7b42aa5714aed9a5c05ac8533f","0xa0313eeadd39c720c9c27b3d671215331ab8d0a794e71e7e690f06bcd87722b531d6525060c358f35f5705dbb7109ccb","0x874c0944b7fedc6701e53344100612ddcb495351e29305c00ec40a7276ea5455465ffb7bded898886c1853139dfb1fc7","0x8dc31701a01ee8137059ca1874a015130d3024823c0576aa9243e6942ec99d377e7715ed1444cd9b750a64b85dcaa3e5","0x836d2a757405e922ec9a2dfdcf489a58bd48b5f9683dd46bf6047688f778c8dee9bc456de806f70464df0b25f3f3d238","0xb30b0a1e454a503ea3e2efdec7483eaf20b0a5c3cefc42069e891952b35d4b2c955cf615f3066285ed8fafd9fcfbb8f6","0x8e6d4044b55ab747e83ec8762ea86845f1785cc7be0279c075dadf08aca3ccc5a096c015bb3c3f738f647a4eade
a3ba5","0xad7735d16ab03cbe09c029610aa625133a6daecfc990b297205b6da98eda8c136a7c50db90f426d35069708510d5ae9c","0x8d62d858bbb59ec3c8cc9acda002e08addab4d3ad143b3812098f3d9087a1b4a1bb255dcb1635da2402487d8d0249161","0x805beec33238b832e8530645a3254aeef957e8f7ea24bcfc1054f8b9c69421145ebb8f9d893237e8a001c857fedfc77e","0xb1005644be4b085e3f5775aa9bd3e09a283e87ddada3082c04e7a62d303dcef3b8cf8f92944c200c7ae6bb6bdf63f832","0xb4ba0e0790dc29063e577474ffe3b61f5ea2508169f5adc1e394934ebb473e356239413a17962bc3e5d3762d72cce8c2","0xa157ba9169c9e3e6748d9f1dd67fbe08b9114ade4c5d8fc475f87a764fb7e6f1d21f66d7905cd730f28a1c2d8378682a","0x913e52b5c93989b5d15e0d91aa0f19f78d592bc28bcfdfddc885a9980c732b1f4debb8166a7c4083c42aeda93a702898","0x90fbfc1567e7cd4e096a38433704d3f96a2de2f6ed3371515ccc30bc4dd0721a704487d25a97f3c3d7e4344472702d8d","0x89646043028ffee4b69d346907586fd12c2c0730f024acb1481abea478e61031966e72072ff1d5e65cb8c64a69ad4eb1","0xb125a45e86117ee11d2fb42f680ab4a7894edd67ff927ae2c808920c66c3e55f6a9d4588eee906f33a05d592e5ec3c04","0xaad47f5b41eae9be55fb4f67674ff1e4ae2482897676f964a4d2dcb6982252ee4ff56aac49578b23f72d1fced707525e","0xb9ddff8986145e33851b4de54d3e81faa3352e8385895f357734085a1616ef61c692d925fe62a5ed3be8ca49f5d66306","0xb3cb0963387ed28c0c0adf7fe645f02606e6e1780a24d6cecef5b7c642499109974c81a7c2a198b19862eedcea2c2d8c","0xac9c53c885457aaf5cb36c717a6f4077af701e0098eebd7aa600f5e4b14e6c1067255b3a0bc40e4a552025231be7de60","0x8e1a8d823c4603f6648ec21d064101094f2a762a4ed37dd2f0a2d9aa97b2d850ce1e76f4a4b8cae58819b058180f7031","0xb268b73bf7a179b6d22bd37e5e8cb514e9f5f8968c78e14e4f6d5700ca0d0ca5081d0344bb73b028970eebde3cb4124e","0xa7f57d71940f0edbd29ed8473d0149cae71d921dd15d1ff589774003e816b54b24de2620871108cec1ab9fa956ad6ce6","0x8053e6416c8b120e2b999cc2fc420a6a55094c61ac7f2a6c6f0a2c108a320890e389af96cbe378936132363c0d551277","0xb3823f4511125e5aa0f4269e991b435a0d6ceb523ebd91c04d7add5534e3df5fc951c504b4fd412a309fd3726b7f940b","0xae6eb04674d04e982ca9a6add30370ab90e303c71486f43ed3efbe431af1b0e43e9d06c
11c3412651f304c473e7dbf39","0x96ab55e641ed2e677591f7379a3cd126449614181fce403e93e89b1645d82c4af524381ff986cae7f9cebe676878646d","0xb52423b4a8c37d3c3e2eca8f0ddbf7abe0938855f33a0af50f117fab26415fb0a3da5405908ec5fdc22a2c1f2ca64892","0x82a69ce1ee92a09cc709d0e3cd22116c9f69d28ea507fe5901f5676000b5179b9abe4c1875d052b0dd42d39925e186bb","0xa84c8cb84b9d5cfb69a5414f0a5283a5f2e90739e9362a1e8c784b96381b59ac6c18723a4aa45988ee8ef5c1f45cc97d","0xafd7efce6b36813082eb98257aae22a4c1ae97d51cac7ea9c852d4a66d05ef2732116137d8432e3f117119725a817d24","0xa0f5fe25af3ce021b706fcff05f3d825384a272284d04735574ce5fb256bf27100fad0b1f1ba0e54ae9dcbb9570ecad3","0x8751786cb80e2e1ff819fc7fa31c2833d25086534eb12b373d31f826382430acfd87023d2a688c65b5e983927e146336","0x8cf5c4b17fa4f3d35c78ce41e1dc86988fd1135cd5e6b2bb0c108ee13538d0d09ae7102609c6070f39f937b439b31e33","0xa9108967a2fedd7c322711eca8159c533dd561bedcb181b646de98bf5c3079449478eab579731bee8d215ae8852c7e21","0xb54c5171704f42a6f0f4e70767cdb3d96ffc4888c842eece343a01557da405961d53ffdc34d2f902ea25d3e1ed867cad","0xae8d4b764a7a25330ba205bf77e9f46182cd60f94a336bbd96773cf8064e3d39caf04c310680943dc89ed1fbad2c6e0d","0xaa5150e911a8e1346868e1b71c5a01e2a4bb8632c195861fb6c3038a0e9b85f0e09b3822e9283654a4d7bb17db2fc5f4","0x9685d3756ce9069bf8bb716cf7d5063ebfafe37e15b137fc8c3159633c4e006ff4887ddd0ae90360767a25c3f90cba7f","0x82155fd70f107ab3c8e414eadf226c797e07b65911508c76c554445422325e71af8c9a8e77fd52d94412a6fc29417cd3","0xabfae52f53a4b6e00760468d973a267f29321997c3dbb5aee36dc1f20619551229c0c45b9d9749f410e7f531b73378e8","0x81a76d921f8ef88e774fd985e786a4a330d779b93fad7def718c014685ca0247379e2e2a007ad63ee7f729cd9ed6ce1b","0x81947c84bc5e28e26e2e533af5ae8fe10407a7b77436dbf8f1d5b0bbe86fc659eae10f974659dc7c826c6dabd03e3a4b","0x92b8c07050d635b8dd4fd09df9054efe4edae6b86a63c292e73cc819a12a21dd7d104ce51fa56af6539dedf6dbe6f7b6","0xb44c579e3881f32b32d20c82c207307eca08e44995dd2aac3b2692d2c8eb2a325626c80ac81c26eeb38c4137ff95add5","0x97efab8941c90c30860926dea69a841f2dcd02980bf5413b9fd
78d85904588bf0c1021798dbc16c8bbb32cce66c82621","0x913363012528b50698e904de0588bf55c8ec5cf6f0367cfd42095c4468fcc64954fbf784508073e542fee242d0743867","0x8ed203cf215148296454012bd10fddaf119203db1919a7b3d2cdc9f80e66729464fdfae42f1f2fc5af1ed53a42b40024","0xab84312db7b87d711e9a60824f4fe50e7a6190bf92e1628688dfcb38930fe87b2d53f9e14dd4de509b2216856d8d9188","0x880726def069c160278b12d2258eac8fa63f729cd351a710d28b7e601c6712903c3ac1e7bbd0d21e4a15f13ca49db5aa","0x980699cd51bac6283959765f5174e543ed1e5f5584b5127980cbc2ef18d984ecabba45042c6773b447b8e694db066028","0xaeb019cb80dc4cb4207430d0f2cd24c9888998b6f21d9bf286cc638449668d2eec0018a4cf3fe6448673cd6729335e2b","0xb29852f6aa6c60effdffe96ae88590c88abae732561d35cc19e82d3a51e26cb35ea00986193e07f90060756240f5346e","0xa0fa855adc5ba469f35800c48414b8921455950a5c0a49945d1ef6e8f2a1881f2e2dfae47de6417270a6bf49deeb091d","0xb6c7332e3b14813641e7272d4f69ecc7e09081df0037d6dab97ce13a9e58510f5c930d300633f208181d9205c5534001","0x85a6c050f42fce560b5a8d54a11c3bbb8407abbadd859647a7b0c21c4b579ec65671098b74f10a16245dc779dff7838e","0x8f3eb34bb68759d53c6677de4de78a6c24dd32c8962a7fb355ed362572ef8253733e6b52bc21c9f92ecd875020a9b8de","0xa17dd44181e5dab4dbc128e1af93ec22624b57a448ca65d2d9e246797e4af7d079e09c6e0dfb62db3a9957ce92f098d5","0xa56a1b854c3183082543a8685bb34cae1289f86cfa8123a579049dbd059e77982886bfeb61bf6e05b4b1fe4e620932e7","0xaedae3033cb2fb7628cb4803435bdd7757370a86f808ae4cecb9a268ad0e875f308c048c80cbcac523de16b609683887","0x9344905376aa3982b1179497fac5a1d74b14b7038fd15e3b002db4c11c8bfc7c39430db492cdaf58b9c47996c9901f28","0xa3bfafdae011a19f030c749c3b071f83580dee97dd6f949e790366f95618ca9f828f1daaeabad6dcd664fcef81b6556d","0x81c03d8429129e7e04434dee2c529194ddb01b414feda3adee2271eb680f6c85ec872a55c9fa9d2096f517e13ed5abcc","0x98205ef3a72dff54c5a9c82d293c3e45d908946fa74bb749c3aabe1ab994ea93c269bcce1a266d2fe67a8f02133c5985","0x85a70aeed09fda24412fadbafbbbf5ba1e00ac92885df329e147bfafa97b57629a3582115b780d8549d07d19b7867715","0xb0fbe81c719f89a57d9ea3397705f89
8175808c5f75f8eb81c2193a0b555869ba7bd2e6bc54ee8a60cea11735e21c68c","0xb03a0bd160495ee626ff3a5c7d95bc79d7da7e5a96f6d10116600c8fa20bedd1132f5170f25a22371a34a2d763f2d6d0","0xa90ab04091fbca9f433b885e6c1d60ab45f6f1daf4b35ec22b09909d493a6aab65ce41a6f30c98239cbca27022f61a8b","0xb66f92aa3bf2549f9b60b86f99a0bd19cbdd97036d4ae71ca4b83d669607f275260a497208f6476cde1931d9712c2402","0xb08e1fdf20e6a9b0b4942f14fa339551c3175c1ffc5d0ab5b226b6e6a322e9eb0ba96adc5c8d59ca4259e2bdd04a7eb0","0xa2812231e92c1ce74d4f5ac3ab6698520288db6a38398bb38a914ac9326519580af17ae3e27cde26607e698294022c81","0xabfcbbcf1d3b9e84c02499003e490a1d5d9a2841a9e50c7babbef0b2dd20d7483371d4dc629ba07faf46db659459d296","0xb0fe9f98c3da70927c23f2975a9dc4789194d81932d2ad0f3b00843dd9cbd7fb60747a1da8fe5a79f136a601becf279d","0xb130a6dba7645165348cb90f023713bed0eefbd90a976b313521c60a36d34f02032e69a2bdcf5361e343ed46911297ec","0x862f0cffe3020cea7a5fd4703353aa1eb1be335e3b712b29d079ff9f7090d1d8b12013011e1bdcbaa80c44641fd37c9f","0x8c6f11123b26633e1abb9ed857e0bce845b2b3df91cc7b013b2fc77b477eee445da0285fc6fc793e29d5912977f40916","0x91381846126ea819d40f84d3005e9fb233dc80071d1f9bb07f102bf015f813f61e5884ffffb4f5cd333c1b1e38a05a58","0x8add7d908de6e1775adbd39c29a391f06692b936518db1f8fde74eb4f533fc510673a59afb86e3a9b52ade96e3004c57","0x8780e086a244a092206edcde625cafb87c9ab1f89cc3e0d378bc9ee776313836160960a82ec397bc3800c0a0ec3da283","0xa6cb4cd9481e22870fdd757fae0785edf4635e7aacb18072fe8dc5876d0bab53fb99ce40964a7d3e8bcfff6f0ab1332f","0xaf30ff47ecc5b543efba1ba4706921066ca8bb625f40e530fb668aea0551c7647a9d126e8aba282fbcce168c3e7e0130","0x91b0bcf408ce3c11555dcb80c4410b5bc2386d3c05caec0b653352377efdcb6bab4827f2018671fc8e4a0e90d772acc1","0xa9430b975ef138b6b2944c7baded8fe102d31da4cfe3bd3d8778bda79189c99d38176a19c848a19e2d1ee0bddd9a13c1","0xaa5a4eef849d7c9d2f4b018bd01271c1dd83f771de860c4261f385d3bdcc130218495860a1de298f14b703ec32fa235f","0xb0ce79e7f9ae57abe4ff366146c3b9bfb38b0dee09c28c28f5981a5d234c6810ad4d582751948affb480d6ae1c8c31c4","0xb7512274856
0f73d15c01a8907d36d06dc068e82ce22b84b322ac1f727034493572f7907dec34ebc3ddcc976f2f89ed7","0xb0fc7836369a3e4411d34792d6bd5617c14f61d9bba023dda64e89dc5fb0f423244e9b48ee64869258931daa9753a56f","0x8956d7455ae9009d70c6e4a0bcd7610e55f37494cf9897a8f9e1b904cc8febc3fd2d642ebd09025cfff4609ad7e3bc52","0xad741efe9e472026aa49ae3d9914cb9c1a6f37a54f1a6fe6419bebd8c7d68dca105a751c7859f4389505ede40a0de786","0xb52f418797d719f0d0d0ffb0846788b5cba5d0454a69a2925de4b0b80fa4dd7e8c445e5eac40afd92897ed28ca650566","0xa0ab65fb9d42dd966cd93b1de01d7c822694669dd2b7a0c04d99cd0f3c3de795f387b9c92da11353412f33af5c950e9a","0xa0052f44a31e5741a331f7cac515a08b3325666d388880162d9a7b97598fde8b61f9ff35ff220df224eb5c4e40ef0567","0xa0101cfdc94e42b2b976c0d89612a720e55d145a5ef6ef6f1f78cf6de084a49973d9b5d45915349c34ce712512191e3c","0xa0dd99fcf3f5cead5aaf08e82212df3a8bb543c407a4d6fab88dc5130c1769df3f147e934a46f291d6c1a55d92b86917","0xa5939153f0d1931bbda5cf6bdf20562519ea55fbfa978d6dbc6828d298260c0da7a50c37c34f386e59431301a96c2232","0x9568269f3f5257200f9ca44afe1174a5d3cf92950a7f553e50e279c239e156a9faaa2a67f288e3d5100b4142efe64856","0xb746b0832866c23288e07f24991bbf687cad794e7b794d3d3b79367566ca617d38af586cdc8d6f4a85a34835be41d54f","0xa871ce28e39ab467706e32fec1669fda5a4abba2f8c209c6745df9f7a0fa36bbf1919cf14cb89ea26fa214c4c907ae03","0xa08dacdd758e523cb8484f6bd070642c0c20e184abdf8e2a601f61507e93952d5b8b0c723c34fcbdd70a8485eec29db2","0x85bdb78d501382bb95f1166b8d032941005661aefd17a5ac32df9a3a18e9df2fc5dc2c1f07075f9641af10353cecc0c9","0x98d730c28f6fa692a389e97e368b58f4d95382fad8f0baa58e71a3d7baaea1988ead47b13742ce587456f083636fa98e","0xa557198c6f3d5382be9fb363feb02e2e243b0c3c61337b3f1801c4a0943f18e38ce1a1c36b5c289c8fa2aa9d58742bab","0x89174f79201742220ac689c403fc7b243eed4f8e3f2f8aba0bf183e6f5d4907cb55ade3e238e3623d9885f03155c4d2b","0xb891d600132a86709e06f3381158db300975f73ea4c1f7c100358e14e98c5fbe792a9af666b85c4e402707c3f2db321e","0xb9e5b2529ef1043278c939373fc0dbafe446def52ddd0a8edecd3e4b736de87e63e187df853c54c28d865de18a35
8bb6","0x8589b2e9770340c64679062c5badb7bbef68f55476289b19511a158a9a721f197da03ece3309e059fc4468b15ac33aa3","0xaad8c6cd01d785a881b446f06f1e9cd71bca74ba98674c2dcddc8af01c40aa7a6d469037498b5602e76e9c91a58d3dbd","0xabaccb1bd918a8465f1bf8dbe2c9ad4775c620b055550b949a399f30cf0d9eb909f3851f5b55e38f9e461e762f88f499","0xae62339d26db46e85f157c0151bd29916d5cc619bd4b832814b3fd2f00af8f38e7f0f09932ffe5bba692005dab2d9a74","0x93a6ff30a5c0edf8058c89aba8c3259e0f1b1be1b80e67682de651e5346f7e1b4b4ac3d87cbaebf198cf779524aff6bf","0x8980a2b1d8f574af45b459193c952400b10a86122b71fca2acb75ee0dbd492e7e1ef5b959baf609a5172115e371f3177","0x8c2f49f3666faee6940c75e8c7f6f8edc3f704cca7a858bbb7ee5e96bba3b0cf0993996f781ba6be3b0821ef4cb75039","0xb14b9e348215b278696018330f63c38db100b0542cfc5be11dc33046e3bca6a13034c4ae40d9cef9ea8b34fef0910c4e","0xb59bc3d0a30d66c16e6a411cb641f348cb1135186d5f69fda8b0a0934a5a2e7f6199095ba319ec87d3fe8f1ec4a06368","0x8874aca2a3767aa198e4c3fec2d9c62d496bc41ff71ce242e9e082b7f38cdf356089295f80a301a3cf1182bde5308c97","0xb1820ebd61376d91232423fc20bf008b2ba37e761199f4ef0648ea2bd70282766799b4de814846d2f4d516d525c8daa7","0xa6b202e5dedc16a4073e04a11af3a8509b23dfe5a1952f899adeb240e75c3f5bde0c424f811a81ea48d343591faffe46","0xa69becee9c93734805523b92150a59a62eed4934f66056b645728740d42223f2925a1ad38359ba644da24d9414f4cdda","0xad72f0f1305e37c7e6b48c272323ee883320994cb2e0d850905d6655fafc9f361389bcb9c66b3ff8d2051dbb58c8aa96","0xb563600bd56fad7c8853af21c6a02a16ed9d8a8bbeea2c31731d63b976d83cb05b9779372d898233e8fd597a75424797","0xb0abb78ce465bf7051f563c62e8be9c57a2cc997f47c82819300f36e301fefd908894bb2053a9d27ce2d0f8c46d88b5b","0xa071a85fb8274bac2202e0cb8e0e2028a5e138a82d6e0374d39ca1884a549c7c401312f00071b91f455c3a2afcfe0cda","0xb931c271513a0f267b9f41444a5650b1918100b8f1a64959c552aff4e2193cc1b9927906c6fa7b8a8c68ef13d79aaa52","0xa6a1bb9c7d32cb0ca44d8b75af7e40479fbce67d216b48a2bb680d3f3a772003a49d3cd675fc64e9e0f8fabeb86d6d61","0xb98d609858671543e1c3b8564162ad828808bb50ded261a9f8690ded5b665ed8368c58f9
47365ed6e84e5a12e27b423d","0xb3dca58cd69ec855e2701a1d66cad86717ff103ef862c490399c771ad28f675680f9500cb97be48de34bcdc1e4503ffd","0xb34867c6735d3c49865e246ddf6c3b33baf8e6f164db3406a64ebce4768cb46b0309635e11be985fee09ab7a31d81402","0xacb966c554188c5b266624208f31fab250b3aa197adbdd14aee5ab27d7fb886eb4350985c553b20fdf66d5d332bfd3fe","0x943c36a18223d6c870d54c3b051ef08d802b85e9dd6de37a51c932f90191890656c06adfa883c87b906557ae32d09da0","0x81bca7954d0b9b6c3d4528aadf83e4bc2ef9ea143d6209bc45ae9e7ae9787dbcd8333c41f12c0b6deee8dcb6805e826a","0xaba176b92256efb68f574e543479e5cf0376889fb48e3db4ebfb7cba91e4d9bcf19dcfec444c6622d9398f06de29e2b9","0xb9f743691448053216f6ece7cd699871fff4217a1409ceb8ab7bdf3312d11696d62c74b0664ba0a631b1e0237a8a0361","0xa383c2b6276fa9af346b21609326b53fb14fdf6f61676683076e80f375b603645f2051985706d0401e6fbed7eb0666b6","0xa9ef2f63ec6d9beb8f3d04e36807d84bda87bdd6b351a3e4a9bf7edcb5618c46c1f58cfbf89e64b40f550915c6988447","0xa141b2d7a82f5005eaea7ae7d112c6788b9b95121e5b70b7168d971812f3381de8b0082ac1f0a82c7d365922ebd2d26a","0xb1b76ef8120e66e1535c17038b75255a07849935d3128e3e99e56567b842fb1e8d56ef932d508d2fb18b82f7868fe1a9","0x8e2e234684c81f21099f5c54f6bbe2dd01e3b172623836c77668a0c49ce1fe218786c3827e4d9ae2ea25c50a8924fb3c","0xa5caf5ff948bfd3c4ca3ffbdfcd91eec83214a6c6017235f309a0bbf7061d3b0b466307c00b44a1009cf575163898b43","0x986415a82ca16ebb107b4c50b0c023c28714281db0bcdab589f6cb13d80e473a3034b7081b3c358e725833f6d845cb14","0xb94836bf406ac2cbacb10e6df5bcdfcc9d9124ae1062767ca4e322d287fd5e353fdcebd0e52407cb3cd68571258a8900","0x83c6d70a640b33087454a4788dfd9ef3ed00272da084a8d36be817296f71c086b23b576f98178ab8ca6a74f04524b46b","0xad4115182ad784cfe11bcfc5ce21fd56229cc2ce77ac82746e91a2f0aa53ca6593a22efd2dc4ed8d00f84542643d9c58","0xab1434c5e5065da826d10c2a2dba0facccab0e52b506ce0ce42fbe47ced5a741797151d9ecc99dc7d6373cfa1779bbf6","0x8a8b591d82358d55e6938f67ea87a89097ab5f5496f7260adb9f649abb289da12b498c5b2539c2f9614fb4e21b1f66b0","0x964f355d603264bc1f44c64d6d64debca66f37dff39c971d9fc9
24f2bc68e6c187b48564a6dc82660a98b035f8addb5d","0xb66235eaaf47456bc1dc4bde454a028e2ce494ece6b713a94cd6bf27cf18c717fd0c57a5681caaa2ad73a473593cdd7a","0x9103e3bb74304186fa4e3e355a02da77da4aca9b7e702982fc2082af67127ebb23a455098313c88465bc9b7d26820dd5","0xb6a42ff407c9dd132670cdb83cbad4b20871716e44133b59a932cd1c3f97c7ac8ff7f61acfaf8628372508d8dc8cad7c","0x883a9c21c16a167a4171b0f084565c13b6f28ba7c4977a0de69f0a25911f64099e7bbb4da8858f2e93068f4155d04e18","0x8dbb3220abc6a43220adf0331e3903d3bfd1d5213aadfbd8dfcdf4b2864ce2e96a71f35ecfb7a07c3bbabf0372b50271","0xb4ad08aee48e176bda390b7d9acf2f8d5eb008f30d20994707b757dc6a3974b2902d29cd9b4d85e032810ad25ac49e97","0x865bb0f33f7636ec501bb634e5b65751c8a230ae1fa807a961a8289bbf9c7fe8c59e01fbc4c04f8d59b7f539cf79ddd5","0x86a54d4c12ad1e3605b9f93d4a37082fd26e888d2329847d89afa7802e815f33f38185c5b7292293d788ad7d7da1df97","0xb26c8615c5e47691c9ff3deca3021714662d236c4d8401c5d27b50152ce7e566266b9d512d14eb63e65bc1d38a16f914","0x827639d5ce7db43ba40152c8a0eaad443af21dc92636cc8cc2b35f10647da7d475a1e408901cd220552fddad79db74df","0xa2b79a582191a85dbe22dc384c9ca3de345e69f6aa370aa6d3ff1e1c3de513e30b72df9555b15a46586bd27ea2854d9d","0xae0d74644aba9a49521d3e9553813bcb9e18f0b43515e4c74366e503c52f47236be92dfbd99c7285b3248c267b1de5a0","0x80fb0c116e0fd6822a04b9c25f456bdca704e2be7bdc5d141dbf5d1c5eeb0a2c4f5d80db583b03ef3e47517e4f9a1b10","0xac3a1fa3b4a2f30ea7e0a114cdc479eb51773573804c2a158d603ad9902ae8e39ffe95df09c0d871725a5d7f9ba71a57","0xb56b2b0d601cba7f817fa76102c68c2e518c6f20ff693aad3ff2e07d6c4c76203753f7f91686b1801e8c4659e4d45c48","0x89d50c1fc56e656fb9d3915964ebce703cb723fe411ab3c9eaa88ccc5d2b155a9b2e515363d9c600d3c0cee782c43f41","0xb24207e61462f6230f3cd8ccf6828357d03e725769f7d1de35099ef9ee4dca57dbce699bb49ed994462bee17059d25ce","0xb886f17fcbcbfcd08ac07f04bb9543ef58510189decaccea4b4158c9174a067cb67d14b6be3c934e6e2a18c77efa9c9c","0xb9c050ad9cafd41c6e2e192b70d080076eed59ed38ea19a12bd92fa17b5d8947d58d5546aaf5e8e27e1d3b5481a6ce51","0xaaf7a34d3267e3b1ddbc54c641e3922e
89303f7c86ebebc7347ebca4cffad5b76117dac0cbae1a133053492799cd936f","0xa9ee604ada50adef82e29e893070649d2d4b7136cc24fa20e281ce1a07bd736bf0de7c420369676bcbcecff26fb6e900","0x9855315a12a4b4cf80ab90b8bd13003223ba25206e52fd4fe6a409232fbed938f30120a3db23eab9c53f308bd8b9db81","0x8cd488dd7a24f548a3cf03c54dec7ff61d0685cb0f6e5c46c2d728e3500d8c7bd6bba0156f4bf600466fda53e5b20444","0x890ad4942ebac8f5b16c777701ab80c68f56fa542002b0786f8fea0fb073154369920ac3dbfc07ea598b82f4985b8ced","0x8de0cf9ddc84c9b92c59b9b044387597799246b30b9f4d7626fc12c51f6e423e08ee4cbfe9289984983c1f9521c3e19d","0xb474dfb5b5f4231d7775b3c3a8744956b3f0c7a871d835d7e4fd9cc895222c7b868d6c6ce250de568a65851151fac860","0x86433b6135d9ed9b5ee8cb7a6c40e5c9d30a68774cec04988117302b8a02a11a71a1e03fd8e0264ef6611d219f103007","0x80b9ed4adbe9538fb1ef69dd44ec0ec5b57cbfea820054d8d445b4261962624b4c70ac330480594bc5168184378379c3","0x8b2e83562ccd23b7ad2d17f55b1ab7ef5fbef64b3a284e6725b800f3222b8bdf49937f4a873917ada9c4ddfb090938c2","0xabe78cebc0f5a45d754140d1f685e387489acbfa46d297a8592aaa0d676a470654f417a4f7d666fc0b2508fab37d908e","0xa9c5f8ff1f8568e252b06d10e1558326db9901840e6b3c26bbd0cd5e850cb5fb3af3f117dbb0f282740276f6fd84126f","0x975f8dc4fb55032a5df3b42b96c8c0ffecb75456f01d4aef66f973cb7270d4eff32c71520ceefc1adcf38d77b6b80c67","0xb043306ed2c3d8a5b9a056565afd8b5e354c8c4569fda66b0d797a50a3ce2c08cffbae9bbe292da69f39e89d5dc7911e","0x8d2afc36b1e44386ba350c14a6c1bb31ff6ea77128a0c5287584ac3584282d18516901ce402b4644a53db1ed8e7fa581","0x8c294058bed53d7290325c363fe243f6ec4f4ea2343692f4bac8f0cb86f115c069ccb8334b53d2e42c067691ad110dba","0xb92157b926751aaf7ef82c1aa8c654907dccab6376187ee8b3e8c0c82811eae01242832de953faa13ebaff7da8698b3e","0xa780c4bdd9e4ba57254b09d745075cecab87feda78c88ffee489625c5a3cf96aa6b3c9503a374a37927d9b78de9bd22b","0x811f548ef3a2e6a654f7dcb28ac9378de9515ed61e5a428515d9594a83e80b35c60f96a5cf743e6fab0d3cb526149f49","0x85a4dccf6d90ee8e094731eec53bd00b3887aec6bd81a0740efddf812fd35e3e4fe4f983afb49a8588691c202dabf942","0xb152c2da6f2e
01c8913079ae2b40a09b1f361a80f5408a0237a8131b429677c3157295e11b365b1b1841924b9efb922e","0x849b9efee8742502ffd981c4517c88ed33e4dd518a330802caff168abae3cd09956a5ee5eda15900243bc2e829016b74","0x955a933f3c18ec0f1c0e38fa931e4427a5372c46a3906ebe95082bcf878c35246523c23f0266644ace1fa590ffa6d119","0x911989e9f43e580c886656377c6f856cdd4ff1bd001b6db3bbd86e590a821d34a5c6688a29b8d90f28680e9fdf03ba69","0xb73b8b4f1fd6049fb68d47cd96a18fcba3f716e0a1061aa5a2596302795354e0c39dea04d91d232aec86b0bf2ba10522","0x90f87456d9156e6a1f029a833bf3c7dbed98ca2f2f147a8564922c25ae197a55f7ea9b2ee1f81bf7383197c4bad2e20c","0x903cba8b1e088574cb04a05ca1899ab00d8960580c884bd3c8a4c98d680c2ad11410f2b75739d6050f91d7208cac33a5","0x9329987d42529c261bd15ecedd360be0ea8966e7838f32896522c965adfc4febf187db392bd441fb43bbd10c38fdf68b","0x8178ee93acf5353baa349285067b20e9bb41aa32d77b5aeb7384fe5220c1fe64a2461bd7a83142694fe673e8bbf61b7c","0xa06a8e53abcff271b1394bcc647440f81fb1c1a5f29c27a226e08f961c3353f4891620f2d59b9d1902bf2f5cc07a4553","0xaaf5fe493b337810889e777980e6bbea6cac39ac66bc0875c680c4208807ac866e9fda9b5952aa1d04539b9f4a4bec57","0xaa058abb1953eceac14ccfa7c0cc482a146e1232905dcecc86dd27f75575285f06bbae16a8c9fe8e35d8713717f5f19f","0x8f15dd732799c879ca46d2763453b359ff483ca33adb1d0e0a57262352e0476c235987dc3a8a243c74bc768f93d3014c","0xa61cc8263e9bc03cce985f1663b8a72928a607121005a301b28a278e9654727fd1b22bc8a949af73929c56d9d3d4a273","0x98d6dc78502d19eb9f921225475a6ebcc7b44f01a2df6f55ccf6908d65b27af1891be2a37735f0315b6e0f1576c1f8d8","0x8bd258b883f3b3793ec5be9472ad1ff3dc4b51bc5a58e9f944acfb927349ead8231a523cc2175c1f98e7e1e2b9f363b8","0xaeacc2ecb6e807ad09bedd99654b097a6f39840e932873ace02eabd64ccfbb475abdcb62939a698abf17572d2034c51e","0xb8ccf78c08ccd8df59fd6eda2e01de328bc6d8a65824d6f1fc0537654e9bc6bf6f89c422dd3a295cce628749da85c864","0x8f91fd8cb253ba2e71cc6f13da5e05f62c2c3b485c24f5d68397d04665673167fce1fc1aec6085c69e87e66ec555d3fd","0xa254baa10cb26d04136886073bb4c159af8a8532e3fd36b1e9c3a2e41b5b2b6a86c4ebc14dbe624ee07b7ccdaf59f
9ab","0x94e3286fe5cd68c4c7b9a7d33ae3d714a7f265cf77cd0e9bc19fc51015b1d1c34ad7e3a5221c459e89f5a043ee84e3a9","0xa279da8878af8d449a9539bec4b17cea94f0242911f66fab275b5143ab040825f78c89cb32a793930609415cfa3a1078","0xac846ceb89c9e5d43a2991c8443079dc32298cd63e370e64149cec98cf48a6351c09c856f2632fd2f2b3d685a18bbf8b","0xa847b27995c8a2e2454aaeb983879fb5d3a23105c33175839f7300b7e1e8ec3efd6450e9fa3f10323609dee7b98c6fd5","0xa2f432d147d904d185ff4b2de8c6b82fbea278a2956bc406855b44c18041854c4f0ecccd472d1d0dff1d8aa8e281cb1d","0x94a48ad40326f95bd63dff4755f863a1b79e1df771a1173b17937f9baba57b39e651e7695be9f66a472f098b339364fc","0xa12a0ccd8f96e96e1bc6494341f7ebce959899341b3a084aa1aa87d1c0d489ac908552b7770b887bb47e7b8cbc3d8e66","0x81a1f1681bda923bd274bfe0fbb9181d6d164fe738e54e25e8d4849193d311e2c4253614ed673c98af2c798f19a93468","0xabf71106a05d501e84cc54610d349d7d5eae21a70bd0250f1bebbf412a130414d1c8dbe673ffdb80208fd72f1defa4d4","0x96266dc2e0df18d8136d79f5b59e489978eee0e6b04926687fe389d4293c14f36f055c550657a8e27be4118b64254901","0x8df5dcbefbfb4810ae3a413ca6b4bf08619ca53cd50eb1dde2a1c035efffc7b7ac7dff18d403253fd80104bd83dc029e","0x9610b87ff02e391a43324a7122736876d5b3af2a137d749c52f75d07b17f19900b151b7f439d564f4529e77aa057ad12","0xa90a5572198b40fe2fcf47c422274ff36c9624df7db7a89c0eb47eb48a73a03c985f4ac5016161c76ca317f64339bce1","0x98e5e61a6ab6462ba692124dba7794b6c6bde4249ab4fcc98c9edd631592d5bc2fb5e38466691a0970a38e48d87c2e43","0x918cefb8f292f78d4db81462c633daf73b395e772f47b3a7d2cea598025b1d8c3ec0cbff46cdb23597e74929981cde40","0xa98918a5dc7cf610fe55f725e4fd24ce581d594cb957bb9b4e888672e9c0137003e1041f83e3f1d7b9caab06462c87d4","0xb92b74ac015262ca66c33f2d950221e19d940ba3bf4cf17845f961dc1729ae227aa9e1f2017829f2135b489064565c29","0xa053ee339f359665feb178b4e7ee30a85df37debd17cacc5a27d6b3369d170b0114e67ad1712ed26d828f1df641bcd99","0x8c3c8bad510b35da5ce5bd84b35c958797fbea024ad1c97091d2ff71d9b962e9222f65a9b776e5b3cc29c36e1063d2ee","0xaf99dc7330fe7c37e850283eb47cc3257888e7c197cb0d102edf94439e1e02267b6a56306
d246c326c4c79f9dc8c6986","0xafecb2dc34d57a725efbd7eb93d61eb29dbe8409b668ab9ea040791f5b796d9be6d4fc10d7f627bf693452f330cf0435","0x93334fedf19a3727a81a6b6f2459db859186227b96fe7a391263f69f1a0884e4235de64d29edebc7b99c44d19e7c7d7a","0x89579c51ac405ad7e9df13c904061670ce4b38372492764170e4d3d667ed52e5d15c7cd5c5991bbfa3a5e4e3fa16363e","0x9778f3e8639030f7ef1c344014f124e375acb8045bd13d8e97a92c5265c52de9d1ffebaa5bc3e1ad2719da0083222991","0x88f77f34ee92b3d36791bdf3326532524a67d544297dcf1a47ff00b47c1b8219ff11e34034eab7d23b507caa2fd3c6b9","0xa699c1e654e7c484431d81d90657892efeb4adcf72c43618e71ca7bd7c7a7ebbb1db7e06e75b75dc4c74efd306b5df3f","0x81d13153baebb2ef672b5bdb069d3cd669ce0be96b742c94e04038f689ff92a61376341366b286eee6bf3ae85156f694","0x81efb17de94400fdacc1deec2550cbe3eecb27c7af99d8207e2f9be397e26be24a40446d2a09536bb5172c28959318d9","0x989b21ebe9ceab02488992673dc071d4d5edec24bff0e17a4306c8cb4b3c83df53a2063d1827edd8ed16d6e837f0d222","0x8d6005d6536825661b13c5fdce177cb37c04e8b109b7eb2b6d82ea1cb70efecf6a0022b64f84d753d165edc2bba784a3","0xa32607360a71d5e34af2271211652d73d7756d393161f4cf0da000c2d66a84c6826e09e759bd787d4fd0305e2439d342","0xaaad8d6f6e260db45d51b2da723be6fa832e76f5fbcb77a9a31e7f090dd38446d3b631b96230d78208cae408c288ac4e","0xabcfe425255fd3c5cffd3a818af7650190c957b6b07b632443f9e33e970a8a4c3bf79ac9b71f4d45f238a04d1c049857","0xaeabf026d4c783adc4414b5923dbd0be4b039cc7201219f7260d321f55e9a5b166d7b5875af6129c034d0108fdc5d666","0xaf49e740c752d7b6f17048014851f437ffd17413c59797e5078eaaa36f73f0017c3e7da020310cfe7d3c85f94a99f203","0x8854ca600d842566e3090040cd66bb0b3c46dae6962a13946f0024c4a8aca447e2ccf6f240045f1ceee799a88cb9210c","0xb6c03b93b1ab1b88ded8edfa1b487a1ed8bdce8535244dddb558ffb78f89b1c74058f80f4db2320ad060d0c2a9c351cc","0xb5bd7d17372faff4898a7517009b61a7c8f6f0e7ed4192c555db264618e3f6e57fb30a472d169fea01bf2bf0362a19a8","0x96eb1d38319dc74afe7e7eb076fcd230d19983f645abd14a71e6103545c01301b31c47ae931e025f3ecc01fb3d2f31fa","0xb55a8d30d4403067def9b65e16f867299f8f64c9b391d0846d478
0bc196569622e7e5b64ce799b5aefac8f965b2a7a7b","0x8356d199a991e5cbbff608752b6291731b6b6771aed292f8948b1f41c6543e4ab1bedc82dd26d10206c907c03508df06","0x97f4137445c2d98b0d1d478049de952610ad698c91c9d0f0e7227d2aae690e9935e914ec4a2ea1fbf3fc1dddfeeacebb","0xaf5621707e0938320b15ddfc87584ab325fbdfd85c30efea36f8f9bd0707d7ec12c344eff3ec21761189518d192df035","0x8ac7817e71ea0825b292687928e349da7140285d035e1e1abff0c3704fa8453faaae343a441b7143a74ec56539687cc4","0x8a5e0a9e4758449489df10f3386029ada828d1762e4fb0a8ffe6b79e5b6d5d713cb64ed95960e126398b0cdb89002bc9","0x81324be4a71208bbb9bca74b77177f8f1abb9d3d5d9db195d1854651f2cf333cd618d35400da0f060f3e1b025124e4b2","0x849971d9d095ae067525b3cbc4a7dfae81f739537ade6d6cec1b42fb692d923176197a8770907c58069754b8882822d6","0x89f830825416802477cc81fdf11084885865ee6607aa15aa4eb28e351c569c49b8a1b9b5e95ddc04fa0ebafe20071313","0x9240aeeaff37a91af55f860b9badd466e8243af9e8c96a7aa8cf348cd270685ab6301bc135b246dca9eda696f8b0e350","0xacf74db78cc33138273127599eba35b0fb4e7b9a69fe02dae18fc6692d748ca332bd00b22afa8e654ed587aab11833f3","0xb091e6d37b157b50d76bd297ad752220cd5c9390fac16dc838f8557aed6d9833fc920b61519df21265406216315e883f","0xa6446c429ebf1c7793c622250e23594c836b2fbcaf6c5b3d0995e1595a37f50ea643f3e549b0be8bbdadd69044d72ab9","0x93e675353bd60e996bf1c914d5267eeaa8a52fc3077987ccc796710ef9becc6b7a00e3d82671a6bdfb8145ee3c80245a","0xa2f731e43251d04ed3364aa2f072d05355f299626f2d71a8a38b6f76cf08c544133f7d72dd0ab4162814b674b9fc7fa6","0x97a8b791a5a8f6e1d0de192d78615d73d0c38f1e557e4e15d15adc663d649e655bc8da3bcc499ef70112eafe7fb45c7a","0x98cd624cbbd6c53a94469be4643c13130916b91143425bcb7d7028adbbfede38eff7a21092af43b12d4fab703c116359","0x995783ce38fd5f6f9433027f122d4cf1e1ff3caf2d196ce591877f4a544ce9113ead60de2de1827eaff4dd31a20d79a8","0x8cf251d6f5229183b7f3fe2f607a90b4e4b6f020fb4ba2459d28eb8872426e7be8761a93d5413640a661d73e34a5b81f","0xb9232d99620652a3aa7880cad0876f153ff881c4ed4c0c2e7b4ea81d5d42b70daf1a56b869d752c3743c6d4c947e6641","0x849716f938f9d37250cccb1bf77f5f9fd
e53096cdfc6f2a25536a6187029a8f1331cdbed08909184b201f8d9f04b792f","0x80c7c4de098cbf9c6d17b14eba1805e433b5bc905f6096f8f63d34b94734f2e4ebf4bce8a177efd1186842a61204a062","0xb790f410cf06b9b8daadceeb4fd5ff40a2deda820c8df2537e0a7554613ae3948e149504e3e79aa84889df50c8678eeb","0x813aab8bd000299cd37485b73cd7cba06e205f8efb87f1efc0bae8b70f6db2bc7702eb39510ad734854fb65515fe9d0f","0x94f0ab7388ac71cdb67f6b85dfd5945748afb2e5abb622f0b5ad104be1d4d0062b651f134ba22385c9e32c2dfdcccce1","0xab6223dca8bd6a4f969e21ccd9f8106fc5251d321f9e90cc42cea2424b3a9c4e5060a47eeef6b23c7976109b548498e8","0x859c56b71343fce4d5c5b87814c47bf55d581c50fd1871a17e77b5e1742f5af639d0e94d19d909ec7dfe27919e954e0c","0xaae0d632b6191b8ad71b027791735f1578e1b89890b6c22e37de0e4a6074886126988fe8319ae228ac9ef3b3bcccb730","0x8ca9f32a27a024c3d595ecfaf96b0461de57befa3b331ab71dc110ec3be5824fed783d9516597537683e77a11d334338","0xa061df379fb3f4b24816c9f6cd8a94ecb89b4c6dc6cd81e4b8096fa9784b7f97ab3540259d1de9c02eb91d9945af4823","0x998603102ac63001d63eb7347a4bb2bf4cf33b28079bb48a169076a65c20d511ccd3ef696d159e54cc8e772fb5d65d50","0x94444d96d39450872ac69e44088c252c71f46be8333a608a475147752dbb99db0e36acfc5198f158509401959c12b709","0xac1b51b6c09fe055c1d7c9176eea9adc33f710818c83a1fbfa073c8dc3a7eb3513cbdd3f5960b7845e31e3e83181e6ba","0x803d530523fc9e1e0f11040d2412d02baef3f07eeb9b177fa9bfa396af42eea898a4276d56e1db998dc96ae47b644cb2","0x85a3c9fc7638f5bf2c3e15ba8c2fa1ae87eb1ceb44c6598c67a2948667a9dfa41e61f66d535b4e7fda62f013a5a8b885","0xa961cf5654c46a1a22c29baf7a4e77837a26b7f138f410e9d1883480ed5fa42411d522aba32040b577046c11f007388e","0xad1154142344f494e3061ef45a34fab1aaacf5fdf7d1b26adbb5fbc3d795655fa743444e39d9a4119b4a4f82a6f30441","0xb1d6c30771130c77806e7ab893b73d4deb590b2ff8f2f8b5e54c2040c1f3e060e2bd99afc668cf706a2df666a508bbf6","0xa00361fd440f9decabd98d96c575cd251dc94c60611025095d1201ef2dedde51cb4de7c2ece47732e5ed9b3526c2012c","0xa85c5ab4d17d328bda5e6d839a9a6adcc92ff844ec25f84981e4f44a0e8419247c081530f8d9aa629c7eb4ca21affba6","0xa4ddd3eab4527
a2672cf9463db38bc29f61460e2a162f426b7852b7a7645fbd62084fd39a8e4d60e1958cce436dd8f57","0x811648140080fe55b8618f4cf17f3c5a250adb0cd53d885f2ddba835d2b4433188e41fc0661faac88e4ff910b16278c0","0xb85c7f1cfb0ed29addccf7546023a79249e8f15ac2d14a20accbfef4dd9dc11355d599815fa09d2b6b4e966e6ea8cff1","0xa10b5d8c260b159043b020d5dd62b3467df2671afea6d480ca9087b7e60ed170c82b121819d088315902842d66c8fb45","0x917e191df1bcf3f5715419c1e2191da6b8680543b1ba41fe84ed07ef570376e072c081beb67b375fca3565a2565bcabb","0x881fd967407390bfd7badc9ab494e8a287559a01eb07861f527207c127eadea626e9bcc5aa9cca2c5112fbac3b3f0e9c","0x959fd71149af82cc733619e0e5bf71760ca2650448c82984b3db74030d0e10f8ab1ce1609a6de6f470fe8b5bd90df5b3","0xa3370898a1c5f33d15adb4238df9a6c945f18b9ada4ce2624fc32a844f9ece4c916a64e9442225b6592afa06d2e015f2","0x817efb8a791435e4236f7d7b278181a5fa34587578c629dbc14fbf9a5c26772290611395eecd20222a4c58649fc256d8","0xa04c9876acf2cfdc8ef96de4879742709270fa1d03fe4c8511fbef2d59eb0aaf0336fa2c7dfe41a651157377fa217813","0x81e15875d7ea7f123e418edf14099f2e109d4f3a6ce0eb65f67fe9fb10d2f809a864a29f60ad3fc949f89e2596b21783","0xb49f529975c09e436e6bc202fdc16e3fdcbe056db45178016ad6fdece9faad4446343e83aed096209690b21a6910724f","0x879e8eda589e1a279f7f49f6dd0580788c040d973748ec4942dbe51ea8fbd05983cc919b78f0c6b92ef3292ae29db875","0x81a2b74b2118923f34139a102f3d95e7eee11c4c2929c2576dee200a5abfd364606158535a6c9e4178a6a83dbb65f3c4","0x8913f281d8927f2b45fc815d0f7104631cb7f5f7278a316f1327d670d15868daadd2a64e3eb98e1f53fe7e300338cc80","0xa6f815fba7ef9af7fbf45f93bc952e8b351f5de6568a27c7c47a00cb39a254c6b31753794f67940fc7d2e9cc581529f4","0xb3722a15c66a0014ce4d082de118def8d39190c15678a472b846225585f3a83756ae1b255b2e3f86a26168878e4773b2","0x817ae61ab3d0dd5b6e24846b5a5364b1a7dc2e77432d9fed587727520ae2f307264ea0948c91ad29f0aea3a11ff38624","0xb3db467464415fcad36dc1de2d6ba7686772a577cc2619242ac040d6734881a45d3b40ed4588db124e4289cfeec4bbf6","0xad66a14f5a54ac69603b16e5f1529851183da77d3cc60867f10aea41339dd5e06a5257982e9e90a352cdd32750f42e
e4","0xadafa3681ef45d685555601a25a55cf23358319a17f61e2179e704f63df83a73bdd298d12cf6cef86db89bd17119e11d","0xa379dc44cb6dd3b9d378c07b2ec654fec7ca2f272de6ba895e3d00d20c9e4c5550498a843c8ac67e4221db2115bedc1c","0xb7bf81c267a78efc6b9e5a904574445a6487678d7ef70054e3e93ea6a23f966c2b68787f9164918e3b16d2175459ed92","0xb41d66a13a4afafd5760062b77f79de7e6ab8ccacde9c6c5116a6d886912fb491dc027af435b1b44aacc6af7b3c887f2","0x9904d23a7c1c1d2e4bab85d69f283eb0a8e26d46e8b7b30224438015c936729b2f0af7c7c54c03509bb0500acb42d8a4","0xae30d65e9e20c3bfd603994ae2b175ff691d51f3e24b2d058b3b8556d12ca4c75087809062dddd4aaac81c94d15d8a17","0x9245162fab42ac01527424f6013310c3eb462982518debef6c127f46ba8a06c705d7dc9f0a41e796ba8d35d60ae6cc64","0x87fab853638d7a29a20f3ba2b1a7919d023e9415bfa78ebb27973d8cbc7626f584dc5665d2e7ad71f1d760eba9700d88","0x85aac46ecd330608e5272430970e6081ff02a571e8ea444f1e11785ea798769634a22a142d0237f67b75369d3c484a8a","0x938c85ab14894cc5dfce3d80456f189a2e98eddbc8828f4ff6b1df1dcb7b42b17ca2ff40226a8a1390a95d63dca698dd","0xa18ce1f846e3e3c4d846822f60271eecf0f5d7d9f986385ac53c5ace9589dc7c0188910448c19b91341a1ef556652fa9","0x8611608a9d844f0e9d7584ad6ccf62a5087a64f764caf108db648a776b5390feb51e5120f0ef0e9e11301af3987dd7dc","0x8106333ba4b4de8d1ae43bc9735d3fea047392e88efd6a2fa6f7b924a18a7a265ca6123c3edc0f36307dd7fb7fe89257","0xa91426fa500951ff1b051a248c050b7139ca30dde8768690432d597d2b3c4357b11a577be6b455a1c5d145264dcf81fc","0xb7f9f90e0e450f37b081297f7f651bad0496a8b9afd2a4cf4120a2671aaaa8536dce1af301258bfbfdb122afa44c5048","0x84126da6435699b0c09fa4032dec73d1fca21d2d19f5214e8b0bea43267e9a8dd1fc44f8132d8315e734c8e2e04d7291","0xaff064708103884cb4f1a3c1718b3fc40a238d35cf0a7dc24bdf9823693b407c70da50df585bf5bc4e9c07d1c2d203e8","0xa8b40fc6533752983a5329c31d376c7a5c13ce6879cc7faee648200075d9cd273537001fb4c86e8576350eaac6ba60c2","0xa02db682bdc117a84dcb9312eb28fcbde12d49f4ce915cc92c610bb6965ec3cc38290f8c5b5ec70afe153956692cda95","0x86decd22b25d300508472c9ce75d3e465b737e7ce13bc0fcce32835e54646fe12322ba5bc4
57be18bfd926a1a6ca4a38","0xa18666ef65b8c2904fd598791f5627207165315a85ee01d5fb0e6b2e10bdd9b00babc447da5bd63445e3337de33b9b89","0x89bb0c06effadefdaf34ffe4b123e1678a90d4451ee856c863df1e752eef41fd984689ded8f0f878bf8916d5dd8e8024","0x97cfcba08ebec05d0073992a66b1d7d6fb9d95871f2cdc36db301f78bf8069294d1c259efef5c93d20dc937eedae3a1a","0xac2643b14ece79dcb2e289c96776a47e2bebd40dd6dc74fd035df5bb727b5596f40e3dd2d2202141e69b0993717ede09","0xa5e6fd88a2f9174d9bd4c6a55d9c30974be414992f22aa852f552c7648f722ed8077acf5aba030abd47939bb451b2c60","0x8ad40a612824a7994487731a40b311b7349038c841145865539c6ada75c56de6ac547a1c23df190e0caaafecddd80ccc","0x953a7cea1d857e09202c438c6108060961f195f88c32f0e012236d7a4b39d840c61b162ec86436e8c38567328bea0246","0x80d8b47a46dae1868a7b8ccfe7029445bbe1009dad4a6c31f9ef081be32e8e1ac1178c3c8fb68d3e536c84990cc035b1","0x81ecd99f22b3766ce0aca08a0a9191793f68c754fdec78b82a4c3bdc2db122bbb9ebfd02fc2dcc6e1567a7d42d0cc16a","0xb1dd0446bccc25846fb95d08c1c9cc52fb51c72c4c5d169ffde56ecfe800f108dc1106d65d5c5bd1087c656de3940b63","0xb87547f0931e164e96de5c550ca5aa81273648fe34f6e193cd9d69cf729cb432e17aa02e25b1c27a8a0d20a3b795e94e","0x820a94e69a927e077082aae66f6b292cfbe4589d932edf9e68e268c9bd3d71ef76cf7d169dd445b93967c25db11f58f1","0xb0d07ddf2595270c39adfa0c8cf2ab1322979b0546aa4d918f641be53cd97f36c879bb75d205e457c011aca3bbd9f731","0x8700b876b35b4b10a8a9372c5230acecd39539c1bb87515640293ad4464a9e02929d7d6a6a11112e8a29564815ac0de4","0xa61a601c5bb27dcb97e37c8e2b9ce479c6b192a5e04d9ed5e065833c5a1017ee5f237b77d1a17be5d48f8e7cc0bcacf6","0x92fb88fe774c1ba1d4a08cae3c0e05467ad610e7a3f1d2423fd47751759235fe0a3036db4095bd6404716aa03820f484","0xb274f140d77a3ce0796f5e09094b516537ccaf27ae1907099bff172e6368ba85e7c3ef8ea2a07457cac48ae334da95b3","0xb2292d9181f16581a9a9142490b2bdcdfb218ca6315d1effc8592100d792eb89d5356996c890441f04f2b4a95763503e","0x8897e73f576d86bc354baa3bd96e553107c48cf5889dcc23c5ba68ab8bcd4e81f27767be2233fdfa13d39f885087e668","0xa29eac6f0829791c728d71abc49569df95a4446ecbfc534b39f24f
56c88fe70301838dfc1c19751e7f3c5c1b8c6af6a0","0x9346dc3720adc5df500a8df27fd9c75ef38dc5c8f4e8ed66983304750e66d502c3c59b8e955be781b670a0afc70a2167","0x9566d534e0e30a5c5f1428665590617e95fd05d45f573715f58157854ad596ece3a3cfec61356aee342308d623e029d5","0xa464fb8bffe6bd65f71938c1715c6e296cc6d0311a83858e4e7eb5873b7f2cf0c584d2101e3407b85b64ca78b2ac93ce","0xb54088f7217987c87e9498a747569ac5b2f8afd5348f9c45bf3fd9fbf713a20f495f49c8572d087efe778ac7313ad6d3","0x91fa9f5f8000fe050f5b224d90b59fcce13c77e903cbf98ded752e5b3db16adb2bc1f8c94be48b69f65f1f1ad81d6264","0x92d04a5b0ac5d8c8e313709b432c9434ecd3e73231f01e9b4e7952b87df60cbfa97b5dedd2200bd033b4b9ea8ba45cc1","0xa94b90ad3c3d6c4bbe169f8661a790c40645b40f0a9d1c7220f01cf7fc176e04d80bab0ced9323fcafb93643f12b2760","0x94d86149b9c8443b46196f7e5a3738206dd6f3be7762df488bcbb9f9ee285a64c997ed875b7b16b26604fa59020a8199","0x82efe4ae2c50a2d7645240c173a047f238536598c04a2c0b69c96e96bd18e075a99110f1206bc213f39edca42ba00cc1","0xab8667685f831bc14d4610f84a5da27b4ea5b133b4d991741a9e64dceb22cb64a3ce8f1b6e101d52af6296df7127c9ad","0x83ba433661c05dcc5d562f4a9a261c8110dac44b8d833ae1514b1fc60d8b4ee395b18804baea04cb10adb428faf713c3","0xb5748f6f660cc5277f1211d2b8649493ed8a11085b871cd33a5aea630abd960a740f08c08be5f9c21574600ac9bf5737","0xa5c8dd12af48fb710642ad65ebb97ca489e8206741807f7acfc334f8035d3c80593b1ff2090c9bb7bd138f0c48714ca8","0xa2b382fd5744e3babf454b1d806cc8783efeb4761bc42b6914ea48a46a2eae835efbe0a18262b6bc034379e03cf1262b","0xb3145ffaf603f69f15a64936d32e3219eea5ed49fdfd2f5bf40ea0dfd974b36fb6ff12164d4c2282d892db4cf3ff3ce1","0x87a316fb213f4c5e30c5e3face049db66be4f28821bd96034714ec23d3e97849d7b301930f90a4323c7ccf53de23050c","0xb9de09a919455070fed6220fc179c8b7a4c753062bcd27acf28f5b9947a659c0b364298daf7c85c4ca6fca7f945add1f","0x806fbd98d411b76979464c40ad88bc07a151628a27fcc1012ba1dfbaf5b5cc9d962fb9b3386008978a12515edce934bc","0xa15268877fae0d21610ae6a31061ed7c20814723385955fac09fdc9693a94c33dea11db98bb89fdfe68f933490f5c381","0x8d633fb0c4da86b2e0b37d8fad5972d62b
ff2ac663c5ec815d095cd4b7e1fe66ebef2a2590995b57eaf941983c7ad7a4","0x8139e5dd9cf405e8ef65f11164f0440827d98389ce1b418b0c9628be983a9ddd6cf4863036ccb1483b40b8a527acd9ed","0x88b15fa94a08eac291d2b94a2b30eb851ff24addf2cc30b678e72e32cfcb3424cf4b33aa395d741803f3e578ddf524de","0xb5eaf0c8506e101f1646bcf049ee38d99ea1c60169730da893fd6020fd00a289eb2f415947e44677af49e43454a7b1be","0x8489822ad0647a7e06aa2aa5595960811858ddd4542acca419dd2308a8c5477648f4dd969a6740bb78aa26db9bfcc555","0xb1e9a7b9f3423c220330d45f69e45fa03d7671897cf077f913c252e3e99c7b1b1cf6d30caad65e4228d5d7b80eb86e5e","0xb28fe9629592b9e6a55a1406903be76250b1c50c65296c10c5e48c64b539fb08fe11f68cf462a6edcbba71b0cee3feb2","0xa41acf96a02c96cd8744ff6577c244fc923810d17ade133587e4c223beb7b4d99fa56eae311a500d7151979267d0895c","0x880798938fe4ba70721be90e666dfb62fcab4f3556fdb7b0dc8ec5bc34f6b4513df965eae78527136eb391889fe2caf9","0x98d4d89d358e0fb7e212498c73447d94a83c1b66e98fc81427ab13acddb17a20f52308983f3a5a8e0aaacec432359604","0x81430b6d2998fc78ba937a1639c6020199c52da499f68109da227882dc26d005b73d54c5bdcac1a04e8356a8ca0f7017","0xa8d906a4786455eb74613aba4ce1c963c60095ffb8658d368df9266fdd01e30269ce10bf984e7465f34b4fd83beba26a","0xaf54167ac1f954d10131d44a8e0045df00d581dd9e93596a28d157543fbe5fb25d213806ed7fb3cba6b8f5b5423562db","0x8511e373a978a12d81266b9afbd55035d7bc736835cfa921903a92969eeba3624437d1346b55382e61415726ab84a448","0x8cf43eea93508ae586fa9a0f1354a1e16af659782479c2040874a46317f9e8d572a23238efa318fdfb87cc63932602b7","0xb0bdd3bacff077173d302e3a9678d1d37936188c7ecc34950185af6b462b7c679815176f3cce5db19aac8b282f2d60ad","0xa355e9b87f2f2672052f5d4d65b8c1c827d24d89b0d8594641fccfb69aef1b94009105f3242058bb31c8bf51caae5a41","0xb8baa9e4b950b72ff6b88a6509e8ed1304bc6fd955748b2e59a523a1e0c5e99f52aec3da7fa9ff407a7adf259652466c","0x840bc3dbb300ea6f27d1d6dd861f15680bd098be5174f45d6b75b094d0635aced539fa03ddbccb453879de77fb5d1fe9","0xb4bc7e7e30686303856472bae07e581a0c0bfc815657c479f9f5931cff208d5c12930d2fd1ff413ebd8424bcd7a9b571","0x89b5d514155d79
99408334a50822508b9d689add55d44a240ff2bdde2eee419d117031f85e924e2a2c1ca77db9b91eea","0xa8604b6196f87a04e1350302e8aa745bba8dc162115d22657b37a1d1a98cb14876ddf7f65840b5dbd77e80cd22b4256c","0x83cb7acdb9e03247515bb2ce0227486ccf803426717a14510f0d59d45e998b245797d356f10abca94f7a14e1a2f0d552","0xaeb3266a9f16649210ab2df0e1908ac259f34ce1f01162c22b56cf1019096ee4ea5854c36e30bb2feb06c21a71e8a45c","0x89e72e86edf2aa032a0fc9acf4d876a40865fbb2c8f87cb7e4d88856295c4ac14583e874142fd0c314a49aba68c0aa3c","0x8c3576eba0583c2a7884976b4ed11fe1fda4f6c32f6385d96c47b0e776afa287503b397fa516a455b4b8c3afeedc76db","0xa31e5b633bda9ffa174654fee98b5d5930a691c3c42fcf55673d927dbc8d91c58c4e42e615353145431baa646e8bbb30","0x89f2f3f7a8da1544f24682f41c68114a8f78c86bd36b066e27da13acb70f18d9f548773a16bd8e24789420e17183f137","0xada27fa4e90a086240c9164544d2528621a415a5497badb79f8019dc3dce4d12eb6b599597e47ec6ac39c81efda43520","0x90dc1eb21bf21c0187f359566fc4bf5386abea52799306a0e5a1151c0817c5f5bc60c86e76b1929c092c0f3ff48cedd2","0xb702a53ebcc17ae35d2e735a347d2c700e9cbef8eadbece33cac83df483b2054c126593e1f462cfc00a3ce9d737e2af5","0x9891b06455ec925a6f8eafffba05af6a38cc5e193acaaf74ffbf199df912c5197106c5e06d72942bbb032ce277b6417f","0x8c0ee71eb01197b019275bcf96cae94e81d2cdc3115dbf2d8e3080074260318bc9303597e8f72b18f965ad601d31ec43","0x8aaf580aaf75c1b7a5f99ccf60503506e62058ef43b28b02f79b8536a96be3f019c9f71caf327b4e6730134730d1bef5","0xae6f9fc21dd7dfa672b25a87eb0a41644f7609fab5026d5cedb6e43a06dbbfd6d6e30322a2598c8dedde88c52eaed626","0x8159b953ffece5693edadb2e906ebf76ff080ee1ad22698950d2d3bfc36ac5ea78f58284b2ca180664452d55bd54716c","0xab7647c32ca5e9856ac283a2f86768d68de75ceeba9e58b74c5324f8298319e52183739aba4340be901699d66ac9eb3f","0xa4d85a5701d89bcfaf1572db83258d86a1a0717603d6f24ac2963ffcf80f1265e5ab376a4529ca504f4396498791253c","0x816080c0cdbfe61b4d726c305747a9eb58ac26d9a35f501dd32ba43c098082d20faf3ccd41aad24600aa73bfa453dfac","0x84f3afac024f576b0fd9acc6f2349c2fcefc3f77dbe5a2d4964d14b861b88e9b1810334b908cf3427d9b67a8aee74b1
8","0x94b390655557b1a09110018e9b5a14490681ade275bdc83510b6465a1218465260d9a7e2a6e4ec700f58c31dc3659962","0xa8c66826b1c04a2dd4c682543242e7a57acae37278bd09888a3d17747c5b5fec43548101e6f46d703638337e2fd3277b","0x86e6f4608a00007fa533c36a5b054c5768ccafe41ad52521d772dcae4c8a4bcaff8f7609be30d8fab62c5988cbbb6830","0x837da4cf09ae8aa0bceb16f8b3bfcc3b3367aecac9eed6b4b56d7b65f55981ef066490764fb4c108792623ecf8cad383","0x941ff3011462f9b5bf97d8cbdb0b6f5d37a1b1295b622f5485b7d69f2cb2bcabc83630dae427f0259d0d9539a77d8424","0xb99e5d6d82aa9cf7d5970e7f710f4039ac32c2077530e4c2779250c6b9b373bc380adb0a03b892b652f649720672fc8c","0xa791c78464b2d65a15440b699e1e30ebd08501d6f2720adbc8255d989a82fcded2f79819b5f8f201bed84a255211b141","0x84af7ad4a0e31fcbb3276ab1ad6171429cf39adcf78dc03750dc5deaa46536d15591e26d53e953dfb31e1622bc0743ab","0xa833e62fe97e1086fae1d4917fbaf09c345feb6bf1975b5cb863d8b66e8d621c7989ab3dbecda36bc9eaffc5eaa6fa66","0xb4ef79a46a2126f53e2ebe62770feb57fd94600be29459d70a77c5e9cc260fa892be06cd60f886bf48459e48eb50d063","0xb43b8f61919ea380bf151c294e54d3a3ff98e20d1ee5efbfe38aa2b66fafbc6a49739793bd5cb1c809f8b30466277c3a","0xab37735af2412d2550e62df9d8b3b5e6f467f20de3890bf56faf1abf2bf3bd1d98dc3fa0ad5e7ab3fce0fa20409eb392","0x82416b74b1551d484250d85bb151fabb67e29cce93d516125533df585bc80779ab057ea6992801a3d7d5c6dcff87a018","0x8145d0787f0e3b5325190ae10c1d6bee713e6765fb6a0e9214132c6f78f4582bb2771aaeae40d3dad4bafb56bf7e36d8","0xb6935886349ecbdd5774e12196f4275c97ec8279fdf28ccf940f6a022ebb6de8e97d6d2173c3fe402cbe9643bed3883b","0x87ef9b4d3dc71ac86369f8ed17e0dd3b91d16d14ae694bc21a35b5ae37211b043d0e36d8ff07dcc513fb9e6481a1f37f","0xae1d0ded32f7e6f1dc8fef495879c1d9e01826f449f903c1e5034aeeabc5479a9e323b162b688317d46d35a42d570d86","0xa40d16497004db4104c6794e2f4428d75bdf70352685944f3fbe17526df333e46a4ca6de55a4a48c02ecf0bde8ba03c0","0x8d45121efba8cc308a498e8ee39ea6fa5cae9fb2e4aab1c2ff9d448aa8494ccbec9a078f978a86fcd97b5d5e7be7522a","0xa8173865c64634ba4ac2fa432740f5c05056a9deaf6427cb9b4b8da94ca5ddbc8c0c5d3185a
89b8b28878194de9cdfcd","0xb6ec06a74d690f6545f0f0efba236e63d1fdfba54639ca2617408e185177ece28901c457d02b849fd00f1a53ae319d0a","0xb69a12df293c014a40070e3e760169b6f3c627caf9e50b35a93f11ecf8df98b2bc481b410eecb7ab210bf213bbe944de","0x97e7dc121795a533d4224803e591eef3e9008bab16f12472210b73aaf77890cf6e3877e0139403a0d3003c12c8f45636","0xacdfa6fdd4a5acb7738cc8768f7cba84dbb95c639399b291ae8e4e63df37d2d4096900a84d2f0606bf534a9ccaa4993f","0x86ee253f3a9446a33e4d1169719b7d513c6b50730988415382faaf751988c10a421020609f7bcdef91be136704b906e2","0xaac9438382a856caf84c5a8a234282f71b5fc5f65219103b147e7e6cf565522285fbfd7417b513bdad8277a00f652ca1","0x83f3799d8e5772527930f5dc071a2e0a65471618993ec8990a96ccdeee65270e490bda9d26bb877612475268711ffd80","0x93f28a81ac8c0ec9450b9d762fae9c7f8feaace87a6ee6bd141ef1d2d0697ef1bbd159fe6e1de640dbdab2b0361fca8a","0xa0825c95ba69999b90eac3a31a3fd830ea4f4b2b7409bde5f202b61d741d6326852ce790f41de5cb0eccec7af4db30c1","0x83924b0e66233edd603c3b813d698daa05751fc34367120e3cf384ea7432e256ccee4d4daf13858950549d75a377107d","0x956fd9fa58345277e06ba2ec72f49ed230b8d3d4ff658555c52d6cddeb84dd4e36f1a614f5242d5ca0192e8daf0543c2","0x944869912476baae0b114cced4ff65c0e4c90136f73ece5656460626599051b78802df67d7201c55d52725a97f5f29fe","0x865cb25b64b4531fb6fe4814d7c8cd26b017a6c6b72232ff53defc18a80fe3b39511b23f9e4c6c7249d06e03b2282ed2","0x81e09ff55214960775e1e7f2758b9a6c4e4cd39edf7ec1adfaad51c52141182b79fe2176b23ddc7df9fd153e5f82d668","0xb31006896f02bc90641121083f43c3172b1039334501fbaf1672f7bf5d174ddd185f945adf1a9c6cf77be34c5501483d","0x88b92f6f42ae45e9f05b16e52852826e933efd0c68b0f2418ac90957fd018df661bc47c8d43c2a7d7bfcf669dab98c3c","0x92fc68f595853ee8683930751789b799f397135d002eda244fe63ecef2754e15849edde3ba2f0cc8b865c9777230b712","0x99ca06a49c5cd0bb097c447793fcdd809869b216a34c66c78c7e41e8c22f05d09168d46b8b1f3390db9452d91bc96dea","0xb48b9490a5d65296802431852d548d81047bbefc74fa7dc1d4e2a2878faacdfcb365ae59209cb0ade01901a283cbd15d","0xaff0fdbef7c188b120a02bc9085d7b808e88f73973773fef54707bf
2cd772cd066740b1b6f4127b5c349f657bd97e738","0x966fd4463b4f43dd8ccba7ad50baa42292f9f8b2e70da23bb6780e14155d9346e275ef03ddaf79e47020dcf43f3738bd","0x9330c3e1fadd9e08ac85f4839121ae20bbeb0a5103d84fa5aadbd1213805bdcda67bf2fb75fc301349cbc851b5559d20","0x993bb99867bd9041a71a55ad5d397755cfa7ab6a4618fc526179bfc10b7dc8b26e4372fe9a9b4a15d64f2b63c1052dda","0xa29b59bcfab51f9b3c490a3b96f0bf1934265c315349b236012adbd64a56d7f6941b2c8cc272b412044bc7731f71e1dc","0xa65c9cefe1fc35d089fe8580c2e7671ebefdb43014ac291528ff4deefd4883fd4df274af83711dad610dad0d615f9d65","0x944c78c56fb227ae632805d448ca3884cd3d2a89181cead3d2b7835e63297e6d740aa79a112edb1d4727824991636df5","0xa73d782da1db7e4e65d7b26717a76e16dd9fab4df65063310b8e917dc0bc24e0d6755df5546c58504d04d9e68c3b474a","0xaf80f0b87811ae3124f68108b4ca1937009403f87928bbc53480e7c5408d072053ace5eeaf5a5aba814dab8a45502085","0x88aaf1acfc6e2e19b8387c97da707cb171c69812fefdd4650468e9b2c627bd5ccfb459f4d8e56bdfd84b09ddf87e128f","0x92c97276ff6f72bab6e9423d02ad6dc127962dbce15a0dd1e4a393b4510c555df6aa27be0f697c0d847033a9ca8b8dfd","0xa0e07d43d96e2d85b6276b3c60aadb48f0aedf2de8c415756dc597249ea64d2093731d8735231dadc961e5682ac59479","0xadc9e6718a8f9298957d1da3842a7751c5399bbdf56f8de6c1c4bc39428f4aee6f1ba6613d37bf46b9403345e9d6fc81","0x951da434da4b20d949b509ceeba02e24da7ed2da964c2fcdf426ec787779c696b385822c7dbea4df3e4a35921f1e912c","0xa04cbce0d2b2e87bbf038c798a12ec828423ca6aca08dc8d481cf6466e3c9c73d4d4a7fa47df9a7e2e15aae9e9f67208","0x8f855cca2e440d248121c0469de1f94c2a71b8ee2682bbad3a78243a9e03da31d1925e6760dbc48a1957e040fae9abe8","0xb642e5b17c1df4a4e101772d73851180b3a92e9e8b26c918050f51e6dd3592f102d20b0a1e96f0e25752c292f4c903ff","0xa92454c300781f8ae1766dbbb50a96192da7d48ef4cbdd72dd8cbb44c6eb5913c112cc38e9144615fdc03684deb99420","0x8b74f7e6c2304f8e780df4649ef8221795dfe85fdbdaa477a1542d135b75c8be45bf89adbbb6f3ddf54ca40f02e733e9","0x85cf66292cbb30cec5fd835ab10c9fcb3aea95e093aebf123e9a83c26f322d76ebc89c4e914524f6c5f6ee7d74fc917d","0xae0bfe0cdc97c09542a7431820015f2d160
67b30dca56288013876025e81daa8c519e5e347268e19aa1a85fa1dc28793","0x921322fc6a47dc091afa0ad6df18ed14cde38e48c6e71550aa513918b056044983aee402de21051235eecf4ce8040fbe","0x96c030381e97050a45a318d307dcb3c8377b79b4dd5daf6337cded114de26eb725c14171b9b8e1b3c08fe1f5ea6b49e0","0x90c23b86b6111818c8baaf53a13eaee1c89203b50e7f9a994bf0edf851919b48edbac7ceef14ac9414cf70c486174a77","0x8bf6c301240d2d1c8d84c71d33a6dfc6d9e8f1cfae66d4d0f7a256d98ae12b0bcebfa94a667735ee89f810bcd7170cff","0xa41a4ffbbea0e36874d65c009ee4c3feffff322f6fc0e30d26ee4dbc1f46040d05e25d9d0ecb378cef0d24a7c2c4b850","0xa8d4cdd423986bb392a0a92c12a8bd4da3437eec6ef6af34cf5310944899287452a2eb92eb5386086d5063381189d10e","0xa81dd26ec057c4032a4ed7ad54d926165273ed51d09a1267b2e477535cf6966835a257c209e4e92d165d74fa75695fa3","0x8d7f708c3ee8449515d94fc26b547303b53d8dd55f177bc3b25d3da2768accd9bc8e9f09546090ebb7f15c66e6c9c723","0x839ba65cffcd24cfffa7ab3b21faabe3c66d4c06324f07b2729c92f15cad34e474b0f0ddb16cd652870b26a756b731d3","0x87f1a3968afec354d92d77e2726b702847c6afcabb8438634f9c6f7766de4c1504317dc4fa9a4a735acdbf985e119564","0x91a8a7fd6542f3e0673f07f510d850864b34ac087eb7eef8845a1d14b2b1b651cbdc27fa4049bdbf3fea54221c5c8549","0xaef3cf5f5e3a2385ead115728d7059e622146c3457d266c612e778324b6e06fbfb8f98e076624d2f3ce1035d65389a07","0x819915d6232e95ccd7693fdd78d00492299b1983bc8f96a08dcb50f9c0a813ed93ae53c0238345d5bea0beda2855a913","0x8e9ba68ded0e94935131b392b28218315a185f63bf5e3c1a9a9dd470944509ca0ba8f6122265f8da851b5cc2abce68f1","0xb28468e9b04ee9d69003399a3cf4457c9bf9d59f36ab6ceeb8e964672433d06b58beeea198fedc7edbaa1948577e9fa2","0xa633005e2c9f2fd94c8bce2dd5bb708fe946b25f1ec561ae65e54e15cdd88dc339f1a083e01f0d39610c8fe24151aaf0","0x841d0031e22723f9328dd993805abd13e0c99b0f59435d2426246996b08d00ce73ab906f66c4eab423473b409e972ce0","0x85758d1b084263992070ec8943f33073a2d9b86a8606672550c17545507a5b3c88d87382b41916a87ee96ff55a7aa535","0x8581b06b0fc41466ef94a76a1d9fb8ae0edca6d018063acf6a8ca5f4b02d76021902feba58972415691b4bdbc33ae3b4","0x83539597ff5e327
357ee62bc6bf8c0bcaec2f227c55c7c385a4806f0d37fb461f1690bad5066b8a5370950af32fafbef","0xaee3557290d2dc10827e4791d00e0259006911f3f3fce4179ed3c514b779160613eca70f720bff7804752715a1266ffa","0xb48d2f0c4e90fc307d5995464e3f611a9b0ef5fe426a289071f4168ed5cc4f8770c9332960c2ca5c8c427f40e6bb389f","0x847af8973b4e300bb06be69b71b96183fd1a0b9d51b91701bef6fcfde465068f1eb2b1503b07afda380f18d69de5c9e1","0xa70a6a80ce407f07804c0051ac21dc24d794b387be94eb24e1db94b58a78e1bcfb48cd0006db8fc1f9bedaece7a44fbe","0xb40e942b8fa5336910ff0098347df716bff9d1fa236a1950c16eeb966b3bc1a50b8f7b0980469d42e75ae13ced53cead","0xb208fabaa742d7db3148515330eb7a3577487845abdb7bd9ed169d0e081db0a5816595c33d375e56aeac5b51e60e49d3","0xb7c8194b30d3d6ef5ab66ec88ad7ebbc732a3b8a41731b153e6f63759a93f3f4a537eab9ad369705bd730184bdbbdc34","0x9280096445fe7394d04aa1bc4620c8f9296e991cc4d6c131bd703cb1cc317510e6e5855ac763f4d958c5edfe7eebeed7","0xabc2aa4616a521400af1a12440dc544e3c821313d0ab936c86af28468ef8bbe534837e364598396a81cf8d06274ed5a6","0xb18ca8a3325adb0c8c18a666d4859535397a1c3fe08f95eebfac916a7a99bbd40b3c37b919e8a8ae91da38bc00fa56c0","0x8a40c33109ecea2a8b3558565877082f79121a432c45ec2c5a5e0ec4d1c203a6788e6b69cb37f1fd5b8c9a661bc5476d","0x88c47301dd30998e903c84e0b0f2c9af2e1ce6b9f187dab03528d44f834dc991e4c86d0c474a2c63468cf4020a1e24a0","0x920c832853e6ab4c851eecfa9c11d3acc7da37c823be7aa1ab15e14dfd8beb5d0b91d62a30cec94763bd8e4594b66600","0x98e1addbe2a6b8edc7f12ecb9be81c3250aeeca54a1c6a7225772ca66549827c15f3950d01b8eb44aecb56fe0fff901a","0x8cfb0fa1068be0ec088402f5950c4679a2eb9218c729da67050b0d1b2d7079f3ddf4bf0f57d95fe2a8db04bc6bcdb20c","0xb70f381aafe336b024120453813aeab70baac85b9c4c0f86918797b6aee206e6ed93244a49950f3d8ec9f81f4ac15808","0xa4c8edf4aa33b709a91e1062939512419711c1757084e46f8f4b7ed64f8e682f4e78b7135920c12f0eb0422fe9f87a6a","0xb4817e85fd0752d7ebb662d3a51a03367a84bac74ebddfba0e5af5e636a979500f72b148052d333b3dedf9edd2b4031b","0xa87430169c6195f5d3e314ff2d1c2f050e766fd5d2de88f5207d72dba4a7745bb86d0baca6e9ae156582d0d89e5838c7
","0x991b00f8b104566b63a12af4826b61ce7aa40f4e5b8fff3085e7a99815bdb4471b6214da1e480214fac83f86a0b93cc5","0xb39966e3076482079de0678477df98578377a094054960ee518ef99504d6851f8bcd3203e8da5e1d4f6f96776e1fe6eb","0xa448846d9dc2ab7a0995fa44b8527e27f6b3b74c6e03e95edb64e6baa4f1b866103f0addb97c84bef1d72487b2e21796","0x894bec21a453ae84b592286e696c35bc30e820e9c2fd3e63dd4fbe629e07df16439c891056070faa490155f255bf7187","0xa9ec652a491b11f6a692064e955f3f3287e7d2764527e58938571469a1e29b5225b9415bd602a45074dfbfe9c131d6ca","0xb39d37822e6cbe28244b5f42ce467c65a23765bd16eb6447c5b3e942278069793763483dafd8c4dd864f8917aad357fe","0x88dba51133f2019cb266641c56101e3e5987d3b77647a2e608b5ff9113dfc5f85e2b7c365118723131fbc0c9ca833c9c","0xb566579d904b54ecf798018efcb824dccbebfc6753a0fd2128ac3b4bd3b038c2284a7c782b5ca6f310eb7ea4d26a3f0a","0xa97a55c0a492e53c047e7d6f9d5f3e86fb96f3dddc68389c0561515343b66b4bc02a9c0d5722dff1e3445308240b27f7","0xa044028ab4bcb9e1a2b9b4ca4efbf04c5da9e4bf2fff0e8bd57aa1fc12a71e897999c25d9117413faf2f45395dee0f13","0xa78dc461decbeaeed8ebd0909369b491a5e764d6a5645a7dac61d3140d7dc0062526f777b0eb866bff27608429ebbdde","0xb2c2a8991f94c39ca35fea59f01a92cb3393e0eccb2476dfbf57261d406a68bd34a6cff33ed80209991688c183609ef4","0x84189eefb521aff730a4fd3fd5b10ddfd29f0d365664caef63bb015d07e689989e54c33c2141dd64427805d37a7e546e","0x85ac80bd734a52235da288ff042dea9a62e085928954e8eacd2c751013f61904ed110e5b3afe1ab770a7e6485efb7b5e","0x9183a560393dcb22d0d5063e71182020d0fbabb39e32493eeffeb808df084aa243eb397027f150b55a247d1ed0c8513e","0x81c940944df7ecc58d3c43c34996852c3c7915ed185d7654627f7af62abae7e0048dd444a6c09961756455000bd96d09","0xaa8c34e164019743fd8284b84f06c3b449aae7996e892f419ee55d82ad548cb300fd651de329da0384243954c0ef6a60","0x89a7b7bdfc7e300d06a14d463e573d6296d8e66197491900cc9ae49504c4809ff6e61b758579e9091c61085ba1237b83","0x878d21809ba540f50bd11f4c4d9590fb6f3ab9de5692606e6e2ef4ed9d18520119e385be5e1f4b3f2e2b09c319f0e8fc","0x8eb248390193189cf0355365e630b782cd15751e672dc478b39d75dc681234dcd9309df0d11f
4610dbb249c1e6be7ef9","0xa1d7fb3aecb896df3a52d6bd0943838b13f1bd039c936d76d03de2044c371d48865694b6f532393b27fd10a4cf642061","0xa34bca58a24979be442238cbb5ece5bee51ae8c0794dd3efb3983d4db713bc6f28a96e976ac3bd9a551d3ed9ba6b3e22","0x817c608fc8cacdd178665320b5a7587ca21df8bdd761833c3018b967575d25e3951cf3d498a63619a3cd2ad4406f5f28","0x86c95707db0495689afd0c2e39e97f445f7ca0edffad5c8b4cacd1421f2f3cc55049dfd504f728f91534e20383955582","0x99c3b0bb15942c301137765d4e19502f65806f3b126dc01a5b7820c87e8979bce6a37289a8f6a4c1e4637227ad5bf3bf","0x8aa1518a80ea8b074505a9b3f96829f5d4afa55a30efe7b4de4e5dbf666897fdd2cf31728ca45921e21a78a80f0e0f10","0x8d74f46361c79e15128ac399e958a91067ef4cec8983408775a87eca1eed5b7dcbf0ddf30e66f51780457413496c7f07","0xa41cde4a786b55387458a1db95171aca4fd146507b81c4da1e6d6e495527c3ec83fc42fad1dfe3d92744084a664fd431","0x8c352852c906fae99413a84ad11701f93f292fbf7bd14738814f4c4ceab32db02feb5eb70bc73898b0bc724a39d5d017","0xa5993046e8f23b71ba87b7caa7ace2d9023fb48ce4c51838813174880d918e9b4d2b0dc21a2b9c6f612338c31a289df8","0x83576d3324bf2d8afbfb6eaecdc5d767c8e22e7d25160414924f0645491df60541948a05e1f4202e612368e78675de8a","0xb43749b8df4b15bc9a3697e0f1c518e6b04114171739ef1a0c9c65185d8ec18e40e6954d125cbc14ebc652cf41ad3109","0xb4eebd5d80a7327a040cafb9ccdb12b2dfe1aa86e6bc6d3ac8a57fadfb95a5b1a7332c66318ff72ba459f525668af056","0x9198be7f1d413c5029b0e1c617bcbc082d21abe2c60ec8ce9b54ca1a85d3dba637b72fda39dae0c0ae40d047eab9f55a","0x8d96a0232832e24d45092653e781e7a9c9520766c3989e67bbe86b3a820c4bf621ea911e7cd5270a4bfea78b618411f6","0x8d7160d0ea98161a2d14d46ef01dff72d566c330cd4fabd27654d300e1bc7644c68dc8eabf2a20a59bfe7ba276545f9b","0xabb60fce29dec7ba37e3056e412e0ec3e05538a1fc0e2c68877378c867605966108bc5742585ab6a405ce0c962b285b6","0x8fabffa3ed792f05e414f5839386f6449fd9f7b41a47595c5d71074bd1bb3784cc7a1a7e1ad6b041b455035957e5b2dc","0x90ff017b4804c2d0533b72461436b10603ab13a55f86fd4ec11b06a70ef8166f958c110519ca1b4cc7beba440729fe2d","0xb340cfd120f6a4623e3a74cf8c32bfd7cd61a280b59dfd17b15ca8fa
e4d82f64a6f15fbde4c02f424debc72b7db5fe67","0x871311c9c7220c932e738d59f0ecc67a34356d1429fe570ca503d340c9996cb5ee2cd188fad0e3bd16e4c468ec1dbebd","0xa772470262186e7b94239ba921b29f2412c148d6f97c4412e96d21e55f3be73f992f1ad53c71008f0558ec3f84e2b5a7","0xb2a897dcb7ffd6257f3f2947ec966f2077d57d5191a88840b1d4f67effebe8c436641be85524d0a21be734c63ab5965d","0xa044f6eacc48a4a061fa149500d96b48cbf14853469aa4d045faf3dca973be1bd4b4ce01646d83e2f24f7c486d03205d","0x981af5dc2daa73f7fa9eae35a93d81eb6edba4a7f673b55d41f6ecd87a37685d31bb40ef4f1c469b3d72f2f18b925a17","0x912d2597a07864de9020ac77083eff2f15ceb07600f15755aba61251e8ce3c905a758453b417f04d9c38db040954eb65","0x9642b7f6f09394ba5e0805734ef6702c3eddf9eea187ba98c676d5bbaec0e360e3e51dc58433aaa1e2da6060c8659cb7","0x8ab3836e0a8ac492d5e707d056310c4c8e0489ca85eb771bff35ba1d658360084e836a6f51bb990f9e3d2d9aeb18fbb5","0x879e058e72b73bb1f4642c21ffdb90544b846868139c6511f299aafe59c2d0f0b944dffc7990491b7c4edcd6a9889250","0xb9e60b737023f61479a4a8fd253ed0d2a944ea6ba0439bbc0a0d3abf09b0ad1f18d75555e4a50405470ae4990626f390","0xb9c2535d362796dcd673640a9fa2ebdaec274e6f8b850b023153b0a7a30fffc87f96e0b72696f647ebe7ab63099a6963","0x94aeff145386a087b0e91e68a84a5ede01f978f9dd9fe7bebca78941938469495dc30a96bba9508c0d017873aeea9610","0x98b179f8a3d9f0d0a983c30682dd425a2ddc7803be59bd626c623c8951a5179117d1d2a68254c95c9952989877d0ee55","0x889ecf5f0ee56938273f74eb3e9ecfb5617f04fb58e83fe4c0e4aef51615cf345bc56f3f61b17f6eed3249d4afd54451","0xa0f2b2c39bcea4b50883e2587d16559e246248a66ecb4a4b7d9ab3b51fb39fe98d83765e087eee37a0f86b0ba4144c02","0xb2a61e247ed595e8a3830f7973b07079cbda510f28ad8c78c220b26cb6acde4fbb5ee90c14a665f329168ee951b08cf0","0x95bd0fcfb42f0d6d8a8e73d7458498a85bcddd2fb132fd7989265648d82ac2707d6d203fac045504977af4f0a2aca4b7","0x843e5a537c298666e6cf50fcc044f13506499ef83c802e719ff2c90e85003c132024e04711be7234c04d4b0125512d5d","0xa46d1797c5959dcd3a5cfc857488f4d96f74277c3d13b98b133620192f79944abcb3a361d939a100187f1b0856eae875","0xa1c7786736d6707a48515c38660615fcec67
eb8a2598f46657855215f804fd72ab122d17f94fcffad8893f3be658dca7","0xb23dc9e610abc7d8bd21d147e22509a0fa49db5be6ea7057b51aae38e31654b3aa044df05b94b718153361371ba2f622","0xb00cc8f257d659c22d30e6d641f79166b1e752ea8606f558e4cad6fc01532e8319ea4ee12265ba4140ac45aa4613c004","0xac7019af65221b0cc736287b32d7f1a3561405715ba9a6a122342e04e51637ba911c41573de53e4781f2230fdcb2475f","0x81a630bc41b3da8b3eb4bf56cba10cd9f93153c3667f009dc332287baeb707d505fb537e6233c8e53d299ec0f013290c","0xa6b7aea5c545bb76df0f230548539db92bc26642572cb7dd3d5a30edca2b4c386f44fc8466f056b42de2a452b81aff5b","0x8271624ff736b7b238e43943c81de80a1612207d32036d820c11fc830c737972ccc9c60d3c2359922b06652311e3c994","0x8a684106458cb6f4db478170b9ad595d4b54c18bf63b9058f095a2fa1b928c15101472c70c648873d5887880059ed402","0xa5cc3c35228122f410184e4326cf61a37637206e589fcd245cb5d0cec91031f8f7586b80503070840fdfd8ce75d3c88b","0x9443fc631aed8866a7ed220890911057a1f56b0afe0ba15f0a0e295ab97f604b134b1ed9a4245e46ee5f9a93aa74f731","0x984b6f7d79835dffde9558c6bb912d992ca1180a2361757bdba4a7b69dc74b056e303adc69fe67414495dd9c2dd91e64","0xb15a5c8cba5de080224c274d31c68ed72d2a7126d347796569aef0c4e97ed084afe3da4d4b590b9dda1a07f0c2ff3dfb","0x991708fe9650a1f9a4e43938b91d45dc68c230e05ee999c95dbff3bf79b1c1b2bb0e7977de454237c355a73b8438b1d9","0xb4f7edc7468b176a4a7c0273700c444fa95c726af6697028bed4f77eee887e3400f9c42ee15b782c0ca861c4c3b8c98a","0x8c60dcc16c51087eb477c13e837031d6c6a3dc2b8bf8cb43c23f48006bc7173151807e866ead2234b460c2de93b31956","0x83ad63e9c910d1fc44bc114accfb0d4d333b7ebe032f73f62d25d3e172c029d5e34a1c9d547273bf6c0fead5c8801007","0x85de73213cc236f00777560756bdbf2b16841ba4b55902cf2cad9742ecaf5d28209b012ceb41f337456dfeca93010cd7","0xa7561f8827ccd75b6686ba5398bb8fc3083351c55a589b18984e186820af7e275af04bcd4c28e1dc11be1e8617a0610b","0x88c0a4febd4068850557f497ea888035c7fc9f404f6cc7794e7cc8722f048ad2f249e7dc62743e7a339eb7473ad3b0cd","0x932b22b1d3e6d5a6409c34980d176feb85ada1bf94332ef5c9fc4d42b907dabea608ceef9b5595ef3feee195151f18d8","0xa2867bb3f5ab88fb
dae3a16c9143ab8a8f4f476a2643c505bb9f37e5b1fd34d216cab2204c9a017a5a67b7ad2dda10e8","0xb573d5f38e4e9e8a3a6fd82f0880dc049efa492a946d00283019bf1d5e5516464cf87039e80aef667cb86fdea5075904","0xb948f1b5ab755f3f5f36af27d94f503b070696d793b1240c1bdfd2e8e56890d69e6904688b5f8ff5a4bdf5a6abfe195f","0x917eae95ebc4109a2e99ddd8fec7881d2f7aaa0e25fda44dec7ce37458c2ee832f1829db7d2dcfa4ca0f06381c7fe91d","0x95751d17ed00a3030bce909333799bb7f4ab641acf585807f355b51d6976dceee410798026a1a004ef4dcdff7ec0f5b8","0xb9b7bd266f449a79bbfe075e429613e76c5a42ac61f01c8f0bbbd34669650682efe01ff9dbbc400a1e995616af6aa278","0xac1722d097ce9cd7617161f8ec8c23d68f1fb1c9ca533e2a8b4f78516c2fd8fb38f23f834e2b9a03bb06a9d655693ca9","0xa7ad9e96ffd98db2ecdb6340c5d592614f3c159abfd832fe27ee9293519d213a578e6246aae51672ee353e3296858873","0x989b8814d5de7937c4acafd000eec2b4cd58ba395d7b25f98cafd021e8efa37029b29ad8303a1f6867923f5852a220eb","0xa5bfe6282c771bc9e453e964042d44eff4098decacb89aecd3be662ea5b74506e1357ab26f3527110ba377711f3c9f41","0x8900a7470b656639721d2abbb7b06af0ac4222ab85a1976386e2a62eb4b88bfb5b72cf7921ddb3cf3a395d7eeb192a2e","0x95a71b55cd1f35a438cf5e75f8ff11c5ec6a2ebf2e4dba172f50bfad7d6d5dca5de1b1afc541662c81c858f7604c1163","0x82b5d62fea8db8d85c5bc3a76d68dedd25794cf14d4a7bc368938ffca9e09f7e598fdad2a5aac614e0e52f8112ae62b9","0x997173f07c729202afcde3028fa7f52cefc90fda2d0c8ac2b58154a5073140683e54c49ed1f254481070d119ce0ce02a","0xaeffb91ccc7a72bbd6ffe0f9b99c9e66e67d59cec2e02440465e9636a613ab3017278cfa72ea8bc4aba9a8dc728cb367","0x952743b06e8645894aeb6440fc7a5f62dd3acf96dab70a51e20176762c9751ea5f2ba0b9497ccf0114dc4892dc606031","0x874c63baeddc56fbbca2ff6031f8634b745f6e34ea6791d7c439201aee8f08ef5ee75f7778700a647f3b21068513fce6","0x85128fec9c750c1071edfb15586435cc2f317e3e9a175bb8a9697bcda1eb9375478cf25d01e7fed113483b28f625122d","0x85522c9576fd9763e32af8495ae3928ed7116fb70d4378448926bc9790e8a8d08f98cf47648d7da1b6e40d6a210c7924","0x97d0f37a13cfb723b848099ca1c14d83e9aaf2f7aeb71829180e664b7968632a08f6a85f557d74b55afe6242f2a36e7c"
,"0xabaa472d6ad61a5fccd1a57c01aa1bc081253f95abbcba7f73923f1f11c4e79b904263890eeb66926de3e2652f5d1c70","0xb3c04945ba727a141e5e8aec2bf9aa3772b64d8fd0e2a2b07f3a91106a95cbcb249adcd074cbe498caf76fffac20d4ef","0x82c46781a3d730d9931bcabd7434a9171372dde57171b6180e5516d4e68db8b23495c8ac3ab96994c17ddb1cf249b9fb","0xa202d8b65613c42d01738ccd68ed8c2dbc021631f602d53f751966e04182743ebc8e0747d600b8a8676b1da9ae7f11ab","0xae73e7256e9459db04667a899e0d3ea5255211fb486d084e6550b6dd64ca44af6c6b2d59d7aa152de9f96ce9b58d940d","0xb67d87b176a9722945ec7593777ee461809861c6cfd1b945dde9ee4ff009ca4f19cf88f4bbb5c80c9cbab2fe25b23ac8","0x8f0b7a317a076758b0dac79959ee4a06c08b07d0f10538a4b53d3da2eda16e2af26922feb32c090330dc4d969cf69bd3","0x90b36bf56adbd8c4b6cb32febc3a8d5f714370c2ac3305c10fa6d168dffb2a026804517215f9a2d4ec8310cdb6bb459b","0xaa80c19b0682ead69934bf18cf476291a0beddd8ef4ed75975d0a472e2ab5c70f119722a8574ae4973aceb733d312e57","0xa3fc9abb12574e5c28dcb51750b4339b794b8e558675eef7d26126edf1de920c35e992333bcbffcbf6a5f5c0d383ce62","0xa1573ff23ab972acdcd08818853b111fc757fdd35aa070186d3e11e56b172fb49d840bf297ac0dd222e072fc09f26a81","0x98306f2be4caa92c2b4392212d0cbf430b409b19ff7d5b899986613bd0e762c909fc01999aa94be3bd529d67f0113d7f","0x8c1fc42482a0819074241746d17dc89c0304a2acdae8ed91b5009e9e3e70ff725ba063b4a3e68fdce05b74f5180c545e","0xa6c6113ebf72d8cf3163b2b8d7f3fa24303b13f55752522c660a98cd834d85d8c79214d900fa649499365e2e7641f77a","0xab95eea424f8a2cfd9fb1c78bb724e5b1d71a0d0d1e4217c5d0f98b0d8bbd3f8400a2002abc0a0e4576d1f93f46fefad","0x823c5a4fd8cf4a75fdc71d5f2dd511b6c0f189b82affeacd2b7cfcad8ad1a5551227dcc9bfdb2e34b2097eaa00efbb51","0xb97314dfff36d80c46b53d87a61b0e124dc94018a0bb680c32765b9a2d457f833a7c42bbc90b3b1520c33a182580398d","0xb17566ee3dcc6bb3b004afe4c0136dfe7dd27df9045ae896dca49fb36987501ae069eb745af81ba3fc19ff037e7b1406","0xb0bdc0f55cfd98d331e3a0c4fbb776a131936c3c47c6bffdc3aaf7d8c9fa6803fbc122c2fefbb532e634228687d52174","0xaa5d9e60cc9f0598559c28bb9bdd52aa46605ab4ffe3d192ba982398e72cec9a2a44c0d0d938c
e69935693cabc0887ea","0x802b6459d2354fa1d56c592ac1346c428dadea6b6c0a87bf7d309bab55c94e1cf31dd98a7a86bd92a840dd51f218b91b","0xa526914efdc190381bf1a73dd33f392ecf01350b9d3f4ae96b1b1c3d1d064721c7d6eec5788162c933245a3943f5ee51","0xb3b8fcf637d8d6628620a1a99dbe619eabb3e5c7ce930d6efd2197e261bf394b74d4e5c26b96c4b8009c7e523ccfd082","0x8f7510c732502a93e095aba744535f3928f893f188adc5b16008385fb9e80f695d0435bfc5b91cdad4537e87e9d2551c","0x97b90beaa56aa936c3ca45698f79273a68dd3ccd0076eab48d2a4db01782665e63f33c25751c1f2e070f4d1a8525bf96","0xb9fb798324b1d1283fdc3e48288e3861a5449b2ab5e884b34ebb8f740225324af86e4711da6b5cc8361c1db15466602f","0xb6d52b53cea98f1d1d4c9a759c25bf9d8a50b604b144e4912acbdbdc32aab8b9dbb10d64a29aa33a4f502121a6fb481c","0x9174ffff0f2930fc228f0e539f5cfd82c9368d26b074467f39c07a774367ff6cccb5039ac63f107677d77706cd431680","0xa33b6250d4ac9e66ec51c063d1a6a31f253eb29bbaed12a0d67e2eccfffb0f3a52750fbf52a1c2aaba8c7692346426e7","0xa97025fd5cbcebe8ef865afc39cd3ea707b89d4e765ec817fd021d6438e02fa51e3544b1fd45470c58007a08efac6edd","0xb32a78480edd9ff6ba2f1eec4088db5d6ceb2d62d7e59e904ecaef7bb4a2e983a4588e51692b3be76e6ffbc0b5f911a5","0xb5ab590ef0bb77191f00495b33d11c53c65a819f7d0c1f9dc4a2caa147a69c77a4fff7366a602d743ee1f395ce934c1e","0xb3fb0842f9441fb1d0ee0293b6efbc70a8f58d12d6f769b12872db726b19e16f0f65efbc891cf27a28a248b0ef9c7e75","0x9372ad12856fefb928ccb0d34e198df99e2f8973b07e9d417a3134d5f69e12e79ff572c4e03ccd65415d70639bc7c73e","0xaa8d6e83d09ce216bfe2009a6b07d0110d98cf305364d5529c170a23e693aabb768b2016befb5ada8dabdd92b4d012bb","0xa954a75791eeb0ce41c85200c3763a508ed8214b5945a42c79bfdcfb1ec4f86ad1dd7b2862474a368d4ac31911a2b718","0x8e2081cfd1d062fe3ab4dab01f68062bac802795545fede9a188f6c9f802cb5f884e60dbe866710baadbf55dc77c11a4","0xa2f06003b9713e7dd5929501ed485436b49d43de80ea5b15170763fd6346badf8da6de8261828913ee0dacd8ff23c0e1","0x98eecc34b838e6ffd1931ca65eec27bcdb2fdcb61f33e7e5673a93028c5865e0d1bf6d3bec040c5e96f9bd08089a53a4","0x88cc16019741b341060b95498747db4377100d2a5bf0a5f516f7dec71
b62bcb6e779de2c269c946d39040e03b3ae12b7","0xad1135ccbc3019d5b2faf59a688eef2500697642be8cfbdf211a1ab59abcc1f24483e50d653b55ff1834675ac7b4978f","0xa946f05ed9972f71dfde0020bbb086020fa35b482cce8a4cc36dd94355b2d10497d7f2580541bb3e81b71ac8bba3c49f","0xa83aeed488f9a19d8cfd743aa9aa1982ab3723560b1cd337fc2f91ad82f07afa412b3993afb845f68d47e91ba4869840","0x95eebe006bfc316810cb71da919e5d62c2cebb4ac99d8e8ef67be420302320465f8b69873470982de13a7c2e23516be9","0xa55f8961295a11e91d1e5deadc0c06c15dacbfc67f04ccba1d069cba89d72aa3b3d64045579c3ea8991b150ac29366ae","0xb321991d12f6ac07a5de3c492841d1a27b0d3446082fbce93e7e1f9e8d8fe3b45d41253556261c21b70f5e189e1a7a6f","0xa0b0822f15f652ce7962a4f130104b97bf9529797c13d6bd8e24701c213cc37f18157bd07f3d0f3eae6b7cd1cb40401f","0x96e2fa4da378aa782cc2d5e6e465fc9e49b5c805ed01d560e9b98abb5c0de8b74a2e7bec3aa5e2887d25cccb12c66f0c","0x97e4ab610d414f9210ed6f35300285eb3ccff5b0b6a95ed33425100d7725e159708ea78704497624ca0a2dcabce3a2f9","0x960a375b17bdb325761e01e88a3ea57026b2393e1d887b34b8fa5d2532928079ce88dc9fd06a728b26d2bb41b12b9032","0x8328a1647398e832aadc05bd717487a2b6fcdaa0d4850d2c4da230c6a2ed44c3e78ec4837b6094f3813f1ee99414713f","0xaa283834ebd18e6c99229ce4b401eda83f01d904f250fedd4e24f1006f8fa0712a6a89a7296a9bf2ce8de30e28d1408e","0xb29e097f2caadae3e0f0ae3473c072b0cd0206cf6d2e9b22c1a5ad3e07d433e32bd09ed1f4e4276a2da4268633357b7f","0x9539c5cbba14538b2fe077ecf67694ef240da5249950baaabea0340718b882a966f66d97f08556b08a4320ceb2cc2629","0xb4529f25e9b42ae8cf8338d2eface6ba5cd4b4d8da73af502d081388135c654c0b3afb3aa779ffc80b8c4c8f4425dd2b","0x95be0739c4330619fbe7ee2249c133c91d6c07eab846c18c5d6c85fc21ac5528c5d56dcb0145af68ed0c6a79f68f2ccd","0xac0c83ea802227bfc23814a24655c9ff13f729619bcffdb487ccbbf029b8eaee709f8bddb98232ef33cd70e30e45ca47","0xb503becb90acc93b1901e939059f93e671900ca52c6f64ae701d11ac891d3a050b505d89324ce267bc43ab8275da6ffe","0x98e3811b55b1bacb70aa409100abb1b870f67e6d059475d9f278c751b6e1e2e2d6f2e586c81a9fb6597fda06e7923274","0xb0b0f61a44053fa6c715dbb0731e35d48dba2
57d134f851ee1b81fd49a5c51a90ebf5459ec6e489fce25da4f184fbdb1","0xb1d2117fe811720bb997c7c93fe9e4260dc50fca8881b245b5e34f724aaf37ed970cdad4e8fcb68e05ac8cf55a274a53","0xa10f502051968f14b02895393271776dee7a06db9de14effa0b3471825ba94c3f805302bdddac4d397d08456f620999d","0xa3dbad2ef060ae0bb7b02eaa4a13594f3f900450faa1854fc09620b01ac94ab896321dfb1157cf2374c27e5718e8026a","0xb550fdec503195ecb9e079dcdf0cad559d64d3c30818ef369b4907e813e689da316a74ad2422e391b4a8c2a2bef25fc0","0xa25ba865e2ac8f28186cea497294c8649a201732ecb4620c4e77b8e887403119910423df061117e5f03fc5ba39042db1","0xb3f88174e03fdb443dd6addd01303cf88a4369352520187c739fc5ae6b22fa99629c63c985b4383219dab6acc5f6f532","0x97a7503248e31e81b10eb621ba8f5210c537ad11b539c96dfb7cf72b846c7fe81bd7532c5136095652a9618000b7f8d3","0xa8bcdc1ce5aa8bfa683a2fc65c1e79de8ff5446695dcb8620f7350c26d2972a23da22889f9e2b1cacb3f688c6a2953dc","0x8458c111df2a37f5dd91a9bee6c6f4b79f4f161c93fe78075b24a35f9817da8dde71763218d627917a9f1f0c4709c1ed","0xac5f061a0541152b876cbc10640f26f1cc923c9d4ae1b6621e4bb3bf2cec59bbf87363a4eb72fb0e5b6d4e1c269b52d5","0xa9a25ca87006e8a9203cbb78a93f50a36694aa4aad468b8d80d3feff9194455ca559fcc63838128a0ab75ad78c07c13a","0xa450b85f5dfffa8b34dfd8bc985f921318efacf8857cf7948f93884ba09fb831482ee90a44224b1a41e859e19b74962f","0x8ed91e7f92f5c6d7a71708b6132f157ac226ecaf8662af7d7468a4fa25627302efe31e4620ad28719318923e3a59bf82","0xab524165fd4c71b1fd395467a14272bd2b568592deafa039d8492e9ef36c6d3f96927c95c72d410a768dc0b6d1fbbc9b","0xb662144505aa8432c75ffb8d10318526b6d5777ac7af9ebfad87d9b0866c364f7905a6352743bd8fd79ffd9d5dd4f3e6","0xa48f1677550a5cd40663bb3ba8f84caaf8454f332d0ceb1d94dbea52d0412fe69c94997f7749929712fd3995298572f7","0x8391cd6e2f6b0c242de1117a612be99776c3dc95cb800b187685ea5bf7e2722275eddb79fd7dfc8be8e389c4524cdf70","0x875d3acb9af47833b72900bc0a2448999d638f153c5e97e8a14ec02d0c76f6264353a7e275e1f1a5855daced523d243b","0x91f1823657d30b59b2f627880a9a9cb530f5aca28a9fd217fe6f2f5133690dfe7ad5a897872e400512db2e788b3f7628","0xad3564332aa56cea8
4123fc7ca79ea70bb4fef2009fa131cb44e4b15e8613bd11ca1d83b9d9bf456e4b7fee9f2e8b017","0x8c530b84001936d5ab366c84c0b105241a26d1fb163669f17c8f2e94776895c2870edf3e1bc8ccd04d5e65531471f695","0x932d01fa174fdb0c366f1230cffde2571cc47485f37f23ba5a1825532190cc3b722aeb1f15aed62cf83ccae9403ba713","0x88b28c20585aca50d10752e84b901b5c2d58efef5131479fbbe53de7bce2029e1423a494c0298e1497669bd55be97a5d","0xb914148ca717721144ebb3d3bf3fcea2cd44c30c5f7051b89d8001502f3856fef30ec167174d5b76265b55d70f8716b5","0x81d0173821c6ddd2a068d70766d9103d1ee961c475156e0cbd67d54e668a796310474ef698c7ab55abe6f2cf76c14679","0x8f28e8d78e2fe7fa66340c53718e0db4b84823c8cfb159c76eac032a62fb53da0a5d7e24ca656cf9d2a890cb2a216542","0x8a26360335c73d1ab51cec3166c3cf23b9ea51e44a0ad631b0b0329ef55aaae555420348a544e18d5760969281759b61","0x94f326a32ed287545b0515be9e08149eb0a565025074796d72387cc3a237e87979776410d78339e23ef3172ca43b2544","0xa785d2961a2fa5e70bffa137858a92c48fe749fee91b02599a252b0cd50d311991a08efd7fa5e96b78d07e6e66ffe746","0x94af9030b5ac792dd1ce517eaadcec1482206848bea4e09e55cc7f40fd64d4c2b3e9197027c5636b70d6122c51d2235d","0x9722869f7d1a3992850fe7be405ec93aa17dc4d35e9e257d2e469f46d2c5a59dbd504056c85ab83d541ad8c13e8bcd54","0xb13c4088b61a06e2c03ac9813a75ff1f68ffdfee9df6a8f65095179a475e29cc49119cad2ce05862c3b1ac217f3aace9","0x8c64d51774753623666b10ca1b0fe63ae42f82ed6aa26b81dc1d48c86937c5772eb1402624c52a154b86031854e1fb9f","0xb47e4df18002b7dac3fee945bf9c0503159e1b8aafcce2138818e140753011b6d09ef1b20894e08ba3006b093559061b","0x93cb5970076522c5a0483693f6a35ffd4ea2aa7aaf3730c4eccd6af6d1bebfc1122fc4c67d53898ae13eb6db647be7e2","0xa68873ef80986795ea5ed1a597d1cd99ed978ec25e0abb57fdcc96e89ef0f50aeb779ff46e3dce21dc83ada3157a8498","0x8cab67f50949cc8eee6710e27358aea373aae3c92849f8f0b5531c080a6300cdf2c2094fe6fecfef6148de0d28446919","0x993e932bcb616dbaa7ad18a4439e0565211d31071ef1b85a0627db74a05d978c60d507695eaeea5c7bd9868a21d06923","0xacdadff26e3132d9478a818ef770e9fa0d2b56c6f5f48bd3bd674436ccce9bdfc34db884a73a30c04c5f5e9764cb2218",
"0xa0d3e64c9c71f84c0eef9d7a9cb4fa184224b969db5514d678e93e00f98b41595588ca802643ea225512a4a272f5f534","0x91c9140c9e1ba6e330cb08f6b2ce4809cd0d5a0f0516f70032bf30e912b0ed684d07b413b326ab531ee7e5b4668c799b","0x87bc2ee7a0c21ba8334cd098e35cb703f9af57f35e091b8151b9b63c3a5b0f89bd7701dbd44f644ea475901fa6d9ef08","0x9325ccbf64bf5d71b303e31ee85d486298f9802c5e55b2c3d75427097bf8f60fa2ab4fcaffa9b60bf922c3e24fbd4b19","0x95d0506e898318f3dc8d28d16dfd9f0038b54798838b3c9be2a2ae3c2bf204eb496166353fc042220b0bd4f6673b9285","0x811de529416331fe9c416726d45df9434c29dcd7e949045eb15740f47e97dde8f31489242200e19922cac2a8b7c6fd1f","0xade632d04a4c8bbab6ca7df370b2213cb9225023e7973f0e29f4f5e52e8aeaabc65171306bbdd12a67b195dfbb96d48f","0x88b7f029e079b6ae956042c0ea75d53088c5d0efd750dd018adaeacf46be21bf990897c58578c491f41afd3978d08073","0x91f477802de507ffd2be3f4319903119225b277ad24f74eb50f28b66c14d32fae53c7edb8c7590704741af7f7f3e3654","0x809838b32bb4f4d0237e98108320d4b079ee16ed80c567e7548bd37e4d7915b1192880f4812ac0e00476d246aec1dbc8","0x84183b5fc4a7997a8ae5afedb4d21dce69c480d5966b5cbdafd6dd10d29a9a6377f3b90ce44da0eb8b176ac3af0253bb","0x8508abbf6d3739a16b9165caf0f95afb3b3ac1b8c38d6d374cf0c91296e2c1809a99772492b539cda184510bce8a0271","0x8722054e59bab2062e6419a6e45fc803af77fde912ef2cd23055ad0484963de65a816a2debe1693d93c18218d2b8e81a","0x8e895f80e485a7c4f56827bf53d34b956281cdc74856c21eb3b51f6288c01cc3d08565a11cc6f3e2604775885490e8c5","0xafc92714771b7aa6e60f3aee12efd9c2595e9659797452f0c1e99519f67c8bc3ac567119c1ddfe82a3e961ee9defea9a","0x818ff0fd9cefd32db87b259e5fa32967201016fc02ef44116cdca3c63ce5e637756f60477a408709928444a8ad69c471","0x8251e29af4c61ae806fc5d032347fb332a94d472038149225298389495139ce5678fae739d02dfe53a231598a992e728","0xa0ea39574b26643f6f1f48f99f276a8a64b5481989cfb2936f9432a3f8ef5075abfe5c067dc5512143ce8bf933984097","0xaf67a73911b372bf04e57e21f289fc6c3dfac366c6a01409b6e76fea4769bdb07a6940e52e8d7d3078f235c6d2f632c6","0xb5291484ef336024dd2b9b4cf4d3a6b751133a40656d0a0825bcc6d41c21b1c79cb50b0e8f4693
f90c29c8f4358641f9","0x8bc0d9754d70f2cb9c63f991902165a87c6535a763d5eece43143b5064ae0bcdce7c7a8f398f2c1c29167b2d5a3e6867","0x8d7faff53579ec8f6c92f661c399614cc35276971752ce0623270f88be937c414eddcb0997e14724a783905a026c8883","0x9310b5f6e675fdf60796f814dbaa5a6e7e9029a61c395761e330d9348a7efab992e4e115c8be3a43d08e90d21290c892","0xb5eb4f3eb646038ad2a020f0a42202532d4932e766da82b2c1002bf9c9c2e5336b54c8c0ffcc0e02d19dde2e6a35b6cc","0x91dabfd30a66710f1f37a891136c9be1e23af4abf8cb751f512a40c022a35f8e0a4fb05b17ec36d4208de02d56f0d53a","0xb3ded14e82d62ac7a5a036122a62f00ff8308498f3feae57d861babaff5a6628d43f0a0c5fc903f10936bcf4e2758ceb","0xa88e8348fed2b26acca6784d19ef27c75963450d99651d11a950ea81d4b93acd2c43e0ecce100eaf7e78508263d5baf3","0xb1f5bbf7c4756877b87bb42163ac570e08c6667c4528bf68b5976680e19beeff7c5effd17009b0718797077e2955457a","0xad2e7b516243f915d4d1415326e98b1a7390ae88897d0b03b66c2d9bd8c3fba283d7e8fe44ed3333296a736454cef6d8","0x8f82eae096d5b11f995de6724a9af895f5e1c58d593845ad16ce8fcae8507e0d8e2b2348a0f50a1f66a17fd6fac51a5c","0x890e4404d0657c6c1ee14e1aac132ecf7a568bb3e04137b85ac0f84f1d333bd94993e8750f88eee033a33fb00f85dcc7","0x82ac7d3385e035115f1d39a99fc73e5919de44f5e6424579776d118d711c8120b8e5916372c6f27bed4cc64cac170b6c","0x85ee16d8901c272cfbbe966e724b7a891c1bd5e68efd5d863043ad8520fc409080af61fd726adc680b3f1186fe0ac8b8","0x86dc564c9b545567483b43a38f24c41c6551a49cabeebb58ce86404662a12dbfafd0778d30d26e1c93ce222e547e3898","0xa29f5b4522db26d88f5f95f18d459f8feefab02e380c2edb65aa0617a82a3c1a89474727a951cef5f15050bcf7b380fb","0xa1ce039c8f6cac53352899edb0e3a72c76da143564ad1a44858bd7ee88552e2fe6858d1593bbd74aeee5a6f8034b9b9d","0x97f10d77983f088286bd7ef3e7fdd8fa275a56bec19919adf33cf939a90c8f2967d2b1b6fc51195cb45ad561202a3ed7","0xa25e2772e8c911aaf8712bdac1dd40ee061c84d3d224c466cfaae8e5c99604053f940cde259bd1c3b8b69595781dbfec","0xb31bb95a0388595149409c48781174c340960d59032ab2b47689911d03c68f77a2273576fbe0c2bf4553e330656058c7","0xb8b2e9287ad803fb185a13f0d7456b397d4e3c8ad5078f57f49e8beb2e
85f661356a3392dbd7bcf6a900baa5582b86a1","0xa3d0893923455eb6e96cc414341cac33d2dbc88fba821ac672708cce131761d85a0e08286663a32828244febfcae6451","0x82310cb42f647d99a136014a9f881eb0b9791efd2e01fc1841907ad3fc8a9654d3d1dab6689c3607214b4dc2aca01cee","0x874022d99c16f60c22de1b094532a0bc6d4de700ad01a31798fac1d5088b9a42ad02bef8a7339af7ed9c0d4f16b186ee","0x94981369e120265aed40910eebc37eded481e90f4596b8d57c3bec790ab7f929784bd33ddd05b7870aad6c02e869603b","0xa4f1f50e1e2a73f07095e0dd31cb45154f24968dae967e38962341c1241bcd473102fff1ff668b20c6547e9732d11701","0xae2328f3b0ad79fcda807e69a1b5278145225083f150f67511dafc97e079f860c3392675f1752ae7e864c056e592205b","0x875d8c971e593ca79552c43d55c8c73b17cd20c81ff2c2fed1eb19b1b91e4a3a83d32df150dbfd5db1092d0aebde1e1f","0xadd2e80aa46aae95da73a11f130f4bda339db028e24c9b11e5316e75ba5e63bc991d2a1da172c7c8e8fee038baae3433","0xb46dbe1cb3424002aa7de51e82f600852248e251465c440695d52538d3f36828ff46c90ed77fc1d11534fe3c487df8ef","0xa5e5045d28b4e83d0055863c30c056628c58d4657e6176fd0536f5933f723d60e851bb726d5bf3c546b8ce4ac4a57ef8","0x91fec01e86dd1537e498fff7536ea3ca012058b145f29d9ada49370cd7b7193ac380e116989515df1b94b74a55c45df3","0xa7428176d6918cd916a310bdc75483c72de660df48cac4e6e7478eef03205f1827ea55afc0df5d5fa7567d14bbea7fc9","0x851d89bef45d9761fe5fdb62972209335193610015e16a675149519f9911373bac0919add226ef118d9f3669cfdf4734","0xb74acf5c149d0042021cb2422ea022be4c4f72a77855f42393e71ffd12ebb3eec16bdf16f812159b67b79a9706e7156d","0x99f35dce64ec99aa595e7894b55ce7b5a435851b396e79036ffb249c28206087db4c85379df666c4d95857db02e21ff9","0xb6b9a384f70db9e298415b8ab394ee625dafff04be2886476e59df8d052ca832d11ac68a9b93fba7ab055b7bc36948a4","0x898ee4aefa923ffec9e79f2219c7389663eb11eb5b49014e04ed4a336399f6ea1691051d86991f4c46ca65bcd4fdf359","0xb0f948217b0d65df7599a0ba4654a5e43c84db477936276e6f11c8981efc6eaf14c90d3650107ed4c09af4cc8ec11137","0xaa6286e27ac54f73e63dbf6f41865dd94d24bc0cf732262fcaff67319d162bb43af909f6f8ee27b1971939cfbba08141","0x8bca7cdf730cf56c7b2c8a2c4879d61361a6e1
dba5a3681a1a16c17a56e168ace0e99cf0d15826a1f5e67e6b8a8a049a","0xa746d876e8b1ce225fcafca603b099b36504846961526589af977a88c60d31ba2cc56e66a3dec8a77b3f3531bf7524c9","0xa11e2e1927e6704cdb8874c75e4f1842cef84d7d43d7a38e339e61dc8ba90e61bbb20dd3c12e0b11d2471d58eed245be","0xa36395e22bc1d1ba8b0459a235203177737397da5643ce54ded3459d0869ff6d8d89f50c73cb62394bf66a959cde9b90","0x8b49f12ba2fdf9aca7e5f81d45c07d47f9302a2655610e7634d1e4bd16048381a45ef2c95a8dd5b0715e4b7cf42273af","0x91cffa2a17e64eb7f76bccbe4e87280ee1dd244e04a3c9eac12e15d2d04845d876eb24fe2ec6d6d266cce9efb281077f","0xa6b8afabf65f2dee01788114e33a2f3ce25376fb47a50b74da7c3c25ff1fdc8aa9f41307534abbf48acb6f7466068f69","0x8d13db896ccfea403bd6441191995c1a65365cab7d0b97fbe9526da3f45a877bd1f4ef2edef160e8a56838cd1586330e","0x98c717de9e01bef8842c162a5e757fe8552d53269c84862f4d451e7c656ae6f2ae473767b04290b134773f63be6fdb9d","0x8c2036ace1920bd13cf018e82848c49eb511fad65fd0ff51f4e4b50cf3bfc294afb63cba682c16f52fb595a98fa84970","0xa3520fdff05dbad9e12551b0896922e375f9e5589368bcb2cc303bde252743b74460cb5caf99629325d3620f13adc796","0x8d4f83a5bfec05caf5910e0ce538ee9816ee18d0bd44c1d0da2a87715a23cd2733ad4d47552c6dc0eb397687d611dd19","0xa7b39a0a6a02823452d376533f39d35029867b3c9a6ad6bca181f18c54132d675613a700f9db2440fb1b4fa13c8bf18a","0x80bcb114b2544b80f404a200fc36860ed5e1ad31fe551acd4661d09730c452831751baa9b19d7d311600d267086a70bc","0x90dcce03c6f88fc2b08f2b42771eedde90cc5330fe0336e46c1a7d1b5a6c1641e5fcc4e7b3d5db00bd8afca9ec66ed81","0xaec15f40805065c98e2965b1ae12a6c9020cfdb094c2d0549acfc7ea2401a5fb48d3ea7d41133cf37c4e096e7ff53eb9","0x80e129b735dba49fa627a615d6c273119acec8e219b2f2c4373a332b5f98d66cbbdd688dfbe72a8f8bfefaccc02c50c1","0xa9b596da3bdfe23e6799ece5f7975bf7a1979a75f4f546deeaf8b34dfe3e0d623217cb4cf4ccd504cfa3625b88cd53f1","0xabcbbb70b16f6e517c0ab4363ab76b46e4ff58576b5f8340e5c0e8cc0e02621b6e23d742d73b015822a238b17cfd7665","0xa046937cc6ea6a2e1adae543353a9fe929c1ae4ad655be1cc051378482cf88b041e28b1e9a577e6ccff2d3570f55e200","0x831279437282f315e6
5a60184ef158f0a3dddc15a648dc552bdc88b3e6fe8288d3cfe9f0031846d81350f5e7874b4b33","0x993d7916fa213c6d66e7c4cafafc1eaec9a2a86981f91c31eb8a69c5df076c789cbf498a24c84e0ee77af95b42145026","0x823907a3b6719f8d49b3a4b7c181bd9bb29fcf842d7c70660c4f351852a1e197ca46cf5e879b47fa55f616fa2b87ce5e","0x8d228244e26132b234930ee14c75d88df0943cdb9c276a8faf167d259b7efc1beec2a87c112a6c608ad1600a239e9aae","0xab6e55766e5bfb0cf0764ed909a8473ab5047d3388b4f46faeba2d1425c4754c55c6daf6ad4751e634c618b53e549529","0xab0cab6860e55a84c5ad2948a7e0989e2b4b1fd637605634b118361497332df32d9549cb854b2327ca54f2bcb85eed8f","0xb086b349ae03ef34f4b25a57bcaa5d1b29bd94f9ebf87e22be475adfe475c51a1230c1ebe13506cb72c4186192451658","0x8a0b49d8a254ca6d91500f449cbbfbb69bb516c6948ac06808c65595e46773e346f97a5ce0ef7e5a5e0de278af22709c","0xac49de11edaaf04302c73c578cc0824bdd165c0d6321be1c421c1950e68e4f3589aa3995448c9699e93c6ebae8803e27","0x884f02d841cb5d8f4c60d1402469216b114ab4e93550b5bc1431756e365c4f870a9853449285384a6fa49e12ce6dc654","0xb75f3a28fa2cc8d36b49130cb7448a23d73a7311d0185ba803ad55c8219741d451c110f48b786e96c728bc525903a54f","0x80ae04dbd41f4a35e33f9de413b6ad518af0919e5a30cb0fa1b061b260420780bb674f828d37fd3b52b5a31673cbd803","0xb9a8011eb5fcea766907029bf743b45262db3e49d24f84503687e838651ed11cb64c66281e20a0ae9f6aa51acc552263","0x90bfdd75e2dc9cf013e22a5d55d2d2b8a754c96103a17524488e01206e67f8b6d52b1be8c4e3d5307d4fe06d0e51f54c","0xb4af353a19b06203a815ec43e79a88578cc678c46f5a954b85bc5c53b84059dddba731f3d463c23bfd5273885c7c56a4","0xaa125e96d4553b64f7140e5453ff5d2330318b69d74d37d283e84c26ad672fa00e3f71e530eb7e28be1e94afb9c4612e","0xa18e060aee3d49cde2389b10888696436bb7949a79ca7d728be6456a356ea5541b55492b2138da90108bd1ce0e6f5524","0x93e55f92bdbccc2de655d14b1526836ea2e52dba65eb3f87823dd458a4cb5079bf22ce6ef625cb6d6bfdd0995ab9a874","0x89f5a683526b90c1c3ceebbb8dc824b21cff851ce3531b164f6626e326d98b27d3e1d50982e507d84a99b1e04e86a915","0x83d1c38800361633a3f742b1cb2bfc528129496e80232611682ddbe403e92c2ac5373aea0bca93ecb5128b0b2b7a719e","
0x8ecba560ac94905e19ce8d9c7af217bf0a145d8c8bd38e2db82f5e94cc3f2f26f55819176376b51f154b4aab22056059","0xa7e2a4a002b60291924850642e703232994acb4cfb90f07c94d1e0ecd2257bb583443283c20fc6017c37e6bfe85b7366","0x93ed7316fa50b528f1636fc6507683a672f4f4403e55e94663f91221cc198199595bd02eef43d609f451acc9d9b36a24","0xa1220a8ebc5c50ceed76a74bc3b7e0aa77f6884c71b64b67c4310ac29ce5526cb8992d6abc13ef6c8413ce62486a6795","0xb2f6eac5c869ad7f4a25161d3347093e2f70e66cd925032747e901189355022fab3038bca4d610d2f68feb7e719c110b","0xb703fa11a4d511ca01c7462979a94acb40b5d933759199af42670eb48f83df202fa0c943f6ab3b4e1cc54673ea3aab1e","0xb5422912afbfcb901f84791b04f1ddb3c3fbdc76d961ee2a00c5c320e06d3cc5b5909c3bb805df66c5f10c47a292b13d","0xad0934368da823302e1ac08e3ede74b05dfdbfffca203e97ffb0282c226814b65c142e6e15ec1e754518f221f01b30f7","0xa1dd302a02e37df15bf2f1147efe0e3c06933a5a767d2d030e1132f5c3ce6b98e216b6145eb39e1e2f74e76a83165b8d","0xa346aab07564432f802ae44738049a36f7ca4056df2d8f110dbe7fef4a3e047684dea609b2d03dc6bf917c9c2a47608f","0xb96c5f682a5f5d02123568e50f5d0d186e4b2c4c9b956ec7aabac1b3e4a766d78d19bd111adb5176b898e916e49be2aa","0x8a96676d56876fc85538db2e806e1cba20fd01aeb9fa3cb43ca6ca94a2c102639f65660db330e5d74a029bb72d6a0b39","0xab0048336bd5c3def1a4064eadd49e66480c1f2abb4df46e03afbd8a3342c2c9d74ee35d79f08f4768c1646681440984","0x888427bdf76caec90814c57ee1c3210a97d107dd88f7256f14f883ad0f392334b82be11e36dd8bfec2b37935177c7831","0xb622b282becf0094a1916fa658429a5292ba30fb48a4c8066ce1ddcefb71037948262a01c95bab6929ed3a76ba5db9fe","0xb5b9e005c1f456b6a368a3097634fb455723abe95433a186e8278dceb79d4ca2fbe21f8002e80027b3c531e5bf494629","0xa3c6707117a1e48697ed41062897f55d8119403eea6c2ee88f60180f6526f45172664bfee96bf61d6ec0b7fbae6aa058","0xb02a9567386a4fbbdb772d8a27057b0be210447348efe6feb935ceec81f361ed2c0c211e54787dc617cdffed6b4a6652","0xa9b8364e40ef15c3b5902e5534998997b8493064fa2bea99600def58279bb0f64574c09ba11e9f6f669a8354dd79dc85","0x9998a2e553a9aa9a206518fae2bc8b90329ee59ab23005b10972712389f2ec0ee746033c733092f
fe43d73d33abbb8ef","0x843a4b34d9039bf79df96d79f2d15e8d755affb4d83d61872daf540b68c0a3888cf8fc00d5b8b247b38524bcb3b5a856","0x84f7128920c1b0bb40eee95701d30e6fc3a83b7bb3709f16d97e72acbb6057004ee7ac8e8f575936ca9dcb7866ab45f7","0x918d3e2222e10e05edb34728162a899ad5ada0aaa491aeb7c81572a9c0d506e31d5390e1803a91ff3bd8e2bb15d47f31","0x9442d18e2489613a7d47bb1cb803c8d6f3259d088cd079460976d87f7905ee07dea8f371b2537f6e1d792d36d7e42723","0xb491976970fe091995b2ed86d629126523ccf3e9daf8145302faca71b5a71a5da92e0e05b62d7139d3efac5c4e367584","0xaa628006235dc77c14cef4c04a308d66b07ac92d377df3de1a2e6ecfe3144f2219ad6d7795e671e1cb37a3641910b940","0x99d386adaea5d4981d7306feecac9a555b74ffdc218c907c5aa7ac04abaead0ec2a8237300d42a3fbc464673e417ceed","0x8f78e8b1556f9d739648ea3cab9606f8328b52877fe72f9305545a73b74d49884044ba9c1f1c6db7d9b7c7b7c661caba","0x8fb357ae49932d0babdf74fc7aa7464a65d3b6a2b3acf4f550b99601d3c0215900cfd67f2b6651ef94cfc323bac79fae","0x9906f2fa25c0290775aa001fb6198113d53804262454ae8b83ef371b5271bde189c0460a645829cb6c59f9ee3a55ce4d","0x8f4379b3ebb50e052325b27655ca6a82e6f00b87bf0d2b680d205dd2c7afdc9ff32a9047ae71a1cdf0d0ce6b9474d878","0xa85534e88c2bd43c043792eaa75e50914b21741a566635e0e107ae857aed0412035f7576cf04488ade16fd3f35fdbb87","0xb4ce93199966d3c23251ca7f28ec5af7efea1763d376b0385352ffb2e0a462ef95c69940950278cf0e3dafd638b7bd36","0xb10cb3d0317dd570aa73129f4acf63c256816f007607c19b423fb42f65133ce21f2f517e0afb41a5378cccf893ae14d0","0xa9b231c9f739f7f914e5d943ed9bff7eba9e2c333fbd7c34eb1648a362ee01a01af6e2f7c35c9fe962b11152cddf35de","0x99ff6a899e156732937fb81c0cced80ae13d2d44c40ba99ac183aa246103b31ec084594b1b7feb96da58f4be2dd5c0ed","0x8748d15d18b75ff2596f50d6a9c4ce82f61ecbcee123a6ceae0e43cab3012a29b6f83cf67b48c22f6f9d757c6caf76b2","0xb88ab05e4248b7fb634cf640a4e6a945d13e331237410f7217d3d17e3e384ddd48897e7a91e4516f1b9cbd30f35f238b","0x8d826deaeeb84a3b2d2c04c2300ca592501f992810582d6ae993e0d52f6283a839dba66c6c72278cff5871802b71173b","0xb36fed027c2f05a5ef625ca00b0364b930901e9e4420975b111858d0941
f60e205546474bb25d6bfa6928d37305ae95f","0xaf2fcfc6b87967567e8b8a13a4ed914478185705724e56ce68fb2df6d1576a0cf34a61e880997a0d35dc2c3276ff7501","0xac351b919cd1fbf106feb8af2c67692bfcddc84762d18cea681cfa7470a5644839caace27efee5f38c87d3df306f4211","0x8d6665fb1d4d8d1fa23bd9b8a86e043b8555663519caac214d1e3e3effbc6bee7f2bcf21e645f77de0ced279d69a8a8b","0xa9fc1c2061756b2a1a169c1b149f212ff7f0d2488acd1c5a0197eba793cffa593fc6d1d1b40718aa75ca3ec77eff10e1","0xaff64f0fa009c7a6cf0b8d7a22ddb2c8170c3cb3eec082e60d5aadb00b0040443be8936d728d99581e33c22178c41c87","0x82e0b181adc5e3b1c87ff8598447260e839d53debfae941ebea38265575546c3a74a14b4325a030833a62ff6c52d9365","0xb7ad43cbb22f6f892c2a1548a41dc120ab1f4e1b8dea0cb6272dd9cb02054c542ecabc582f7e16de709d48f5166cae86","0x985e0c61094281532c4afb788ecb2dfcba998e974b5d4257a22040a161883908cdd068fe80f8eb49b8953cfd11acf43a","0xae46895c6d67ea6d469b6c9c07b9e5d295d9ae73b22e30da4ba2c973ba83a130d7eef39717ec9d0f36e81d56bf742671","0x8600177ea1f7e7ef90514b38b219a37dedfc39cb83297e4c7a5b479817ef56479d48cf6314820960c751183f6edf8b0e","0xb9208ec1c1d7a1e99b59c62d3e4e61dfb706b0e940d09d3abfc3454c19749083260614d89cfd7e822596c3cdbcc6bb95","0xa1e94042c796c2b48bc724352d2e9f3a22291d9a34705993357ddb6adabd76da6fc25dac200a8cb0b5bbd99ecddb7af6","0xb29c3adedd0bcad8a930625bc4dfdc3552a9afd5ca6dd9c0d758f978068c7982b50b711aa0eb5b97f2b84ee784637835","0xaf0632a238bb1f413c7ea8e9b4c3d68f2827bd2e38cd56024391fba6446ac5d19a780d0cfd4a78fe497d537b766a591a","0xaaf6e7f7d54f8ef5e2e45dd59774ecbeecf8683aa70483b2a75be6a6071b5981bbaf1627512a65d212817acdfab2e428","0x8c751496065da2e927cf492aa5ca9013b24f861d5e6c24b30bbf52ec5aaf1905f40f9a28175faef283dd4ed4f2182a09","0x8952377d8e80a85cf67d6b45499f3bad5fd452ea7bcd99efc1b066c4720d8e5bff1214cea90fd1f972a7f0baac3d29be","0xa1946ee543d1a6e21f380453be4d446e4130950c5fc3d075794eb8260f6f52d0a795c1ff91d028a648dc1ce7d9ab6b47","0x89f3fefe37af31e0c17533d2ca1ce0884cc1dc97c15cbfab9c331b8debd94781c9396abef4bb2f163d09277a08d6adf0","0xa2753f1e6e1a154fb117100a5bd9052137add85
961f8158830ac20541ab12227d83887d10acf7fd36dcaf7c2596d8d23","0x814955b4198933ee11c3883863b06ff98c7eceb21fc3e09df5f916107827ccf3323141983e74b025f46ae00284c9513b","0x8cc5c6bb429073bfef47cae7b3bfccb0ffa076514d91a1862c6bda4d581e0df87db53cc6c130bf8a7826304960f5a34e","0x909f22c1f1cdc87f7be7439c831a73484a49acbf8f23d47087d7cf867c64ef61da3bde85dc57d705682b4c3fc710d36e","0x8048fee7f276fcd504aed91284f28e73693615e0eb3858fa44bcf79d7285a9001c373b3ef71d9a3054817ba293ebe28c","0x94400e5cf5d2700ca608c5fe35ce14623f71cc24959f2bc27ca3684092850f76b67fb1f07ca9e5b2ca3062cf8ad17bd4","0x81c2ae7d4d1b17f8b6de6a0430acc0d58260993980fe48dc2129c4948269cdc74f9dbfbf9c26b19360823fd913083d48","0x8c41fe765128e63f6889d6a979f6a4342300327c8b245a8cfe3ecfbcac1e09c3da30e2a1045b24b78efc6d6d50c8c6ac","0xa5dd4ae51ae48c8be4b218c312ade226cffce671cf121cb77810f6c0990768d6dd767badecb5c69921d5574d5e8433d3","0xb7642e325f4ba97ae2a39c1c9d97b35aafd49d53dba36aed3f3cb0ca816480b3394079f46a48252d46596559c90f4d58","0xae87375b40f35519e7bd4b1b2f73cd0b329b0c2cb9d616629342a71c6c304338445eda069b78ea0fbe44087f3de91e09","0xb08918cb6f736855e11d3daca1ddfbdd61c9589b203b5493143227bf48e2c77c2e8c94b0d1aa2fab2226e0eae83f2681","0xac36b84a4ac2ebd4d6591923a449c564e3be8a664c46092c09e875c2998eba16b5d32bfd0882fd3851762868e669f0b1","0xa44800a3bb192066fa17a3f29029a23697240467053b5aa49b9839fb9b9b8b12bcdcbfc557f024b61f4f51a9aacdefcb","0x9064c688fec23441a274cdf2075e5a449caf5c7363cc5e8a5dc9747183d2e00a0c69f2e6b3f6a7057079c46014c93b3b","0xaa367b021469af9f5b764a79bb3afbe2d87fe1e51862221672d1a66f954b165778b7c27a705e0f93841fab4c8468344d","0xa1a8bfc593d4ab71f91640bc824de5c1380ab2591cfdafcbc78a14b32de3c0e15f9d1b461d85c504baa3d4232c16bb53","0x97df48da1799430f528184d30b6baa90c2a2f88f34cdfb342d715339c5ebd6d019aa693cea7c4993daafc9849063a3aa","0xabd923831fbb427e06e0dd335253178a9e5791395c84d0ab1433c07c53c1209161097e9582fb8736f8a60bde62d8693e","0x84cd1a43f1a438b43dc60ffc775f646937c4f6871438163905a3cebf1115f814ccd38a6ccb134130bff226306e412f32","0x91426065996b0743c5f
689eb3ca68a9f7b9e4d01f6c5a2652b57fa9a03d8dc7cd4bdbdab0ca5a891fee1e97a7f00cf02","0xa4bee50249db3df7fd75162b28f04e57c678ba142ce4d3def2bc17bcb29e4670284a45f218dad3969af466c62a903757","0x83141ebcc94d4681404e8b67a12a46374fded6df92b506aff3490d875919631408b369823a08b271d006d5b93136f317","0xa0ea1c8883d58d5a784da3d8c8a880061adea796d7505c1f903d07c287c5467f71e4563fc0faafbc15b5a5538b0a7559","0x89d9d480574f201a87269d26fb114278ed2c446328df431dc3556e3500e80e4cd01fcac196a2459d8646361ebda840df","0x8bf302978973632dd464bec819bdb91304712a3ec859be071e662040620422c6e75eba6f864f764cffa2799272efec39","0x922f666bc0fd58b6d7d815c0ae4f66d193d32fc8382c631037f59eeaeae9a8ca6c72d08e72944cf9e800b8d639094e77","0x81ad8714f491cdff7fe4399f2eb20e32650cff2999dd45b9b3d996d54a4aba24cc6c451212e78c9e5550368a1a38fb3f","0xb58fcf4659d73edb73175bd9139d18254e94c3e32031b5d4b026f2ed37aa19dca17ec2eb54c14340231615277a9d347e","0xb365ac9c2bfe409b710928c646ea2fb15b28557e0f089d39878e365589b9d1c34baf5566d20bb28b33bb60fa133f6eff","0x8fcae1d75b53ab470be805f39630d204853ca1629a14158bac2f52632277d77458dec204ff84b7b2d77e641c2045be65","0xa03efa6bebe84f4f958a56e2d76b5ba4f95dd9ed7eb479edc7cc5e646c8d4792e5b0dfc66cc86aa4b4afe2f7a4850760","0xaf1c823930a3638975fb0cc5c59651771b2719119c3cd08404fbd4ce77a74d708cefbe3c56ea08c48f5f10e6907f338f","0x8260c8299b17898032c761c325ac9cabb4c5b7e735de81eacf244f647a45fb385012f4f8df743128888c29aefcaaad16","0xab2f37a573c82e96a8d46198691cd694dfa860615625f477e41f91b879bc58a745784fccd8ffa13065834ffd150d881d","0x986c746c9b4249352d8e5c629e8d7d05e716b3c7aab5e529ca969dd1e984a14b5be41528baef4c85d2369a42d7209216","0xb25e32da1a8adddf2a6080725818b75bc67240728ad1853d90738485d8924ea1e202df0a3034a60ffae6f965ec55cf63","0xa266e627afcebcefea6b6b44cbc50f5c508f7187e87d047b0450871c2a030042c9e376f3ede0afcf9d1952f089582f71","0x86c3bbca4c0300606071c0a80dbdec21ce1dd4d8d4309648151c420854032dff1241a1677d1cd5de4e4de4385efda986","0xb9a21a1fe2d1f3273a8e4a9185abf2ff86448cc98bfa435e3d68306a2b8b4a6a3ea33a155be3cb62a2170a86f77679a5","0
xb117b1ea381adce87d8b342cba3a15d492ff2d644afa28f22424cb9cbc820d4f7693dfc1a4d1b3697046c300e1c9b4c8","0x9004c425a2e68870d6c69b658c344e3aa3a86a8914ee08d72b2f95c2e2d8a4c7bb0c6e7e271460c0e637cec11117bf8e","0x86a18aa4783b9ebd9131580c8b17994825f27f4ac427b0929a1e0236907732a1c8139e98112c605488ee95f48bbefbfc","0x84042243b955286482ab6f0b5df4c2d73571ada00716d2f737ca05a0d2e88c6349e8ee9e67934cfee4a1775dbf7f4800","0x92c2153a4733a62e4e1d5b60369f3c26777c7d01cd3c8679212660d572bd3bac9b8a8a64e1f10f7dbf5eaa7579c4e423","0x918454b6bb8e44a2afa144695ba8d48ae08d0cdfef4ad078f67709eddf3bb31191e8b006f04e82ea45a54715ef4d5817","0xacf0b54f6bf34cf6ed6c2b39cf43194a40d68de6bcf1e4b82c34c15a1343e9ac3737885e1a30b78d01fa3a5125463db8","0xa7d60dbe4b6a7b054f7afe9ee5cbbfeca0d05dc619e6041fa2296b549322529faddb8a11e949562309aecefb842ac380","0x91ffb53e6d7e5f11159eaf13e783d6dbdfdb1698ed1e6dbf3413c6ea23492bbb9e0932230a9e2caac8fe899a17682795","0xb6e8d7be5076ee3565d5765a710c5ecf17921dd3cf555c375d01e958a365ae087d4a88da492a5fb81838b7b92bf01143","0xa8c6b763de2d4b2ed42102ef64eccfef31e2fb2a8a2776241c82912fa50fc9f77f175b6d109a97ede331307c016a4b1a","0x99839f86cb700c297c58bc33e28d46b92931961548deac29ba8df91d3e11721b10ea956c8e16984f9e4acf1298a79b37","0x8c2e2c338f25ea5c25756b7131cde0d9a2b35abf5d90781180a00fe4b8e64e62590dc63fe10a57fba3a31c76d784eb01","0x9687d7df2f41319ca5469d91978fed0565a5f11f829ebadaa83db92b221755f76c6eacd7700735e75c91e257087512e3","0x8795fdfb7ff8439c58b9bf58ed53873d2780d3939b902b9ddaaa4c99447224ced9206c3039a23c2c44bcc461e2bb637f","0xa803697b744d2d087f4e2307218d48fa88620cf25529db9ce71e2e3bbcc65bac5e8bb9be04777ef7bfb5ed1a5b8e6170","0x80f3d3efbbb9346ddd413f0a8e36b269eb5d7ff6809d5525ff9a47c4bcab2c01b70018b117f6fe05253775612ff70c6b","0x9050e0e45bcc83930d4c505af35e5e4d7ca01cd8681cba92eb55821aececcebe32bb692ebe1a4daac4e7472975671067","0x8d206812aac42742dbaf233e0c080b3d1b30943b54b60283515da005de05ea5caa90f91fedcfcba72e922f64d7040189","0xa2d44faaeb2eff7915c83f32b13ca6f31a6847b1c1ce114ea240bac3595eded89f09b2313b7915ad
882292e2b586d5b4","0x961776c8576030c39f214ea6e0a3e8b3d32f023d2600958c098c95c8a4e374deeb2b9dc522adfbd6bda5949bdc09e2a2","0x993fa7d8447407af0fbcd9e6d77f815fa5233ab00674efbcf74a1f51c37481445ae291cc7b76db7c178f9cb0e570e0fc","0xabd5b1c78e05f9d7c8cc99bdaef8b0b6a57f2daf0f02bf492bec48ea4a27a8f1e38b5854da96efff11973326ff980f92","0x8f15af4764bc275e6ccb892b3a4362cacb4e175b1526a9a99944e692fe6ccb1b4fc19abf312bb2a089cb1f344d91a779","0xa09b27ccd71855512aba1d0c30a79ffbe7f6707a55978f3ced50e674b511a79a446dbc6d7946add421ce111135a460af","0x94b2f98ce86a9271fbd4153e1fc37de48421fe3490fb3840c00f2d5a4d0ba8810c6a32880b002f6374b59e0a7952518b","0x8650ac644f93bbcb88a6a0f49fee2663297fd4bc6fd47b6a89b9d8038d32370438ab3a4775ec9b58cb10aea8a95ef7b6","0x95e5c2f2e84eed88c6980bbba5a1c0bb375d5a628bff006f7516d45bb7d723da676add4fdd45956f312e7bab0f052644","0xb3278a3fa377ac93af7cfc9453f8cb594aae04269bbc99d2e0e45472ff4b6a2f97a26c4c57bf675b9d86f5e77a5d55d1","0xb4bcbe6eb666a206e2ea2f877912c1d3b5bdbd08a989fc4490eb06013e1a69ad1ba08bcdac048bf29192312be399077b","0xa76d70b78c99fffcbf9bb9886eab40f1ea4f99a309710b660b64cbf86057cbcb644d243f6e341711bb7ef0fedf0435a7","0xb2093c1ee945dca7ac76ad5aed08eae23af31dd5a77c903fd7b6f051f4ab84425d33a03c3d45bf2907bc93c02d1f3ad8","0x904b1f7534e053a265b22d20be859912b9c9ccb303af9a8d6f1d8f6ccdc5c53eb4a45a1762b880d8444d9be0cd55e7f9","0x8f664a965d65bc730c9ef1ec7467be984d4b8eb46bd9b0d64e38e48f94e6e55dda19aeac82cbcf4e1473440e64c4ca18","0x8bcee65c4cc7a7799353d07b114c718a2aae0cd10a3f22b7eead5185d159dafd64852cb63924bf87627d176228878bce","0x8c78f2e3675096fef7ebaa898d2615cd50d39ca3d8f02b9bdfb07e67da648ae4be3da64838dffc5935fd72962c4b96c7","0x8c40afd3701629421fec1df1aac4e849384ef2e80472c0e28d36cb1327acdf2826f99b357f3d7afdbc58a6347fc40b3c","0xa197813b1c65a8ea5754ef782522a57d63433ef752215ecda1e7da76b0412ee619f58d904abd2e07e0c097048b6ae1dd","0xa670542629e4333884ad7410f9ea3bd6f988df4a8f8a424ca74b9add2312586900cf9ae8bd50411f9146e82626b4af56","0xa19875cc07ab84e569d98b8b67fb1dbbdfb59093c7b748fae008c8904a6f
d931a63ca8d03ab5fea9bc8d263568125a9b","0xb57e7f68e4eb1bd04aafa917b1db1bdab759a02aa8a9cdb1cba34ba8852b5890f655645c9b4e15d5f19bf37e9f2ffe9f","0x8abe4e2a4f6462b6c64b3f10e45db2a53c2b0d3c5d5443d3f00a453e193df771eda635b098b6c8604ace3557514027af","0x8459e4fb378189b22b870a6ef20183deb816cefbf66eca1dc7e86d36a2e011537db893729f500dc154f14ce24633ba47","0x930851df4bc7913c0d8c0f7bd3b071a83668987ed7c397d3d042fdc0d9765945a39a3bae83da9c88cb6b686ed8aeeb26","0x8078c9e5cd05e1a8c932f8a1d835f61a248b6e7133fcbb3de406bf4ffc0e584f6f9f95062740ba6008d98348886cf76b","0xaddff62bb29430983fe578e3709b0949cdc0d47a13a29bc3f50371a2cb5c822ce53e2448cfaa01bcb6e0aa850d5a380e","0x9433add687b5a1e12066721789b1db2edf9b6558c3bdc0f452ba33b1da67426abe326e9a34d207bfb1c491c18811bde1","0x822beda3389963428cccc4a2918fa9a8a51cf0919640350293af70821967108cded5997adae86b33cb917780b097f1ca","0xa7a9f52bda45e4148ed56dd176df7bd672e9b5ed18888ccdb405f47920fdb0844355f8565cefb17010b38324edd8315f","0xb35c3a872e18e607b2555c51f9696a17fa18da1f924d503b163b4ec9fe22ed0c110925275cb6c93ce2d013e88f173d6a","0xadf34b002b2b26ab84fc1bf94e05bd8616a1d06664799ab149363c56a6e0c807fdc473327d25632416e952ea327fcd95","0xae4a6b9d22a4a3183fac29e2551e1124a8ce4a561a9a2afa9b23032b58d444e6155bb2b48f85c7b6d70393274e230db7","0xa2ea3be4fc17e9b7ce3110284038d46a09e88a247b6971167a7878d9dcf36925d613c382b400cfa4f37a3ebea3699897","0x8e5863786b641ce3140fbfe37124d7ad3925472e924f814ebfc45959aaf3f61dc554a597610b5defaecc85b59a99b50f","0xaefde3193d0f700d0f515ab2aaa43e2ef1d7831c4f7859f48e52693d57f97fa9e520090f3ed700e1c966f4b76048e57f","0x841a50f772956622798e5cd208dc7534d4e39eddee30d8ce133383d66e5f267e389254a0cdae01b770ecd0a9ca421929","0x8fbc2bfd28238c7d47d4c03b1b910946c0d94274a199575e5b23242619b1de3497784e646a92aa03e3e24123ae4fcaba","0x926999579c8eec1cc47d7330112586bdca20b4149c8b2d066f527c8b9f609e61ce27feb69db67eea382649c6905efcf9","0xb09f31f305efcc65589adf5d3690a76cf339efd67cd43a4e3ced7b839507466e4be72dd91f04e89e4bbef629d46e68c0","0xb917361f6b95f759642638e0b1d2b3a29c3bdef0
b94faa30de562e6078c7e2d25976159df3edbacbf43614635c2640b4","0x8e7e8a1253bbda0e134d62bfe003a2669d471b47bd2b5cde0ff60d385d8e62279d54022f5ac12053b1e2d3aaa6910b4c","0xb69671a3c64e0a99d90b0ed108ce1912ff8ed983e4bddd75a370e9babde25ee1f5efb59ec707edddd46793207a8b1fe7","0x910b2f4ebd37b7ae94108922b233d0920b4aba0bd94202c70f1314418b548d11d8e9caa91f2cd95aff51b9432d122b7f","0x82f645c90dfb52d195c1020346287c43a80233d3538954548604d09fbab7421241cde8593dbc4acc4986e0ea39a27dd9","0x8fee895f0a140d88104ce442fed3966f58ff9d275e7373483f6b4249d64a25fb5374bbdc6bce6b5ab0270c2847066f83","0x84f5bd7aab27b2509397aeb86510dd5ac0a53f2c8f73799bf720f2f87a52277f8d6b0f77f17bc80739c6a7119b7eb062","0x9903ceced81099d7e146e661bcf01cbaccab5ba54366b85e2177f07e2d8621e19d9c9c3eee14b9266de6b3f9b6ea75ae","0xb9c16ea2a07afa32dd6c7c06df0dec39bca2067a9339e45475c98917f47e2320f6f235da353fd5e15b477de97ddc68dd","0x9820a9bbf8b826bec61ebf886de2c4f404c1ebdc8bab82ee1fea816d9de29127ce1852448ff717a3fe8bbfe9e92012e5","0x817224d9359f5da6f2158c2c7bf9165501424f063e67ba9859a07ab72ee2ee62eb00ca6da821cfa19065c3282ca72c74","0x94b95c465e6cb00da400558a3c60cfec4b79b27e602ca67cbc91aead08de4b6872d8ea096b0dc06dca4525c8992b8547","0xa2b539a5bccd43fa347ba9c15f249b417997c6a38c63517ca38394976baa08e20be384a360969ff54e7e721db536b3e5","0x96caf707e34f62811ee8d32ccf28d8d6ec579bc33e424d0473529af5315c456fd026aa910c1fed70c91982d51df7d3ca","0x8a77b73e890b644c6a142bdbac59b22d6a676f3b63ddafb52d914bb9d395b8bf5aedcbcc90429337df431ebd758a07a6","0x8857830a7351025617a08bc44caec28d2fae07ebf5ffc9f01d979ce2a53839a670e61ae2783e138313929129790a51a1","0xaa3e420321ed6f0aa326d28d1a10f13facec6f605b6218a6eb9cbc074801f3467bf013a456d1415a5536f12599efa3d3","0x824aed0951957b00ea2f3d423e30328a3527bf6714cf9abbae84cf27e58e5c35452ba89ccc011de7c68c75d6e021d8f1","0xa2e87cc06bf202e953fb1081933d8b4445527dde20e38ed1a4f440144fd8fa464a2b73e068b140562e9045e0f4bd3144","0xae3b8f06ad97d7ae3a5e5ca839efff3e4824dc238c0c03fc1a8d2fc8aa546cdfd165b784a31bb4dec7c77e9305b99a4b","0xb30c3e12395b1fb8b776
f3ec9f87c70e35763a7b2ddc68f0f60a4982a84017f27c891a98561c830038deb033698ed7fc","0x874e507757cd1177d0dff0b0c62ce90130324442a33da3b2c8ee09dbca5d543e3ecfe707e9f1361e7c7db641c72794bb","0xb53012dd10b5e7460b57c092eaa06d6502720df9edbbe3e3f61a9998a272bf5baaac4a5a732ad4efe35d6fac6feca744","0x85e6509d711515534d394e6cacbed6c81da710074d16ef3f4950bf2f578d662a494d835674f79c4d6315bced4defc5f0","0xb6132b2a34b0905dcadc6119fd215419a7971fe545e52f48b768006944b4a9d7db1a74b149e2951ea48c083b752d0804","0x989867da6415036d19b4bacc926ce6f4df7a556f50a1ba5f3c48eea9cefbb1c09da81481c8009331ee83f0859185e164","0x960a6c36542876174d3fbc1505413e29f053ed87b8d38fef3af180491c7eff25200b45dd5fe5d4d8e63c7e8c9c00f4c8","0x9040b59bd739d9cc2e8f6e894683429e4e876a8106238689ff4c22770ae5fdae1f32d962b30301fa0634ee163b524f35","0xaf3fcd0a45fe9e8fe256dc7eab242ef7f582dd832d147444483c62787ac820fafc6ca55d639a73f76bfa5e7f5462ab8f","0xb934c799d0736953a73d91e761767fdb78454355c4b15c680ce08accb57ccf941b13a1236980001f9e6195801cffd692","0x8871e8e741157c2c326b22cf09551e78da3c1ec0fc0543136f581f1550f8bab03b0a7b80525c1e99812cdbf3a9698f96","0xa8a977f51473a91d178ee8cfa45ffef8d6fd93ab1d6e428f96a3c79816d9c6a93cd70f94d4deda0125fd6816e30f3bea","0xa7688b3b0a4fc1dd16e8ba6dc758d3cfe1b7cf401c31739484c7fa253cce0967df1b290769bcefc9d23d3e0cb19e6218","0x8ae84322662a57c6d729e6ff9d2737698cc2da2daeb1f39e506618750ed23442a6740955f299e4a15dda6db3e534d2c6","0xa04a961cdccfa4b7ef83ced17ab221d6a043b2c718a0d6cc8e6f798507a31f10bf70361f70a049bc8058303fa7f96864","0xb463e39732a7d9daec8a456fb58e54b30a6e160aa522a18b9a9e836488cce3342bcbb2e1deab0f5e6ec0a8796d77197d","0xb1434a11c6750f14018a2d3bcf94390e2948f4f187e93bb22070ca3e5393d339dc328cbfc3e48815f51929465ffe7d81","0x84ff81d73f3828340623d7e3345553610aa22a5432217ef0ebd193cbf4a24234b190c65ca0873c22d10ea7b63bd1fbed","0xb6fe2723f0c47757932c2ddde7a4f8434f665612f7b87b4009c2635d56b6e16b200859a8ade49276de0ef27a2b6c970a","0x9742884ed7cd52b4a4a068a43d3faa02551a424136c85a9313f7cb58ea54c04aa83b0728fd741d1fe39621e931e88f8f","0x
b7d2d65ea4d1ad07a5dee39e40d6c03a61264a56b1585b4d76fc5b2a68d80a93a42a0181d432528582bf08d144c2d6a9","0x88c0f66bada89f8a43e5a6ead2915088173d106c76f724f4a97b0f6758aed6ae5c37c373c6b92cdd4aea8f6261f3a374","0x81f9c43582cb42db3900747eb49ec94edb2284999a499d1527f03315fd330e5a509afa3bff659853570e9886aab5b28b","0x821f9d27d6beb416abf9aa5c79afb65a50ed276dbda6060103bc808bcd34426b82da5f23e38e88a55e172f5c294b4d40","0x8ba307b9e7cb63a6c4f3851b321aebfdb6af34a5a4c3bd949ff7d96603e59b27ff4dc4970715d35f7758260ff942c9e9","0xb142eb6c5f846de33227d0bda61d445a7c33c98f0a8365fe6ab4c1fabdc130849be597ef734305894a424ea715372d08","0xa732730ae4512e86a741c8e4c87fee8a05ee840fec0e23b2e037d58dba8dde8d10a9bc5191d34d00598941becbbe467f","0xadce6f7c30fd221f6b10a0413cc76435c4bb36c2d60bca821e5c67409fe9dbb2f4c36ef85eb3d734695e4be4827e9fd3","0xa74f00e0f9b23aff7b2527ce69852f8906dab9d6abe62ecd497498ab21e57542e12af9918d4fd610bb09e10b0929c510","0xa593b6b0ef26448ce4eb3ab07e84238fc020b3cb10d542ff4b16d4e2be1bcde3797e45c9cf753b8dc3b0ffdb63984232","0xaed3913afccf1aa1ac0eb4980eb8426d0baccebd836d44651fd72af00d09fac488a870223c42aca3ceb39752070405ae","0xb2c44c66a5ea7fde626548ba4cef8c8710191343d3dadfd3bb653ce715c0e03056a5303a581d47dde66e70ea5a2d2779","0x8e5029b2ccf5128a12327b5103f7532db599846e422531869560ceaff392236434d87159f597937dbf4054f810c114f4","0x82beed1a2c4477e5eb39fc5b0e773b30cfec77ef2b1bf17eadaf60eb35b6d0dd9d8cf06315c48d3546badb3f21cd0cca","0x90077bd6cc0e4be5fff08e5d07a5a158d36cebd1d1363125bc4fae0866ffe825b26f933d4ee5427ba5cd0c33c19a7b06","0xa7ec0d8f079970e8e34f0ef3a53d3e0e45428ddcef9cc776ead5e542ef06f3c86981644f61c5a637e4faf001fb8c6b3e","0xae6d4add6d1a6f90b22792bc9d40723ee6850c27d0b97eefafd5b7fd98e424aa97868b5287cc41b4fbd7023bca6a322c","0x831aa917533d077da07c01417feaa1408846363ba2b8d22c6116bb858a95801547dd88b7d7fa1d2e3f0a02bdeb2e103d","0x96511b860b07c8a5ed773f36d4aa9d02fb5e7882753bf56303595bcb57e37ccc60288887eb83bef08c657ec261a021a2","0x921d2a3e7e9790f74068623de327443666b634c8443aba80120a45bba450df920b2374d96df1ce3fb
1b06dd06f8cf6e3","0xaa74451d51fe82b4581ead8e506ec6cd881010f7e7dd51fc388eb9a557db5d3c6721f81c151d08ebd9c2591689fbc13e","0xa972bfbcf4033d5742d08716c927c442119bdae336bf5dff914523b285ccf31953da2733759aacaa246a9af9f698342c","0xad1fcd0cae0e76840194ce4150cb8a56ebed728ec9272035f52a799d480dfc85840a4d52d994a18b6edb31e79be6e8ad","0xa2c69fe1d36f235215432dad48d75887a44c99dfa0d78149acc74087da215a44bdb5f04e6eef88ff7eff80a5a7decc77","0xa94ab2af2b6ee1bc6e0d4e689ca45380d9fbd3c5a65b9bd249d266a4d4c07bf5d5f7ef2ae6000623aee64027892bf8fe","0x881ec1fc514e926cdc66480ac59e139148ff8a2a7895a49f0dff45910c90cdda97b66441a25f357d6dd2471cddd99bb3","0x884e6d3b894a914c8cef946a76d5a0c8351843b2bffa2d1e56c6b5b99c84104381dd1320c451d551c0b966f4086e60f9","0x817c6c10ce2677b9fc5223500322e2b880583254d0bb0d247d728f8716f5e05c9ff39f135854342a1afecd9fbdcf7c46","0xaaf4a9cb686a14619aa1fc1ac285dd3843ac3dd99f2b2331c711ec87b03491c02f49101046f3c5c538dc9f8dba2a0ac2","0x97ecea5ce53ca720b5d845227ae61d70269a2f53540089305c86af35f0898bfd57356e74a8a5e083fa6e1ea70080bd31","0xa22d811e1a20a75feac0157c418a4bfe745ccb5d29466ffa854dca03e395b6c3504a734341746b2846d76583a780b32e","0x940cbaa0d2b2db94ae96b6b9cf2deefbfd059e3e5745de9aec4a25f0991b9721e5cd37ef71c631575d1a0c280b01cd5b","0xae33cb4951191258a11044682de861bf8d92d90ce751b354932dd9f3913f542b6a0f8a4dc228b3cd9244ac32c4582832","0xa580df5e58c4274fe0f52ac2da1837e32f5c9db92be16c170187db4c358f43e5cfdda7c5911dcc79d77a5764e32325f5","0x81798178cb9d8affa424f8d3be67576ba94d108a28ccc01d330c51d5a63ca45bb8ca63a2f569b5c5fe1303cecd2d777f","0x89975b91b94c25c9c3660e4af4047a8bacf964783010820dbc91ff8281509379cb3b24c25080d5a01174dd9a049118d5","0xa7327fcb3710ed3273b048650bde40a32732ef40a7e58cf7f2f400979c177944c8bc54117ba6c80d5d4260801dddab79","0x92b475dc8cb5be4b90c482f122a51bcb3b6c70593817e7e2459c28ea54a7845c50272af38119406eaadb9bcb993368d0","0x9645173e9ecefc4f2eae8363504f7c0b81d85f8949a9f8a6c01f2d49e0a0764f4eacecf3e94016dd407fc14494fce9f9","0x9215fd8983d7de6ae94d35e6698226fc1454977ae58d42d294be9aad13ac8
21562ad37d5e7ee5cdfe6e87031d45cd197","0x810360a1c9b88a9e36f520ab5a1eb8bed93f52deefbe1312a69225c0a08edb10f87cc43b794aced9c74220cefcc57e7d","0xad7e810efd61ed4684aeda9ed8bb02fb9ae4b4b63fda8217d37012b94ff1b91c0087043bfa4e376f961fff030c729f3b","0x8b07c95c6a06db8738d10bb03ec11b89375c08e77f0cab7e672ce70b2685667ca19c7e1c8b092821d31108ea18dfd4c7","0x968825d025ded899ff7c57245250535c732836f7565eab1ae23ee7e513201d413c16e1ba3f5166e7ac6cf74de8ceef4f","0x908243370c5788200703ade8164943ad5f8c458219186432e74dbc9904a701ea307fd9b94976c866e6c58595fd891c4b","0x959969d16680bc535cdc6339e6186355d0d6c0d53d7bbfb411641b9bf4b770fd5f575beef5deec5c4fa4d192d455c350","0xad177f4f826a961adeac76da40e2d930748effff731756c797eddc4e5aa23c91f070fb69b19221748130b0961e68a6bb","0x82f8462bcc25448ef7e0739425378e9bb8a05e283ce54aae9dbebaf7a3469f57833c9171672ad43a79778366c72a5e37","0xa28fb275b1845706c2814d9638573e9bc32ff552ebaed761fe96fdbce70395891ca41c400ae438369264e31a2713b15f","0x8a9c613996b5e51dadb587a787253d6081ea446bf5c71096980bf6bd3c4b69905062a8e8a3792de2d2ece3b177a71089","0x8d5aefef9f60cb27c1db2c649221204dda48bb9bf8bf48f965741da051340e8e4cab88b9d15c69f3f84f4c854709f48a","0x93ebf2ca6ad85ab6deace6de1a458706285b31877b1b4d7dcb9d126b63047efaf8c06d580115ec9acee30c8a7212fa55","0xb3ee46ce189956ca298057fa8223b7fd1128cf52f39159a58bca03c71dd25161ac13f1472301f72aef3e1993fe1ab269","0xa24d7a8d066504fc3f5027ccb13120e2f22896860e02c45b5eba1dbd512d6a17c28f39155ea581619f9d33db43a96f92","0xae9ceacbfe12137db2c1a271e1b34b8f92e4816bad1b3b9b6feecc34df0f8b3b0f7ed0133acdf59c537d43d33fc8d429","0x83967e69bf2b361f86361bd705dce0e1ad26df06da6c52b48176fe8dfcbeb03c462c1a4c9e649eff8c654b18c876fdef","0x9148e6b814a7d779c19c31e33a068e97b597de1f8100513db3c581190513edc4d544801ce3dd2cf6b19e0cd6daedd28a","0x94ccdafc84920d320ed22de1e754adea072935d3c5f8c2d1378ebe53d140ea29853f056fb3fb1e375846061a038cc9bc","0xafb43348498c38b0fa5f971b8cdd3a62c844f0eb52bc33daf2f67850af0880fce84ecfb96201b308d9e6168a0d443ae3","0x86d5736520a83538d4cd058cc4b4e84213ed00ebd
6e7af79ae787adc17a92ba5359e28ba6c91936d967b4b28d24c3070","0xb5210c1ff212c5b1e9ef9126e08fe120a41e386bb12c22266f7538c6d69c7fd8774f11c02b81fd4e88f9137b020801fe","0xb78cfd19f94d24e529d0f52e18ce6185cb238edc6bd43086270fd51dd99f664f43dd4c7d2fe506762fbd859028e13fcf","0xa6e7220598c554abdcc3fdc587b988617b32c7bb0f82c06205467dbedb58276cc07cae317a190f19d19078773f4c2bbb","0xb88862809487ee430368dccd85a5d72fa4d163ca4aad15c78800e19c1a95be2192719801e315d86cff7795e0544a77e4","0x87ecb13a03921296f8c42ceb252d04716f10e09c93962239fcaa0a7fef93f19ab3f2680bc406170108bc583e9ff2e721","0xa810cd473832b6581c36ec4cb403f2849357ba2d0b54df98ef3004b8a530c078032922a81d40158f5fb0043d56477f6e","0xa247b45dd85ca7fbb718b328f30a03f03c84aef2c583fbdc9fcc9eb8b52b34529e8c8f535505c10598b1b4dac3d7c647","0x96ee0b91313c68bac4aa9e065ce9e1d77e51ca4cff31d6a438718c58264dee87674bd97fc5c6b8008be709521e4fd008","0x837567ad073e42266951a9a54750919280a2ac835a73c158407c3a2b1904cf0d17b7195a393c71a18ad029cbd9cf79ee","0xa6a469c44b67ebf02196213e7a63ad0423aab9a6e54acc6fcbdbb915bc043586993454dc3cd9e4be8f27d67c1050879b","0x8712d380a843b08b7b294f1f06e2f11f4ad6bcc655fdde86a4d8bc739c23916f6fad2b902fe47d6212f03607907e9f0e","0x920adfb644b534789943cdae1bdd6e42828dda1696a440af2f54e6b97f4f97470a1c6ea9fa6a2705d8f04911d055acd1","0xa161c73adf584a0061e963b062f59d90faac65c9b3a936b837a10d817f02fcabfa748824607be45a183dd40f991fe83f","0x874f4ecd408c76e625ea50bc59c53c2d930ee25baf4b4eca2440bfbffb3b8bc294db579caa7c68629f4d9ec24187c1ba","0x8bff18087f112be7f4aa654e85c71fef70eee8ae480f61d0383ff6f5ab1a0508f966183bb3fc4d6f29cb7ca234aa50d3","0xb03b46a3ca3bc743a173cbc008f92ab1aedd7466b35a6d1ca11e894b9482ea9dc75f8d6db2ddd1add99bfbe7657518b7","0x8b4f3691403c3a8ad9e097f02d130769628feddfa8c2b3dfe8cff64e2bed7d6e5d192c1e2ba0ac348b8585e94acd5fa1","0xa0d9ca4a212301f97591bf65d5ef2b2664766b427c9dd342e23cb468426e6a56be66b1cb41fea1889ac5d11a8e3c50a5","0x8c93ed74188ca23b3df29e5396974b9cc135c91fdefdea6c0df694c8116410e93509559af55533a3776ac11b228d69b1","0x82dd331fb3f9e344ebdee
b557769b86a2cc8cc38f6c298d7572a33aea87c261afa9dbd898989139b9fc16bc1e880a099","0xa65faedf326bcfd8ef98a51410c78b021d39206704e8291cd1f09e096a66b9b0486be65ff185ca224c45918ac337ddeb","0xa188b37d363ac072a766fd5d6fa27df07363feff1342217b19e3c37385e42ffde55e4be8355aceaa2f267b6d66b4ac41","0x810fa3ba3e96d843e3bafd3f2995727f223d3567c8ba77d684c993ba1773c66551eb5009897c51b3fe9b37196984f5ec","0x87631537541852da323b4353af45a164f68b304d24c01183bf271782e11687f3fcf528394e1566c2a26cb527b3148e64","0xb721cb2b37b3c477a48e3cc0044167d51ff568a5fd2fb606e5aec7a267000f1ddc07d3db919926ae12761a8e017c767c","0x904dfad4ba2cc1f6e60d1b708438a70b1743b400164cd981f13c064b8328d5973987d4fb9cf894068f29d3deaf624dfb","0xa70491538893552c20939fae6be2f07bfa84d97e2534a6bbcc0f1729246b831103505e9f60e97a8fa7d2e6c1c2384579","0x8726cf1b26b41f443ff7485adcfddc39ace2e62f4d65dd0bb927d933e262b66f1a9b367ded5fbdd6f3b0932553ac1735","0xae8a11cfdf7aa54c08f80cb645e3339187ab3886babe9fae5239ba507bb3dd1c0d161ca474a2df081dcd3d63e8fe445e","0x92328719e97ce60e56110f30a00ac5d9c7a2baaf5f8d22355d53c1c77941e3a1fec7d1405e6fbf8959665fe2ba7a8cad","0x8d9d6255b65798d0018a8cccb0b6343efd41dc14ff2058d3eed9451ceaad681e4a0fa6af67b0a04318aa628024e5553d","0xb70209090055459296006742d946a513f0cba6d83a05249ee8e7a51052b29c0ca9722dc4af5f9816a1b7938a5dac7f79","0xaab7b766b9bf91786dfa801fcef6d575dc6f12b77ecc662eb4498f0312e54d0de9ea820e61508fc8aeee5ab5db529349","0xa8104b462337748b7f086a135d0c3f87f8e51b7165ca6611264b8fb639d9a2f519926cb311fa2055b5fadf03da70c678","0xb0d2460747d5d8b30fc6c6bd0a87cb343ddb05d90a51b465e8f67d499cfc5e3a9e365da05ae233bbee792cdf90ec67d5","0xaa55f5bf3815266b4a149f85ed18e451c93de9163575e3ec75dd610381cc0805bb0a4d7c4af5b1f94d10231255436d2c","0x8d4c6a1944ff94426151909eb5b99cfd92167b967dabe2bf3aa66bb3c26c449c13097de881b2cfc1bf052862c1ef7b03","0x8862296162451b9b6b77f03bf32e6df71325e8d7485cf3335d66fd48b74c2a8334c241db8263033724f26269ad95b395","0x901aa96deb26cda5d9321190ae6624d357a41729d72ef1abfd71bebf6139af6d690798daba53b7bc5923462115ff748a","0x9
6c195ec4992728a1eb38cdde42d89a7bce150db43adbc9e61e279ea839e538deec71326b618dd39c50d589f78fc0614","0xb6ff8b8aa0837b99a1a8b46fb37f20ad4aecc6a98381b1308697829a59b8442ffc748637a88cb30c9b1f0f28a926c4f6","0x8d807e3dca9e7bef277db1d2cfb372408dd587364e8048b304eff00eacde2c723bfc84be9b98553f83cba5c7b3cba248","0x8800c96adb0195c4fc5b24511450dee503c32bf47044f5e2e25bd6651f514d79a2dd9b01cd8c09f3c9d3859338490f57","0x89fe366096097e38ec28dd1148887112efa5306cc0c3da09562aafa56f4eb000bf46ff79bf0bdd270cbde6bf0e1c8957","0xaf409a90c2776e1e7e3760b2042507b8709e943424606e31e791d42f17873a2710797f5baaab4cc4a19998ef648556b0","0x8d761863c9b6edbd232d35ab853d944f5c950c2b643f84a1a1327ebb947290800710ff01dcfa26dc8e9828481240e8b1","0x90b95e9be1e55c463ed857c4e0617d6dc3674e99b6aa62ed33c8e79d6dfcf7d122f4f4cc2ee3e7c5a49170cb617d2e2e","0xb3ff381efefabc4db38cc4727432e0301949ae4f16f8d1dea9b4f4de611cf5a36d84290a0bef160dac4e1955e516b3b0","0xa8a84564b56a9003adcadb3565dc512239fc79572762cda7b5901a255bc82656bb9c01212ad33d6bef4fbbce18dacc87","0x90a081890364b222eef54bf0075417f85e340d2fec8b7375995f598aeb33f26b44143ebf56fca7d8b4ebb36b5747b0eb","0xade6ee49e1293224ddf2d8ab7f14bb5be6bc6284f60fd5b3a1e0cf147b73cff57cf19763b8a36c5083badc79c606b103","0xb2fa99806dd2fa3de09320b615a2570c416c9bcdb052e592b0aead748bbe407ec9475a3d932ae48b71c2627eb81986a6","0x91f3b7b73c8ccc9392542711c45fe6f236057e6efad587d661ad5cb4d6e88265f86b807bb1151736b1009ab74fd7acb4","0x8800e2a46af96696dfbdcbf2ca2918b3dcf28ad970170d2d1783b52b8d945a9167d052beeb55f56c126da7ffa7059baa","0x9862267a1311c385956b977c9aa08548c28d758d7ba82d43dbc3d0a0fd1b7a221d39e8399997fea9014ac509ff510ac4","0xb7d24f78886fd3e2d283e18d9ad5a25c1a904e7d9b9104bf47da469d74f34162e27e531380dbbe0a9d051e6ffd51d6e7","0xb0f445f9d143e28b9df36b0f2c052da87ee2ca374d9d0fbe2eff66ca6fe5fe0d2c1951b428d58f7314b7e74e45d445ea","0xb63fc4083eabb8437dafeb6a904120691dcb53ce2938b820bb553da0e1eecd476f72495aacb72600cf9cad18698fd3db","0xb9ffd8108eaebd582d665f8690fe8bb207fd85185e6dd9f0b355a09bac1bbff26e0fdb172bc0498df0
25414e88fe2eda","0x967ed453e1f1a4c5b7b6834cc9f75c13f6889edc0cc91dc445727e9f408487bbf05c337103f61397a10011dfbe25d61d","0x98ceb673aff36e1987d5521a3984a07079c3c6155974bb8b413e8ae1ce84095fe4f7862fba7aefa14753eb26f2a5805f","0x85f01d28603a8fdf6ce6a50cb5c44f8a36b95b91302e3f4cd95c108ce8f4d212e73aec1b8d936520d9226802a2bd9136","0x88118e9703200ca07910345fbb789e7a8f92bd80bbc79f0a9e040e8767d33df39f6eded403a9b636eabf9101e588482a","0x90833a51eef1b10ed74e8f9bbd6197e29c5292e469c854eed10b0da663e2bceb92539710b1858bbb21887bd538d28d89","0xb513b905ec19191167c6193067b5cfdf5a3d3828375360df1c7e2ced5815437dfd37f0c4c8f009d7fb29ff3c8793f560","0xb1b6d405d2d18f9554b8a358cc7e2d78a3b34269737d561992c8de83392ac9a2857be4bf15de5a6c74e0c9d0f31f393c","0xb828bd3e452b797323b798186607849f85d1fb20c616833c0619360dfd6b3e3aa000fd09dafe4b62d74abc41072ff1a9","0x8efde67d0cca56bb2c464731879c9ac46a52e75bac702a63200a5e192b4f81c641f855ca6747752b84fe469cb7113b6c","0xb2762ba1c89ac3c9a983c242e4d1c2610ff0528585ed5c0dfc8a2c0253551142af9b59f43158e8915a1da7cc26b9df67","0x8a3f1157fb820d1497ef6b25cd70b7e16bb8b961b0063ad340d82a79ee76eb2359ca9e15e6d42987ed7f154f5eeaa2da","0xa75e29f29d38f09c879f971c11beb5368affa084313474a5ecafa2896180b9e47ea1995c2733ec46f421e395a1d9cffe","0x8e8c3dd3e7196ef0b4996b531ec79e4a1f211db5d5635e48ceb80ff7568b2ff587e845f97ee703bb23a60945ad64314a","0x8e7f32f4a3e3c584af5e3d406924a0aa34024c42eca74ef6cc2a358fd3c9efaf25f1c03aa1e66bb94b023a2ee2a1cace","0xab7dce05d59c10a84feb524fcb62478906b3fa045135b23afbede3bb32e0c678d8ebe59feabccb5c8f3550ea76cae44b","0xb38bb4b44d827f6fd3bd34e31f9186c59e312dbfadd4a7a88e588da10146a78b1f8716c91ad8b806beb8da65cab80c4c","0x9490ce9442bbbd05438c7f5c4dea789f74a7e92b1886a730544b55ba377840740a3ae4f2f146ee73f47c9278b0e233bc","0x83c003fab22a7178eed1a668e0f65d4fe38ef3900044e9ec63070c23f2827d36a1e73e5c2b883ec6a2afe2450171b3b3","0x9982f02405978ddc4fca9063ebbdb152f524c84e79398955e66fe51bc7c1660ec1afc3a86ec49f58d7b7dde03505731c","0xab337bd83ccdd2322088ffa8d005f450ced6b35790f37ab4534313315ee843
12adc25e99cce052863a8bedee991729ed","0x8312ce4bec94366d88f16127a17419ef64285cd5bf9e5eda010319b48085966ed1252ed2f5a9fd3e0259b91bb65f1827","0xa60d5a6327c4041b0c00a1aa2f0af056520f83c9ce9d9ccd03a0bd4d9e6a1511f26a422ea86bd858a1f77438adf07e6c","0xb84a0a0b030bdad83cf5202aa9afe58c9820e52483ab41f835f8c582c129ee3f34aa096d11c1cd922eda02ea1196a882","0x8077d105317f4a8a8f1aadeb05e0722bb55f11abcb490c36c0904401107eb3372875b0ac233144829e734f0c538d8c1d","0x9202503bd29a6ec198823a1e4e098f9cfe359ed51eb5174d1ca41368821bfeebcbd49debfd02952c41359d1c7c06d2b1","0xabc28c155e09365cb77ffead8dc8f602335ef93b2f44e4ef767ce8fc8ef9dd707400f3a722e92776c2e0b40192c06354","0xb0f6d1442533ca45c9399e0a63a11f85ff288d242cea6cb3b68c02e77bd7d158047cae2d25b3bcd9606f8f66d9b32855","0xb01c3d56a0db84dc94575f4b6ee2de4beca3230e86bed63e2066beb22768b0a8efb08ebaf8ac3dedb5fe46708b084807","0x8c8634b0432159f66feaabb165842d1c8ac378f79565b1b90c381aa8450eb4231c3dad11ec9317b9fc2b155c3a771e32","0x8e67f623d69ecd430c9ee0888520b6038f13a2b6140525b056dc0951f0cfed2822e62cf11d952a483107c5c5acac4826","0x9590bb1cba816dd6acd5ac5fba5142c0a19d53573e422c74005e0bcf34993a8138c83124cad35a3df65879dba6134edd","0x801cd96cde0749021a253027118d3ea135f3fcdbe895db08a6c145641f95ebd368dd6a1568d995e1d0084146aebe224a","0x848b5d196427f6fc1f762ee3d36e832b64a76ec1033cfedc8b985dea93932a7892b8ef1035c653fb9dcd9ab2d9a44ac8","0xa1017eb83d5c4e2477e7bd2241b2b98c4951a3b391081cae7d75965cadc1acaec755cf350f1f3d29741b0828e36fedea","0x8d6d2785e30f3c29aad17bd677914a752f831e96d46caf54446d967cb2432be2c849e26f0d193a60bee161ea5c6fe90a","0x935c0ba4290d4595428e034b5c8001cbd400040d89ab00861108e8f8f4af4258e41f34a7e6b93b04bc253d3b9ffc13bf","0xaac02257146246998477921cef2e9892228590d323b839f3e64ea893b991b463bc2f47e1e5092ddb47e70b2f5bce7622","0xb921fde9412970a5d4c9a908ae8ce65861d06c7679af577cf0ad0d5344c421166986bee471fd6a6cecb7d591f06ec985","0x8ef4c37487b139d6756003060600bb6ebac7ea810b9c4364fc978e842f13ac196d1264fbe5af60d76ff6d9203d8e7d3f","0x94b65e14022b5cf6a9b95f94be5ace2711957c96f4
211c3f7bb36206bd39cfbd0ea82186cab5ad0577a23214a5c86e9e","0xa31c166d2a2ca1d5a75a5920fef7532681f62191a50d8555fdaa63ba4581c3391cc94a536fc09aac89f64eafceec3f90","0x919a8cc128de01e9e10f5d83b08b52293fdd41bde2b5ae070f3d95842d4a16e5331cf2f3d61c765570c8022403610fa4","0xb23d6f8331eef100152d60483cfa14232a85ee712c8538c9b6417a5a7c5b353c2ac401390c6c215cb101f5cee6b5f43e","0xab357160c08a18319510a571eafff154298ce1020de8e1dc6138a09fcb0fcbcdd8359f7e9386bda00b7b9cdea745ffdc","0xab55079aea34afa5c0bd1124b9cdfe01f325b402fdfa017301bf87812eaa811ea5798c3aaf818074d420d1c782b10ada","0xade616010dc5009e7fc4f8d8b00dc716686a5fa0a7816ad9e503e15839d3b909b69d9dd929b7575376434ffec0d2bea8","0x863997b97ed46898a8a014599508fa3079f414b1f4a0c4fdc6d74ae8b444afa350f327f8bfc2a85d27f9e2d049c50135","0x8d602ff596334efd4925549ed95f2aa762b0629189f0df6dbb162581657cf3ea6863cd2287b4d9c8ad52813d87fcd235","0xb70f68c596dcdeed92ad5c6c348578b26862a51eb5364237b1221e840c47a8702f0fbc56eb520a22c0eed99795d3903e","0x9628088f8e0853cefadee305a8bf47fa990c50fa96a82511bbe6e5dc81ef4b794e7918a109070f92fc8384d77ace226f","0x97e26a46e068b605ce96007197ecd943c9a23881862f4797a12a3e96ba2b8d07806ad9e2a0646796b1889c6b7d75188c","0xb1edf467c068cc163e2d6413cc22b16751e78b3312fe47b7ea82b08a1206d64415b2c8f2a677fa89171e82cc49797150","0xa44d15ef18745b251429703e3cab188420e2d974de07251501799b016617f9630643fcd06f895634d8ecdd579e1bf000","0xabd126df3917ba48c618ee4dbdf87df506193462f792874439043fa1b844466f6f4e0ff2e42516e63b5b23c0892b2695","0xa2a67f57c4aa3c2aa1eeddbfd5009a89c26c2ce8fa3c96a64626aba19514beb125f27df8559506f737de3eae0f1fc18f","0xa633e0132197e6038197304b296ab171f1d8e0d0f34dcf66fe9146ac385b0239232a8470b9205a4802ab432389f4836d","0xa914b3a28509a906c3821463b936455d58ff45dcbe158922f9efb2037f2eb0ce8e92532d29b5d5a3fcd0d23fa773f272","0xa0e1412ce4505daf1a2e59ce4f0fc0e0023e335b50d2b204422f57cd65744cc7a8ed35d5ef131a42c70b27111d3115b7","0xa2339e2f2b6072e88816224fdd612c04d64e7967a492b9f8829db15367f565745325d361fd0607b0def1be384d010d9e","0xa7309fc41203cb99382e81
93a1dcf03ac190a7ce04835304eb7e341d78634e83ea47cb15b885601956736d04cdfcaa01","0x81f3ccd6c7f5b39e4e873365f8c37b214e8ab122d04a606fbb7339dc3298c427e922ec7418002561d4106505b5c399ee","0x92c121cf914ca549130e352eb297872a63200e99b148d88fbc9506ad882bec9d0203d65f280fb5b0ba92e336b7f932e8","0xa4b330cf3f064f5b131578626ad7043ce2a433b6f175feb0b52d36134a454ca219373fd30d5e5796410e005b69082e47","0x86fe5774112403ad83f9c55d58317eeb17ad8e1176d9f2f69c2afb7ed83bc718ed4e0245ceab4b377f5f062dcd4c00e7","0x809d152a7e2654c7fd175b57f7928365a521be92e1ed06c05188a95864ddb25f7cab4c71db7d61bbf4cae46f3a1d96ce","0xb82d663e55c2a5ada7e169e9b1a87bc1c0177baf1ec1c96559b4cb1c5214ce1ddf2ab8d345014cab6402f3774235cf5a","0x86580af86df1bd2c385adb8f9a079e925981b7184db66fc5fe5b14cddb82e7d836b06eaeef14924ac529487b23dae111","0xb5f5f4c5c94944ecc804df6ab8687d64e27d988cbfeae1ba7394e0f6adbf778c5881ead7cd8082dd7d68542b9bb4ecd5","0xa6016916146c2685c46e8fdd24186394e2d5496e77e08c0c6a709d4cd7dfa97f1efcef94922b89196819076a91ad37b5","0xb778e7367ded3b6eab53d5fc257f7a87e8faf74a593900f2f517220add2125be3f6142022660d8181df8d164ad9441ce","0x8581b2d36abe6f553add4d24be761bec1b8efaa2929519114346615380b3c55b59e6ad86990e312f7e234d0203bdf59b","0x9917e74fd45c3f71a829ff5498a7f6b5599b48c098dda2339bf04352bfc7f368ccf1a407f5835901240e76452ae807d7","0xafd196ce6f9335069138fd2e3d133134da253978b4ce373152c0f26affe77a336505787594022e610f8feb722f7cc1fb","0xa477491a1562e329764645e8f24d8e228e5ef28c9f74c6b5b3abc4b6a562c15ffb0f680d372aed04d9e1bf944dece7be","0x9767440d58c57d3077319d3a330e5322b9ba16981ec74a5a14d53462eab59ae7fd2b14025bfc63b268862094acb444e6","0x80986d921be3513ef69264423f351a61cb48390c1be8673aee0f089076086aaebea7ebe268fd0aa7182695606116f679","0xa9554c5c921c07b450ee04e34ec58e054ac1541b26ce2ce5a393367a97348ba0089f53db6660ad76b60278b66fd12e3e","0x95097e7d2999b3e84bf052c775581cf361325325f4a50192521d8f4693c830bed667d88f482dc1e3f833aa2bd22d2cbf","0x9014c91d0f85aefd28436b5228c12f6353c055a9326c7efbf5e071e089e2ee7c070fcbc84c5fafc336cbb8fa6fec1ca1","0x90
f57ba36ee1066b55d37384942d8b57ae00f3cf9a3c1d6a3dfee1d1af42d4b5fa9baeb0cd7e46687d1d6d090ddb931d","0x8e4b1db12fd760a17214c9e47f1fce6e43c0dbb4589a827a13ac61aaae93759345697bb438a00edab92e0b7b62414683","0x8022a959a513cdc0e9c705e0fc04eafd05ff37c867ae0f31f6d01cddd5df86138a426cab2ff0ac8ff03a62e20f7e8f51","0x914e9a38829834c7360443b8ed86137e6f936389488eccf05b4b4db7c9425611705076ecb3f27105d24b85c852be7511","0x957fb10783e2bd0db1ba66b18e794df710bc3b2b05776be146fa5863c15b1ebdd39747b1a95d9564e1772cdfc4f37b8a","0xb6307028444daed8ed785ac9d0de76bc3fe23ff2cc7e48102553613bbfb5afe0ebe45e4212a27021c8eb870721e62a1f","0x8f76143597777d940b15a01b39c5e1b045464d146d9a30a6abe8b5d3907250e6c7f858ff2308f8591e8b0a7b3f3c568a","0x96163138ac0ce5fd00ae9a289648fd9300a0ca0f63a88481d703ecd281c06a52a3b5178e849e331f9c85ca4ba398f4cc","0xa63ef47c3e18245b0482596a09f488a716df3cbd0f9e5cfabed0d742843e65db8961c556f45f49762f3a6ac8b627b3ef","0x8cb595466552e7c4d42909f232d4063e0a663a8ef6f6c9b7ce3a0542b2459cde04e0e54c7623d404acb5b82775ac04f6","0xb47fe69960eb45f399368807cff16d941a5a4ebad1f5ec46e3dc8a2e4d598a7e6114d8f0ca791e9720fd786070524e2b","0x89eb5ff83eea9df490e5beca1a1fbbbbcf7184a37e2c8c91ede7a1e654c81e8cd41eceece4042ea7918a4f4646b67fd6","0xa84f5d155ed08b9054eecb15f689ba81e44589e6e7207a99790c598962837ca99ec12344105b16641ca91165672f7153","0xa6cc8f25c2d5b2d2f220ec359e6a37a52b95fa6af6e173c65e7cd55299eff4aa9e6d9e6f2769e6459313f1f2aecb0fab","0xafcde944411f017a9f7979755294981e941cc41f03df5e10522ef7c7505e5f1babdd67b3bf5258e8623150062eb41d9b","0x8fab39f39c0f40182fcd996ade2012643fe7731808afbc53f9b26900b4d4d1f0f5312d9d40b3df8baa4739970a49c732","0xae193af9726da0ebe7df1f9ee1c4846a5b2a7621403baf8e66c66b60f523e719c30c6b4f897bb14b27d3ff3da8392eeb","0x8ac5adb82d852eba255764029f42e6da92dcdd0e224d387d1ef94174038db9709ac558d90d7e7c57ad4ce7f89bbfc38c","0xa2066b3458fdf678ee487a55dd5bfb74fde03b54620cb0e25412a89ee28ad0d685e309a51e3e4694be2fa6f1593a344c","0x88d031745dd0ae07d61a15b594be5d4b2e2a29e715d081649ad63605e3404b0c3a5353f0fd9fad9c05c
18e93ce674fa1","0x8283cfb0ef743a043f2b77ecaeba3005e2ca50435585b5dd24777ee6bce12332f85e21b446b536da38508807f0f07563","0xb376de22d5f6b0af0b59f7d9764561f4244cf8ffe22890ecd3dcf2ff1832130c9b821e068c9d8773136f4796721e5963","0xae3afc50c764f406353965363840bf28ee85e7064eb9d5f0bb3c31c64ab10f48c853e942ee2c9b51bae59651eaa08c2f","0x948b204d103917461a01a6c57a88f2d66b476eae5b00be20ec8c747650e864bc8a83aee0aff59cb7584b7a3387e0ee48","0x81ab098a082b07f896c5ffd1e4446cb7fb44804cbbf38d125208b233fc82f8ec9a6a8d8dd1c9a1162dc28ffeec0dde50","0xa149c6f1312821ced2969268789a3151bdda213451760b397139a028da609c4134ac083169feb0ee423a0acafd10eceb","0xb0ac9e27a5dadaf523010f730b28f0ebac01f460d3bbbe277dc9d44218abb5686f4fac89ae462682fef9edbba663520a","0x8d0e0073cca273daaaa61b6fc54bfe5a009bc3e20ae820f6c93ba77b19eca517d457e948a2de5e77678e4241807157cb","0xad61d3a2edf7c7533a04964b97499503fd8374ca64286dba80465e68fe932e96749b476f458c6fc57cb1a7ca85764d11","0x90eb5e121ae46bc01a30881eaa556f46bd8457a4e80787cf634aab355082de34ac57d7f497446468225f7721e68e2a47","0x8cdac557de7c42d1f3780e33dec1b81889f6352279be81c65566cdd4952d4c15d79e656cbd46035ab090b385e90245ef","0x82b67e61b88b84f4f4d4f65df37b3e3dcf8ec91ea1b5c008fdccd52da643adbe6468a1cfdb999e87d195afe2883a3b46","0x8503b467e8f5d6048a4a9b78496c58493a462852cab54a70594ae3fd064cfd0deb4b8f336a262155d9fedcaa67d2f6fd","0x8db56c5ac763a57b6ce6832930c57117058e3e5a81532b7d19346346205e2ec614eb1a2ee836ef621de50a7bc9b7f040","0xad344699198f3c6e8c0a3470f92aaffc805b76266734414c298e10b5b3797ca53578de7ccb2f458f5e0448203f55282b","0x80602032c43c9e2a09154cc88b83238343b7a139f566d64cb482d87436b288a98f1ea244fd3bff8da3c398686a900c14","0xa6385bd50ecd548cfb37174cdbb89e10025b5cadaf3cff164c95d7aef5a33e3d6a9bf0c681b9e11db9ef54ebeee2a0c1","0xabf2d95f4aa34b0581eb9257a0cc8462b2213941a5deb8ba014283293e8b36613951b61261cc67bbd09526a54cbbff76","0xa3d5de52f48df72c289ff713e445991f142390798cd42bd9d9dbefaee4af4f5faf09042d126b975cf6b98711c3072553","0x8e627302ff3d686cff8872a1b7c2a57b35f45bf2fc9aa42b049d8b4d6996a66
2b8e7cbac6597f0cb79b0cc4e29fbf133","0x8510702e101b39a1efbf4e504e6123540c34b5689645e70d0bac1ecc1baf47d86c05cef6c4317a4e99b4edaeb53f2d00","0xaa173f0ecbcc6088f878f8726d317748c81ebf501bba461f163b55d66099b191ec7c55f7702f351a9c8eb42cfa3280e2","0xb560a697eafab695bcef1416648a0a664a71e311ecbe5823ae903bd0ed2057b9d7574b9a86d3fe22aa3e6ddce38ea513","0x8df6304a3d9cf40100f3f687575419c998cd77e5cc27d579cf4f8e98642de3609af384a0337d145dd7c5635172d26a71","0x8105c7f3e4d30a29151849673853b457c1885c186c132d0a98e63096c3774bc9deb956cf957367e633d0913680bda307","0x95373fc22c0917c3c2044ac688c4f29a63ed858a45c0d6d2d0fe97afd6f532dcb648670594290c1c89010ecc69259bef","0x8c2fae9bcadab341f49b55230310df93cac46be42d4caa0d42e45104148a91e527af1b4209c0d972448162aed28fab64","0xb05a77baab70683f76209626eaefdda2d36a0b66c780a20142d23c55bd479ddd4ad95b24579384b6cf62c8eb4c92d021","0x8e6bc6a7ea2755b4aaa19c1c1dee93811fcde514f03485fdc3252f0ab7f032c315614f6336e57cea25dcfb8fb6084eeb","0xb656a27d06aade55eadae2ad2a1059198918ea6cc3fd22c0ed881294d34d5ac7b5e4700cc24350e27d76646263b223aa","0xa296469f24f6f56da92d713afcd4dd606e7da1f79dc4e434593c53695847eefc81c7c446486c4b3b8c8d00c90c166f14","0x87a326f57713ac2c9dffeb3af44b9f3c613a8f952676fc46343299122b47ee0f8d792abaa4b5db6451ced5dd153aabd0","0xb689e554ba9293b9c1f6344a3c8fcb6951d9f9eac4a2e2df13de021aade7c186be27500e81388e5b8bcab4c80f220a31","0x87ae0aa0aa48eac53d1ca5a7b93917de12db9e40ceabf8fdb40884ae771cfdf095411deef7c9f821af0b7070454a2608","0xa71ffa7eae8ace94e6c3581d4cb2ad25d48cbd27edc9ec45baa2c8eb932a4773c3272b2ffaf077b40f76942a1f3af7f2","0x94c218c91a9b73da6b7a495b3728f3028df8ad9133312fc0c03e8c5253b7ccb83ed14688fd4602e2fd41f29a0bc698bd","0xae1e77b90ca33728af07a4c03fb2ef71cd92e2618e7bf8ed4d785ce90097fc4866c29999eb84a6cf1819d75285a03af2","0xb7a5945b277dab9993cf761e838b0ac6eaa903d7111fca79f9fde3d4285af7a89bf6634a71909d095d7619d913972c9c","0x8c43b37be02f39b22029b20aca31bff661abce4471dca88aa3bddefd9c92304a088b2dfc8c4795acc301ca3160656af2","0xb32e5d0fba024554bd5fe8a793ebe8003335ddd7f58
5876df2048dcf759a01285fecb53daae4950ba57f3a282a4d8495","0x85ea7fd5e10c7b659df5289b2978b2c89e244f269e061b9a15fcab7983fc1962b63546e82d5731c97ec74b6804be63ef","0x96b89f39181141a7e32986ac02d7586088c5a9662cec39843f397f3178714d02f929af70630c12cbaba0268f8ba2d4fa","0x929ab1a2a009b1eb37a2817c89696a06426529ebe3f306c586ab717bd34c35a53eca2d7ddcdef36117872db660024af9","0xa696dccf439e9ca41511e16bf3042d7ec0e2f86c099e4fc8879d778a5ea79e33aa7ce96b23dc4332b7ba26859d8e674d","0xa8fe69a678f9a194b8670a41e941f0460f6e2dbc60470ab4d6ae2679cc9c6ce2c3a39df2303bee486dbfde6844e6b31a","0x95f58f5c82de2f2a927ca99bf63c9fc02e9030c7e46d0bf6b67fe83a448d0ae1c99541b59caf0e1ccab8326231af09a5","0xa57badb2c56ca2c45953bd569caf22968f76ed46b9bac389163d6fe22a715c83d5e94ae8759b0e6e8c2f27bff7748f3f","0x868726fd49963b24acb5333364dffea147e98f33aa19c7919dc9aca0fd26661cfaded74ede7418a5fadbe7f5ae67b67b","0xa8d8550dcc64d9f1dd7bcdab236c4122f2b65ea404bb483256d712c7518f08bb028ff8801f1da6aed6cbfc5c7062e33b","0x97e25a87dae23155809476232178538d4bc05d4ff0882916eb29ae515f2a62bfce73083466cc0010ca956aca200aeacc","0xb4ea26be3f4bd04aa82d7c4b0913b97bcdf5e88b76c57eb1a336cbd0a3eb29de751e1bc47c0e8258adec3f17426d0c71","0x99ee555a4d9b3cf2eb420b2af8e3bc99046880536116d0ce7193464ac40685ef14e0e3c442f604e32f8338cb0ef92558","0x8c64efa1da63cd08f319103c5c7a761221080e74227bbc58b8fb35d08aa42078810d7af3e60446cbaff160c319535648","0x8d9fd88040076c28420e3395cbdfea402e4077a3808a97b7939d49ecbcf1418fe50a0460e1c1b22ac3f6e7771d65169a","0xae3c19882d7a9875d439265a0c7003c8d410367627d21575a864b9cb4918de7dbdb58a364af40c5e045f3df40f95d337","0xb4f7bfacab7b2cafe393f1322d6dcc6f21ffe69cd31edc8db18c06f1a2b512c27bd0618091fd207ba8df1808e9d45914","0x94f134acd0007c623fb7934bcb65ef853313eb283a889a3ffa79a37a5c8f3665f3d5b4876bc66223610c21dc9b919d37","0xaa15f74051171daacdc1f1093d3f8e2d13da2833624b80a934afec86fc02208b8f55d24b7d66076444e7633f46375c6a","0xa32d6bb47ef9c836d9d2371807bafbbbbb1ae719530c19d6013f1d1f813c49a60e4fa51d83693586cba3a840b23c0404","0xb61b3599145ea8680011aa2
366dc511a358b7d67672d5b0c5be6db03b0efb8ca5a8294cf220ea7409621f1664e00e631","0x859cafc3ee90b7ececa1ed8ef2b2fc17567126ff10ca712d5ffdd16aa411a5a7d8d32c9cab1fbf63e87dce1c6e2f5f53","0xa2fef1b0b2874387010e9ae425f3a9676d01a095d017493648bcdf3b31304b087ccddb5cf76abc4e1548b88919663b6b","0x939e18c73befc1ba2932a65ede34c70e4b91e74cc2129d57ace43ed2b3af2a9cc22a40fbf50d79a63681b6d98852866d","0xb3b4259d37b1b14aee5b676c9a0dd2d7f679ab95c120cb5f09f9fbf10b0a920cb613655ddb7b9e2ba5af4a221f31303c","0x997255fe51aaca6e5a9cb3359bcbf25b2bb9e30649bbd53a8a7c556df07e441c4e27328b38934f09c09d9500b5fabf66","0xabb91be2a2d860fd662ed4f1c6edeefd4da8dc10e79251cf87f06029906e7f0be9b486462718f0525d5e049472692cb7","0xb2398e593bf340a15f7801e1d1fbda69d93f2a32a889ec7c6ae5e8a37567ac3e5227213c1392ee86cfb3b56ec2787839","0x8ddf10ccdd72922bed36829a36073a460c2118fc7a56ff9c1ac72581c799b15c762cb56cb78e3d118bb9f6a7e56cb25e","0x93e6bc0a4708d16387cacd44cf59363b994dc67d7ada7b6d6dbd831c606d975247541b42b2a309f814c1bfe205681fc6","0xb93fc35c05998cffda2978e12e75812122831523041f10d52f810d34ff71944979054b04de0117e81ddf5b0b4b3e13c0","0x92221631c44d60d68c6bc7b287509f37ee44cbe5fdb6935cee36b58b17c7325098f98f7910d2c3ca5dc885ad1d6dabc7","0xa230124424a57fad3b1671f404a94d7c05f4c67b7a8fbacfccea28887b78d7c1ed40b92a58348e4d61328891cd2f6cee","0xa6a230edb8518a0f49d7231bc3e0bceb5c2ac427f045819f8584ba6f3ae3d63ed107a9a62aad543d7e1fcf1f20605706","0x845be1fe94223c7f1f97d74c49d682472585d8f772762baad8a9d341d9c3015534cc83d102113c51a9dea2ab10d8d27b","0xb44262515e34f2db597c8128c7614d33858740310a49cdbdf9c8677c5343884b42c1292759f55b8b4abc4c86e4728033","0x805592e4a3cd07c1844bc23783408310accfdb769cca882ad4d07d608e590a288b7370c2cb327f5336e72b7083a0e30f","0x95153e8b1140df34ee864f4ca601cb873cdd3efa634af0c4093fbaede36f51b55571ab271e6a133020cd34db8411241f","0x82878c1285cfa5ea1d32175c9401f3cc99f6bb224d622d3fd98cc7b0a27372f13f7ab463ce3a33ec96f9be38dbe2dfe3","0xb7588748f55783077c27fc47d33e20c5c0f5a53fc0ac10194c003aa09b9f055d08ec971effa4b7f760553997a56967b3","0xb36
b4de6d1883b6951f59cfae381581f9c6352fcfcf1524fccdab1571a20f80441d9152dc6b48bcbbf00371337ca0bd5","0x89c5523f2574e1c340a955cbed9c2f7b5fbceb260cb1133160dabb7d41c2f613ec3f6e74bbfab3c4a0a6f0626dbe068f","0xa52f58cc39f968a9813b1a8ddc4e83f4219e4dd82c7aa1dd083bea7edf967151d635aa9597457f879771759b876774e4","0x8300a67c2e2e123f89704abfde095463045dbd97e20d4c1157bab35e9e1d3d18f1f4aaba9cbe6aa2d544e92578eaa1b6","0xac6a7f2918768eb6a43df9d3a8a04f8f72ee52f2e91c064c1c7d75cad1a3e83e5aba9fe55bb94f818099ac91ccf2e961","0x8d64a2b0991cf164e29835c8ddef6069993a71ec2a7de8157bbfa2e00f6367be646ed74cbaf524f0e9fe13fb09fa15fd","0x8b2ffe5a545f9f680b49d0a9797a4a11700a2e2e348c34a7a985fc278f0f12def6e06710f40f9d48e4b7fbb71e072229","0x8ab8f71cd337fa19178924e961958653abf7a598e3f022138b55c228440a2bac4176cea3aea393549c03cd38a13eb3fc","0x8419d28318c19ea4a179b7abb43669fe96347426ef3ac06b158d79c0acf777a09e8e770c2fb10e14b3a0421705990b23","0x8bacdac310e1e49660359d0a7a17fe3d334eb820e61ae25e84cb52f863a2f74cbe89c2e9fc3283745d93a99b79132354","0xb57ace3fa2b9f6b2db60c0d861ace7d7e657c5d35d992588aeed588c6ce3a80b6f0d49f8a26607f0b17167ab21b675e4","0x83e265cde477f2ecc164f49ddc7fb255bb05ff6adc347408353b7336dc3a14fdedc86d5a7fb23f36b8423248a7a67ed1","0xa60ada971f9f2d79d436de5d3d045f5ab05308cae3098acaf5521115134b2a40d664828bb89895840db7f7fb499edbc5","0xa63eea12efd89b62d3952bf0542a73890b104dd1d7ff360d4755ebfa148fd62de668edac9eeb20507967ea37fb220202","0xa0275767a270289adc991cc4571eff205b58ad6d3e93778ddbf95b75146d82517e8921bd0d0564e5b75fa0ccdab8e624","0xb9b03fd3bf07201ba3a039176a965d736b4ef7912dd9e9bf69fe1b57c330a6aa170e5521fe8be62505f3af81b41d7806","0xa95f640e26fb1106ced1729d6053e41a16e4896acac54992279ff873e5a969aad1dcfa10311e28b8f409ac1dab7f03bb","0xb144778921742418053cb3c70516c63162c187f00db2062193bb2c14031075dbe055d020cde761b26e8c58d0ea6df2c1","0x8432fbb799e0435ef428d4fefc309a05dd589bce74d7a87faf659823e8c9ed51d3e42603d878e80f439a38be4321c2fa","0xb08ddef14e42d4fd5d8bf39feb7485848f0060d43b51ed5bdda39c05fe154fb111d29719ee61a23c3921
41358c0cfcff","0x8ae3c5329a5e025b86b5370e06f5e61177df4bda075856fade20a17bfef79c92f54ed495f310130021ba94fb7c33632b","0x92b6d3c9444100b4d7391febfc1dddaa224651677c3695c47a289a40d7a96d200b83b64e6d9df51f534564f272a2c6c6","0xb432bc2a3f93d28b5e506d68527f1efeb2e2570f6be0794576e2a6ef9138926fdad8dd2eabfa979b79ab7266370e86bc","0x8bc315eacedbcfc462ece66a29662ca3dcd451f83de5c7626ef8712c196208fb3d8a0faf80b2e80384f0dd9772f61a23","0xa72375b797283f0f4266dec188678e2b2c060dfed5880fc6bb0c996b06e91a5343ea2b695adaab0a6fd183b040b46b56","0xa43445036fbaa414621918d6a897d3692fdae7b2961d87e2a03741360e45ebb19fcb1703d23f1e15bb1e2babcafc56ac","0xb9636b2ffe305e63a1a84bd44fb402442b1799bd5272638287aa87ca548649b23ce8ce7f67be077caed6aa2dbc454b78","0x99a30bf0921d854c282b83d438a79f615424f28c2f99d26a05201c93d10378ab2cd94a792b571ddae5d4e0c0013f4006","0x8648e3c2f93d70b392443be116b48a863e4b75991bab5db656a4ef3c1e7f645e8d536771dfe4e8d1ceda3be8d32978b0","0xab50dc9e6924c1d2e9d2e335b2d679fc7d1a7632e84964d3bac0c9fe57e85aa5906ec2e7b0399d98ddd022e9b19b5904","0xab729328d98d295f8f3272afaf5d8345ff54d58ff9884da14f17ecbdb7371857fdf2f3ef58080054e9874cc919b46224","0x83fa5da7592bd451cad3ad7702b4006332b3aae23beab4c4cb887fa6348317d234bf62a359e665b28818e5410c278a09","0x8bdbff566ae9d368f114858ef1f009439b3e9f4649f73efa946e678d6c781d52c69af195df0a68170f5f191b2eac286b","0x91245e59b4425fd4edb2a61d0d47c1ccc83d3ced8180de34887b9655b5dcda033d48cde0bdc3b7de846d246c053a02e8","0xa2cb00721e68f1cad8933947456f07144dc69653f96ceed845bd577d599521ba99cdc02421118971d56d7603ed118cbf","0xaf8cd66d303e808b22ec57860dd909ca64c27ec2c60e26ffecfdc1179d8762ffd2739d87b43959496e9fee4108df71df","0x9954136812dffcd5d3f167a500e7ab339c15cfc9b3398d83f64b0daa3dd5b9a851204f424a3493b4e326d3de81e50a62","0x93252254d12511955f1aa464883ad0da793f84d900fea83e1df8bca0f2f4cf5b5f9acbaec06a24160d33f908ab5fea38","0x997cb55c26996586ba436a95566bd535e9c22452ca5d2a0ded2bd175376557fa895f9f4def4519241ff386a063f2e526","0xa12c78ad451e0ac911260ade2927a768b50cb4125343025d43474e7f465cdc44
6e9f52a84609c5e7e87ae6c9b3f56cda","0xa789d4ca55cbba327086563831b34487d63d0980ba8cf55197c016702ed6da9b102b1f0709ce3da3c53ff925793a3d73","0xa5d76acbb76741ce85be0e655b99baa04f7f587347947c0a30d27f8a49ae78cce06e1cde770a8b618d3db402be1c0c4b","0x873c0366668c8faddb0eb7c86f485718d65f8c4734020f1a18efd5fa123d3ea8a990977fe13592cd01d17e60809cb5ff","0xb659b71fe70f37573ff7c5970cc095a1dc0da3973979778f80a71a347ef25ad5746b2b9608bad4ab9a4a53a4d7df42d7","0xa34cbe05888e5e5f024a2db14cb6dcdc401a9cbd13d73d3c37b348f68688f87c24ca790030b8f84fef9e74b4eab5e412","0x94ce8010f85875c045b0f014db93ef5ab9f1f6842e9a5743dce9e4cb872c94affd9e77c1f1d1ab8b8660b52345d9acb9","0xadefa9b27a62edc0c5b019ddd3ebf45e4de846165256cf6329331def2e088c5232456d3de470fdce3fa758bfdd387512","0xa6b83821ba7c1f83cc9e4529cf4903adb93b26108e3d1f20a753070db072ad5a3689643144bdd9c5ea06bb9a7a515cd0","0xa3a9ddedc2a1b183eb1d52de26718151744db6050f86f3580790c51d09226bf05f15111691926151ecdbef683baa992c","0xa64bac89e7686932cdc5670d07f0b50830e69bfb8c93791c87c7ffa4913f8da881a9d8a8ce8c1a9ce5b6079358c54136","0xa77b5a63452cb1320b61ab6c7c2ef9cfbcade5fd4727583751fb2bf3ea330b5ca67757ec1f517bf4d503ec924fe32fbd","0x8746fd8d8eb99639d8cd0ca34c0d9c3230ed5a312aab1d3d925953a17973ee5aeb66e68667e93caf9cb817c868ea8f3d","0x88a2462a26558fc1fbd6e31aa8abdc706190a17c27fdc4217ffd2297d1b1f3321016e5c4b2384c5454d5717dc732ed03","0xb78893a97e93d730c8201af2e0d3b31cb923d38dc594ffa98a714e627c473d42ea82e0c4d2eeb06862ee22a9b2c54588","0x920cc8b5f1297cf215a43f6fc843e379146b4229411c44c0231f6749793d40f07b9af7699fd5d21fd69400b97febe027","0xa0f0eafce1e098a6b58c7ad8945e297cd93aaf10bc55e32e2e32503f02e59fc1d5776936577d77c0b1162cb93b88518b","0x98480ba0064e97a2e7a6c4769b4d8c2a322cfc9a3b2ca2e67e9317e2ce04c6e1108169a20bd97692e1cb1f1423b14908","0x83dbbb2fda7e287288011764a00b8357753a6a44794cc8245a2275237f11affdc38977214e463ad67aec032f3dfa37e9","0x86442fff37598ce2b12015ff19b01bb8a780b40ad353d143a0f30a06f6d23afd5c2b0a1253716c855dbf445cc5dd6865","0xb8a4c60c5171189414887847b9ed9501bff4e4c10724
0f063e2d254820d2906b69ef70406c585918c4d24f1dd052142b","0x919f33a98e84015b2034b57b5ffe9340220926b2c6e45f86fd79ec879dbe06a148ae68b77b73bf7d01bd638a81165617","0x95c13e78d89474a47fbc0664f6f806744b75dede95a479bbf844db4a7f4c3ae410ec721cb6ffcd9fa9c323da5740d5ae","0xab7151acc41fffd8ec6e90387700bcd7e1cde291ea669567295bea1b9dd3f1df2e0f31f3588cd1a1c08af8120aca4921","0x80e74c5c47414bd6eeef24b6793fb1fa2d8fb397467045fcff887c52476741d5bc4ff8b6d3387cb53ad285485630537f","0xa296ad23995268276aa351a7764d36df3a5a3cffd7dbeddbcea6b1f77adc112629fdeffa0918b3242b3ccd5e7587e946","0x813d2506a28a2b01cb60f49d6bd5e63c9b056aa56946faf2f33bd4f28a8d947569cfead3ae53166fc65285740b210f86","0x924b265385e1646287d8c09f6c855b094daaee74b9e64a0dddcf9ad88c6979f8280ba30c8597b911ef58ddb6c67e9fe3","0x8d531513c70c2d3566039f7ca47cd2352fd2d55b25675a65250bdb8b06c3843db7b2d29c626eed6391c238fc651cf350","0x82b338181b62fdc81ceb558a6843df767b6a6e3ceedc5485664b4ea2f555904b1a45fbb35f6cf5d96f27da10df82a325","0x92e62faaedea83a37f314e1d3cb4faaa200178371d917938e59ac35090be1db4b4f4e0edb78b9c991de202efe4f313d8","0x99d645e1b642c2dc065bac9aaa0621bc648c9a8351efb6891559c3a41ba737bd155fb32d7731950514e3ecf4d75980e4","0xb34a13968b9e414172fb5d5ece9a39cf2eb656128c3f2f6cc7a9f0c69c6bae34f555ecc8f8837dc34b5e470e29055c78","0xa2a0bb7f3a0b23a2cbc6585d59f87cd7e56b2bbcb0ae48f828685edd9f7af0f5edb4c8e9718a0aaf6ef04553ba71f3b7","0x8e1a94bec053ed378e524b6685152d2b52d428266f2b6eadd4bcb7c4e162ed21ab3e1364879673442ee2162635b7a4d8","0x9944adaff14a85eab81c73f38f386701713b52513c4d4b838d58d4ffa1d17260a6d056b02334850ea9a31677c4b078bd","0xa450067c7eceb0854b3eca3db6cf38669d72cb7143c3a68787833cbca44f02c0be9bfbe082896f8a57debb13deb2afb1","0x8be4ad3ac9ef02f7df09254d569939757101ee2eda8586fefcd8c847adc1efe5bdcb963a0cafa17651befaafb376a531","0x90f6de91ea50255f148ac435e08cf2ac00c772a466e38155bd7e8acf9197af55662c7b5227f88589b71abe9dcf7ba343","0x86e5a24f0748b106dee2d4d54e14a3b0af45a96cbee69cac811a4196403ebbee17fd24946d7e7e1b962ac7f66dbaf610","0xafdd96fbcda7aa73bf9eeb22
92e036c25753d249caee3b9c013009cc22e10d3ec29e2aa6ddbb21c4e949b0c0bccaa7f4","0xb5a4e7436d5473647c002120a2cb436b9b28e27ad4ebdd7c5f122b91597c507d256d0cbd889d65b3a908531936e53053","0xb632414c3da704d80ac2f3e5e0e9f18a3637cdc2ebeb613c29300745582427138819c4e7b0bec3099c1b8739dac1807b","0xa28df1464d3372ce9f37ef1db33cc010f752156afae6f76949d98cd799c0cf225c20228ae86a4da592d65f0cffe3951b","0x898b93d0a31f7d3f11f253cb7a102db54b669fd150da302d8354d8e02b1739a47cb9bd88015f3baf12b00b879442464e","0x96fb88d89a12049091070cb0048a381902965e67a8493e3991eaabe5d3b7ff7eecd5c94493a93b174df3d9b2c9511755","0xb899cb2176f59a5cfba3e3d346813da7a82b03417cad6342f19cc8f12f28985b03bf031e856a4743fd7ebe16324805b0","0xa60e2d31bc48e0c0579db15516718a03b73f5138f15037491f4dae336c904e312eda82d50862f4debd1622bb0e56d866","0x979fc8b987b5cef7d4f4b58b53a2c278bd25a5c0ea6f41c715142ea5ff224c707de38451b0ad3aa5e749aa219256650a","0xb2a75bff18e1a6b9cf2a4079572e41205741979f57e7631654a3c0fcec57c876c6df44733c9da3d863db8dff392b44a3","0xb7a0f0e811222c91e3df98ff7f286b750bc3b20d2083966d713a84a2281744199e664879401e77470d44e5a90f3e5181","0x82b74ba21c9d147fbc338730e8f1f8a6e7fc847c3110944eb17a48bea5e06eecded84595d485506d15a3e675fd0e5e62","0xa7f44eef817d5556f0d1abcf420301217d23c69dd2988f44d91ea1f1a16c322263cbacd0f190b9ba22b0f141b9267b4f","0xaadb68164ede84fc1cb3334b3194d84ba868d5a88e4c9a27519eef4923bc4abf81aab8114449496c073c2a6a0eb24114","0xb5378605fabe9a8c12a5dc55ef2b1de7f51aedb61960735c08767a565793cea1922a603a6983dc25f7cea738d0f7c40d","0xa97a4a5cd8d51302e5e670aee78fe6b5723f6cc892902bbb4f131e82ca1dfd5de820731e7e3367fb0c4c1922a02196e3","0x8bdfeb15c29244d4a28896f2b2cb211243cd6a1984a3f5e3b0ebe5341c419beeab3304b390a009ffb47588018034b0ea","0xa9af3022727f2aa2fca3b096968e97edad3f08edcbd0dbca107b892ae8f746a9c0485e0d6eb5f267999b23a845923ed0","0x8e7594034feef412f055590fbb15b6322dc4c6ab7a4baef4685bd13d71a83f7d682b5781bdfa0d1c659489ce9c2b8000","0x84977ca6c865ebee021c58106c1a4ad0c745949ecc5332948002fd09bd9b890524878d0c29da96fd11207621136421fe","0x8687
551a79158e56b2375a271136756313122132a6670fa51f99a1b5c229ed8eea1655a734abae13228b3ebfd2a825dd","0xa0227d6708979d99edfc10f7d9d3719fd3fc68b0d815a7185b60307e4c9146ad2f9be2b8b4f242e320d4288ceeb9504c","0x89f75583a16735f9dd8b7782a130437805b34280ccea8dac6ecaee4b83fe96947e7b53598b06fecfffdf57ffc12cc445","0xa0056c3353227f6dd9cfc8e3399aa5a8f1d71edf25d3d64c982910f50786b1e395c508d3e3727ac360e3e040c64b5298","0xb070e61a6d813626144b312ded1788a6d0c7cec650a762b2f8df6e4743941dd82a2511cd956a3f141fc81e15f4e092da","0xb4e6db232e028a1f989bb5fc13416711f42d389f63564d60851f009dcffac01acfd54efa307aa6d4c0f932892d4e62b0","0x89b5991a67db90024ddd844e5e1a03ef9b943ad54194ae0a97df775dde1addf31561874f4e40fbc37a896630f3bbda58","0xad0e8442cb8c77d891df49cdb9efcf2b0d15ac93ec9be1ad5c3b3cca1f4647b675e79c075335c1f681d56f14dc250d76","0xb5d55a6ae65bb34dd8306806cb49b5ccb1c83a282ee47085cf26c4e648e19a52d9c422f65c1cd7e03ca63e926c5e92ea","0xb749501347e5ec07e13a79f0cb112f1b6534393458b3678a77f02ca89dca973fa7b30e55f0b25d8b92b97f6cb0120056","0x94144b4a3ffc5eec6ba35ce9c245c148b39372d19a928e236a60e27d7bc227d18a8cac9983851071935d8ffb64b3a34f","0x92bb4f9f85bc8c028a3391306603151c6896673135f8a7aefedd27acb322c04ef5dac982fc47b455d6740023e0dd3ea3","0xb9633a4a101461a782fc2aa092e9dbe4e2ad00987578f18cd7cf0021a909951d60fe79654eb7897806795f93c8ff4d1c","0x809f0196753024821b48a016eca5dbb449a7c55750f25981bb7a4b4c0e0846c09b8f6128137905055fc43a3f0deb4a74","0xa27dc9cdd1e78737a443570194a03d89285576d3d7f3a3cf15cc55b3013e42635d4723e2e8fe1d0b274428604b630db9","0x861f60f0462e04cd84924c36a28163def63e777318d00884ab8cb64c8df1df0bce5900342163edb60449296484a6c5bf","0xb7bc23fb4e14af4c4704a944253e760adefeca8caee0882b6bbd572c84434042236f39ae07a8f21a560f486b15d82819","0xb9a6eb492d6dd448654214bd01d6dc5ff12067a11537ab82023fc16167507ee25eed2c91693912f4155d1c07ed9650b3","0x97678af29c68f9a5e213bf0fb85c265303714482cfc4c2c00b4a1e8a76ed08834ee6af52357b143a1ca590fb0265ea5a","0x8a15b499e9eca5b6cac3070b5409e8296778222018ad8b53a5d1f6b70ad9bb10c68a015d105c941ed657b
f3499299e33","0xb487fefede2e8091f2c7bfe85770db2edff1db83d4effe7f7d87bff5ab1ace35e9b823a71adfec6737fede8d67b3c467","0x8b51b916402aa2c437fce3bcad6dad3be8301a1a7eab9d163085b322ffb6c62abf28637636fe6114573950117fc92898","0xb06a2106d031a45a494adec0881cb2f82275dff9dcdd2bc16807e76f3bec28a6734edd3d54f0be8199799a78cd6228ad","0xaf0a185391bbe2315eb97feac98ad6dd2e5d931d012c621abd6e404a31cc188b286fef14871762190acf086482b2b5e2","0x8e78ee8206506dd06eb7729e32fceda3bebd8924a64e4d8621c72e36758fda3d0001af42443851d6c0aea58562870b43","0xa1ba52a569f0461aaf90b49b92be976c0e73ec4a2c884752ee52ffb62dd137770c985123d405dfb5de70692db454b54a","0x8d51b692fa1543c51f6b62b9acb8625ed94b746ef96c944ca02859a4133a5629da2e2ce84e111a7af8d9a5b836401c64","0xa7a20d45044cf6492e0531d0b8b26ffbae6232fa05a96ed7f06bdb64c2b0f5ca7ec59d5477038096a02579e633c7a3ff","0x84df867b98c53c1fcd4620fef133ee18849c78d3809d6aca0fb6f50ff993a053a455993f216c42ab6090fa5356b8d564","0xa7227c439f14c48e2577d5713c97a5205feb69acb0b449152842e278fa71e8046adfab468089c8b2288af1fc51fa945b","0x855189b3a105670779997690876dfaa512b4a25a24931a912c2f0f1936971d2882fb4d9f0b3d9daba77eaf660e9d05d5","0xb5696bd6706de51c502f40385f87f43040a5abf99df705d6aac74d88c913b8ecf7a99a63d7a37d9bdf3a941b9e432ff5","0xab997beb0d6df9c98d5b49864ef0b41a2a2f407e1687dfd6089959757ba30ed02228940b0e841afe6911990c74d536c4","0xb36b65f85546ebfdbe98823d5555144f96b4ab39279facd19c0de3b8919f105ba0315a0784dce4344b1bc62d8bb4a5a3","0xb8371f0e4450788720ac5e0f6cd3ecc5413d33895083b2c168d961ec2b5c3de411a4cc0712481cbe8df8c2fa1a7af006","0x98325d8026b810a8b7a114171ae59a57e8bbc9848e7c3df992efc523621729fd8c9f52114ce01d7730541a1ada6f1df1","0x8d0e76dbd37806259486cd9a31bc8b2306c2b95452dc395546a1042d1d17863ef7a74c636b782e214d3aa0e8d717f94a","0xa4e15ead76da0214d702c859fb4a8accdcdad75ed08b865842bd203391ec4cba2dcc916455e685f662923b96ee0c023f","0x8618190972086ebb0c4c1b4a6c94421a13f378bc961cc8267a301de7390c5e73c3333864b3b7696d81148f9d4843fd02","0x85369d6cc7342e1aa15b59141517d8db8baaaeb7ab9670f3ba3905353948d5759
23d283b7e5a05b13a30e7baf1208a86","0x87c51ef42233c24a6da901f28c9a075d9ba3c625687c387ad6757b72ca6b5a8885e6902a3082da7281611728b1e45f26","0xaa6348a4f71927a3106ad0ea8b02fc8d8c65531e4ab0bd0a17243e66f35afe252e40ab8eef9f13ae55a72566ffdaff5c","0x96a3bc976e9d03765cc3fee275fa05b4a84c94fed6b767e23ca689394501e96f56f7a97cffddc579a6abff632bf153be","0x97dbf96c6176379fdb2b888be4e757b2bca54e74124bd068d3fa1dbd82a011bbeb75079da38e0cd22a761fe208ecad9b","0xb70cf0a1d14089a4129ec4e295313863a59da8c7e26bf74cc0e704ed7f0ee4d7760090d0ddf7728180f1bf2c5ac64955","0x882d664714cc0ffe53cbc9bef21f23f3649824f423c4dbad1f893d22c4687ab29583688699efc4d5101aa08b0c3e267a","0x80ecb7cc963e677ccaddbe3320831dd6ee41209acf4ed41b16dc4817121a3d86a1aac9c4db3d8c08a55d28257088af32","0xa25ba667d832b145f9ce18c3f9b1bd00737aa36db020e1b99752c8ef7d27c6c448982bd8d352e1b6df266b8d8358a8d5","0x83734841c13dee12759d40bdd209b277e743b0d08cc0dd1e0b7afd2d65bfa640400eefcf6be4a52e463e5b3d885eeac6","0x848d16505b04804afc773aebabb51b36fd8aacfbb0e09b36c0d5d57df3c0a3b92f33e7d5ad0a7006ec46ebb91df42b8c","0x909a8d793f599e33bb9f1dc4792a507a97169c87cd5c087310bc05f30afcd247470b4b56dec59894c0fb1d48d39bb54e","0x8e558a8559df84a1ba8b244ece667f858095c50bb33a5381e60fcc6ba586b69693566d8819b4246a27287f16846c1dfa","0x84d6b69729f5aaa000cd710c2352087592cfbdf20d5e1166977e195818e593fa1a50d1e04566be23163a2523dc1612f1","0x9536d262b7a42125d89f4f32b407d737ba8d9242acfc99d965913ab3e043dcac9f7072a43708553562cac4cba841df30","0x9598548923ca119d6a15fd10861596601dd1dedbcccca97bb208cdc1153cf82991ea8cc17686fbaa867921065265970c","0xb87f2d4af6d026e4d2836bc3d390a4a18e98a6e386282ce96744603bab74974272e97ac2da281afa21885e2cbb3a8001","0x991ece62bf07d1a348dd22191868372904b9f8cf065ae7aa4e44fd24a53faf6d851842e35fb472895963aa1992894918","0xa8c53dea4c665b30e51d22ca6bc1bc78aaf172b0a48e64a1d4b93439b053877ec26cb5221c55efd64fa841bbf7d5aff4","0x93487ec939ed8e740f15335b58617c3f917f72d07b7a369befd479ae2554d04deb240d4a14394b26192efae4d2f4f35d","0xa44793ab4035443f8f2968a40e043b4555960193ffa33
58d22112093aadfe2c136587e4139ffd46d91ed4107f61ea5e0","0xb13fe033da5f0d227c75927d3dacb06dbaf3e1322f9d5c7c009de75cdcba5e308232838785ab69a70f0bedea755e003f","0x970a29b075faccd0700fe60d1f726bdebf82d2cc8252f4a84543ebd3b16f91be42a75c9719a39c4096139f0f31393d58","0xa4c3eb1f7160f8216fc176fb244df53008ff32f2892363d85254002e66e2de21ccfe1f3b1047589abee50f29b9d507e3","0x8c552885eab04ba40922a8f0c3c38c96089c95ff1405258d3f1efe8d179e39e1295cbf67677894c607ae986e4e6b1fb0","0xb3671746fa7f848c4e2ae6946894defadd815230b906b419143523cc0597bc1d6c0a4c1e09d49b66b4a2c11cde3a4de3","0x937a249a95813a5e2ef428e355efd202e15a37d73e56cfb7e57ea9f943f2ce5ca8026f2f1fd25bf164ba89d07077d858","0x83646bdf6053a04aa9e2f112499769e5bd5d0d10f2e13db3ca89bd45c0b3b7a2d752b7d137fb3909f9c62b78166c9339","0xb4eac4b91e763666696811b7ed45e97fd78310377ebea1674b58a2250973f80492ac35110ed1240cd9bb2d17493d708c","0x82db43a99bc6573e9d92a3fd6635dbbb249ac66ba53099c3c0c8c8080b121dd8243cd5c6e36ba0a4d2525bae57f5c89c","0xa64d6a264a681b49d134c655d5fc7756127f1ee7c93d328820f32bca68869f53115c0d27fef35fe71f7bc4fdaed97348","0x8739b7a9e2b4bc1831e7f04517771bc7cde683a5e74e052542517f8375a2f64e53e0d5ac925ef722327e7bb195b4d1d9","0x8f337cdd29918a2493515ebb5cf702bbe8ecb23b53c6d18920cc22f519e276ca9b991d3313e2d38ae17ae8bdfa4f8b7e","0xb0edeab9850e193a61f138ef2739fc42ceec98f25e7e8403bfd5fa34a7bc956b9d0898250d18a69fa4625a9b3d6129da","0xa9920f26fe0a6d51044e623665d998745c9eca5bce12051198b88a77d728c8238f97d4196f26e43b24f8841500b998d0","0x86e655d61502b979eeeeb6f9a7e1d0074f936451d0a1b0d2fa4fb3225b439a3770767b649256fe481361f481a8dbc276","0x84d3b32fa62096831cc3bf013488a9f3f481dfe293ae209ed19585a03f7db8d961a7a9dd0db82bd7f62d612707575d9c","0x81c827826ec9346995ffccf62a241e3b2d32f7357acd1b1f8f7a7dbc97022d3eb51b8a1230e23ce0b401d2e535e8cd78","0x94a1e40c151191c5b055b21e86f32e69cbc751dcbdf759a48580951834b96a1eed75914c0d19a38aefd21fb6c8d43d0c","0xab890222b44bc21b71f7c75e15b6c6e16bb03371acce4f8d4353ff3b8fcd42a14026589c5ed19555a3e15e4d18bfc3a3","0xaccb0be851e93c6c8cc64724c
db86887eea284194b10e7a43c90528ed97e9ec71ca69c6fac13899530593756dd49eab2","0xb630220aa9e1829c233331413ee28c5efe94ea8ea08d0c6bfd781955078b43a4f92915257187d8526873e6c919c6a1de","0xadd389a4d358c585f1274b73f6c3c45b58ef8df11f9d11221f620e241bf3579fba07427b288c0c682885a700cc1fa28d","0xa9fe6ca8bf2961a3386e8b8dcecc29c0567b5c0b3bcf3b0f9169f88e372b80151af883871fc5229815f94f43a6f5b2b0","0xad839ae003b92b37ea431fa35998b46a0afc3f9c0dd54c3b3bf7a262467b13ff3c323ada1c1ae02ac7716528bdf39e3e","0x9356d3fd0edcbbb65713c0f2a214394f831b26f792124b08c5f26e7f734b8711a87b7c4623408da6a091c9aef1f6af3c","0x896b25b083c35ac67f0af3784a6a82435b0e27433d4d74cd6d1eafe11e6827827799490fb1c77c11de25f0d75f14e047","0x8bfa019391c9627e8e5f05c213db625f0f1e51ec68816455f876c7e55b8f17a4f13e5aae9e3fb9e1cf920b1402ee2b40","0x8ba3a6faa6a860a8f3ce1e884aa8769ceded86380a86520ab177ab83043d380a4f535fe13884346c5e51bee68da6ab41","0xa8292d0844084e4e3bb7af92b1989f841a46640288c5b220fecfad063ee94e86e13d3d08038ec2ac82f41c96a3bfe14d","0x8229bb030b2fc566e11fd33c7eab7a1bb7b49fed872ea1f815004f7398cb03b85ea14e310ec19e1f23e0bdaf60f8f76c","0x8cfbf869ade3ec551562ff7f63c2745cc3a1f4d4dc853a0cd42dd5f6fe54228f86195ea8fe217643b32e9f513f34a545","0xac52a3c8d3270ddfe1b5630159da9290a5ccf9ccbdef43b58fc0a191a6c03b8a5974cf6e2bbc7bd98d4a40a3581482d7","0xab13decb9e2669e33a7049b8eca3ca327c40dea15ad6e0e7fa63ed506db1d258bc36ac88b35f65cae0984e937eb6575d","0xb5e748eb1a7a1e274ff0cc56311c198f2c076fe4b7e73e5f80396fe85358549df906584e6bb2c8195b3e2be7736850a5","0xb5cb911325d8f963c41f691a60c37831c7d3bbd92736efa33d1f77a22b3fde7f283127256c2f47e197571e6fe0b46149","0x8a01dc6ed1b55f26427a014faa347130738b191a06b800e32042a46c13f60b49534520214359d68eb2e170c31e2b8672","0xa72fa874866e19b2efb8e069328362bf7921ec375e3bcd6b1619384c3f7ee980f6cf686f3544e9374ff54b4d17a1629c","0x8db21092f7c5f110fba63650b119e82f4b42a997095d65f08f8237b02dd66fdf959f788df2c35124db1dbd330a235671","0x8c65d50433d9954fe28a09fa7ba91a70a590fe7ba6b3060f5e4be0f6cef860b9897fa935fb4ebc42133524eb071dd169","0xb4614
058e8fa21138fc5e4592623e78b8982ed72aa35ee4391b164f00c68d277fa9f9eba2eeefc890b4e86eba5124591","0xab2ad3a1bce2fbd55ca6b7c23786171fe1440a97d99d6df4d80d07dd56ac2d7203c294b32fc9e10a6c259381a73f24a1","0x812ae3315fdc18774a8da3713a4679e8ed10b9405edc548c00cacbe25a587d32040566676f135e4723c5dc25df5a22e9","0xa464b75f95d01e5655b54730334f443c8ff27c3cb79ec7af4b2f9da3c2039c609908cd128572e1fd0552eb597e8cef8d","0xa0db3172e93ca5138fe419e1c49a1925140999f6eff7c593e5681951ee0ec1c7e454c851782cbd2b8c9bc90d466e90e0","0x806db23ba7d00b87d544eed926b3443f5f9c60da6b41b1c489fba8f73593b6e3b46ebfcab671ee009396cd77d5e68aa1","0x8bfdf2c0044cc80260994e1c0374588b6653947b178e8b312be5c2a05e05767e98ea15077278506aee7df4fee1aaf89e","0x827f6558c16841b5592ff089c9c31e31eb03097623524394813a2e4093ad2d3f8f845504e2af92195aaa8a1679d8d692","0x925c4f8eab2531135cd71a4ec88e7035b5eea34ba9d799c5898856080256b4a15ed1a746e002552e2a86c9c157e22e83","0xa9f9a368f0e0b24d00a35b325964c85b69533013f9c2cfad9708be5fb87ff455210f8cb8d2ce3ba58ca3f27495552899","0x8ac0d3bebc1cae534024187e7c71f8927ba8fcc6a1926cb61c2b6c8f26bb7831019e635a376146c29872a506784a4aaa","0x97c577be2cbbfdb37ad754fae9df2ada5fc5889869efc7e18a13f8e502fbf3f4067a509efbd46fd990ab47ce9a70f5a8","0x935e7d82bca19f16614aa43b4a3474e4d20d064e4bfdf1cea2909e5c9ab72cfe3e54dc50030e41ee84f3588cebc524e9","0x941aafc08f7c0d94cebfbb1f0aad5202c02e6e37f2c12614f57e727efa275f3926348f567107ee6d8914dd71e6060271","0xaf0fbc1ba05b4b5b63399686df3619968be5d40073de0313cbf5f913d3d4b518d4c249cdd2176468ccaa36040a484f58","0xa0c414f23f46ca6d69ce74c6f8a00c036cb0edd098af0c1a7d39c802b52cfb2d5dbdf93fb0295453d4646e2af7954d45","0x909cf39e11b3875bb63b39687ae1b5d1f5a15445e39bf164a0b14691b4ddb39a8e4363f584ef42213616abc4785b5d66","0xa92bac085d1194fbd1c88299f07a061d0bdd3f980b663e81e6254dbb288bf11478c0ee880e28e01560f12c5ccb3c0103","0x841705cd5cd76b943e2b7c5e845b9dd3c8defe8ef67e93078d6d5e67ade33ad4b0fd413bc196f93b0a4073c855cd97d4","0x8e7eb8364f384a9161e81d3f1d52ceca9b65536ae49cc35b48c3e2236322ba4ae9973e0840802d9fa4f4d8
2ea833544f","0xaed3ab927548bc8bec31467ba80689c71a168e34f50dcb6892f19a33a099f5aa6b3f9cb79f5c0699e837b9a8c7f27efe","0xb8fbf7696210a36e20edabd77839f4dfdf50d6d015cdf81d587f90284a9bcef7d2a1ff520728d7cc69a4843d6c20dedd","0xa9d533769ce6830211c884ae50a82a7bf259b44ac71f9fb11f0296fdb3981e6b4c1753fe744647b247ebc433a5a61436","0x8b4bdf90d33360b7f428c71cde0a49fb733badba8c726876945f58c620ce7768ae0e98fc8c31fa59d8955a4823336bb1","0x808d42238e440e6571c59e52a35ae32547d502dc24fd1759d8ea70a7231a95859baf30b490a4ba55fa2f3aaa11204597","0x85594701f1d2fee6dc1956bc44c7b31db93bdeec2f3a7d622c1a08b26994760773e3d57521a44cfd7e407ac3fd430429","0xa66de045ce7173043a6825e9dc440ac957e2efb6df0a337f4f8003eb0c719d873a52e6eba3cb0d69d977ca37d9187674","0x87a1c6a1fdff993fa51efa5c3ba034c079c0928a7d599b906336af7c2dcab9721ceaf3108c646490af9dff9a754f54b3","0x926424223e462ceb75aed7c22ade8a7911a903b7e5dd4bc49746ddce8657f4616325cd12667d4393ac52cdd866396d0e","0xb5dc96106593b42b30f06f0b0a1e0c1aafc70432e31807252d3674f0b1ea5e58eac8424879d655c9488d85a879a3e572","0x997ca0987735cc716507cb0124b1d266d218b40c9d8e0ecbf26a1d65719c82a637ce7e8be4b4815d307df717bde7c72a","0x92994d3f57a569b7760324bb5ae4e8e14e1633d175dab06aa57b8e391540e05f662fdc08b8830f489a063f59b689a688","0xa8087fcc6aa4642cb998bea11facfe87eb33b90a9aa428ab86a4124ad032fc7d2e57795311a54ec9f55cc120ebe42df1","0xa9bd7d1de6c0706052ca0b362e2e70e8c8f70f1f026ea189b4f87a08ce810297ebfe781cc8004430776c54c1a05ae90c","0x856d33282e8a8e33a3d237fb0a0cbabaf77ba9edf2fa35a831fdafcadf620561846aa6cbb6bdc5e681118e1245834165","0x9524a7aa8e97a31a6958439c5f3339b19370f03e86b89b1d02d87e4887309dbbe9a3a8d2befd3b7ed5143c8da7e0a8ad","0x824fdf433e090f8acbd258ac7429b21f36f9f3b337c6d0b71d1416a5c88a767883e255b2888b7c906dd2e9560c4af24c","0x88c7fee662ca7844f42ed5527996b35723abffd0d22d4ca203b9452c639a5066031207a5ae763dbc0865b3299d19b1ec","0x919dca5c5595082c221d5ab3a5bc230f45da7f6dec4eb389371e142c1b9c6a2c919074842479c2844b72c0d806170c0c","0xb939be8175715e55a684578d8be3ceff3087f60fa875fff48e52a6e6e9979c955e
fef8ff67cfa2b79499ea23778e33b0","0x873b6db725e7397d11bc9bed9ac4468e36619135be686790a79bc6ed4249058f1387c9a802ea86499f692cf635851066","0xaeae06db3ec47e9e5647323fa02fac44e06e59b885ad8506bf71b184ab3895510c82f78b6b22a5d978e8218e7f761e9f","0xb99c0a8359c72ab88448bae45d4bf98797a26bca48b0d4460cd6cf65a4e8c3dd823970ac3eb774ae5d0cea4e7fadf33e","0x8f10c8ec41cdfb986a1647463076a533e6b0eec08520c1562401b36bb063ac972aa6b28a0b6ce717254e35940b900e3c","0xa106d9be199636d7add43b942290269351578500d8245d4aae4c083954e4f27f64740a3138a66230391f2d0e6043a8de","0xa469997908244578e8909ff57cffc070f1dbd86f0098df3cfeb46b7a085cfecc93dc69ee7cad90ff1dc5a34d50fe580c","0xa4ef087bea9c20eb0afc0ee4caba7a9d29dfa872137828c721391273e402fb6714afc80c40e98bbd8276d3836bffa080","0xb07a013f73cd5b98dae0d0f9c1c0f35bff8a9f019975c4e1499e9bee736ca6fcd504f9bc32df1655ff333062382cff04","0xb0a77188673e87cc83348c4cc5db1eecf6b5184e236220c8eeed7585e4b928db849944a76ec60ef7708ef6dac02d5592","0xb1284b37e59b529f0084c0dacf0af6c0b91fc0f387bf649a8c74819debf606f7b07fc3e572500016fb145ec2b24e9f17","0x97b20b5b4d6b9129da185adfbf0d3d0b0faeba5b9715f10299e48ea0521709a8296a9264ce77c275a59c012b50b6519a","0xb9d37e946fae5e4d65c1fbfacc8a62e445a1c9d0f882e60cca649125af303b3b23af53c81d7bac544fb7fcfc7a314665","0x8e5acaac379f4bb0127efbef26180f91ff60e4c525bc9b798fc50dfaf4fe8a5aa84f18f3d3cfb8baead7d1e0499af753","0xb0c0b8ab1235bf1cda43d4152e71efc1a06c548edb964eb4afceb201c8af24240bf8ab5cae30a08604e77432b0a5faf0","0x8cc28d75d5c8d062d649cbc218e31c4d327e067e6dbd737ec0a35c91db44fbbd0d40ec424f5ed79814add16947417572","0x95ae6219e9fd47efaa9cb088753df06bc101405ba50a179d7c9f7c85679e182d3033f35b00dbba71fdcd186cd775c52e","0xb5d28fa09f186ebc5aa37453c9b4d9474a7997b8ae92748ecb940c14868792292ac7d10ade01e2f8069242b308cf97e5","0x8c922a0faa14cc6b7221f302df3342f38fc8521ec6c653f2587890192732c6da289777a6cd310747ea7b7d104af95995","0xb9ad5f660b65230de54de535d4c0fcae5bc6b59db21dea5500fdc12eea4470fb8ea003690fdd16d052523418d5e01e8c","0xa39a9dd41a0ff78c82979483731f1cd68d3921c3e99658
69662c22e02dde3877802e180ba93f06e7346f96d9fa9261d2","0x8b32875977ec372c583b24234c27ed73aef00cdff61eb3c3776e073afbdeade548de9497c32ec6d703ff8ad0a5cb7fe4","0x9644cbe755a5642fe9d26cfecf170d3164f1848c2c2e271d5b6574a01755f3980b3fc870b98cf8528fef6ecef4210c16","0x81ea9d1fdd9dd66d60f40ce0712764b99da9448ae0b300f8324e1c52f154e472a086dda840cb2e0b9813dc8ce8afd4b5","0x906aaa4a7a7cdf01909c5cfbc7ded2abc4b869213cbf7c922d4171a4f2e637e56f17020b852ad339d83b8ac92f111666","0x939b5f11acbdeff998f2a080393033c9b9d8d5c70912ea651c53815c572d36ee822a98d6dfffb2e339f29201264f2cf4","0xaba4898bf1ccea9b9e2df1ff19001e05891581659c1cbbde7ee76c349c7fc7857261d9785823c9463a8aea3f40e86b38","0x83ca1a56b8a0be4820bdb5a9346357c68f9772e43f0b887729a50d2eb2a326bbcede676c8bf2e51d7c89bbd8fdb778a6","0x94e86e9fe6addfe2c3ee3a547267ed921f4230d877a85bb4442c2d9350c2fa9a9c54e6fe662de82d1a2407e4ab1691c2","0xa0cc3bdef671a59d77c6984338b023fa2b431b32e9ed2abe80484d73edc6540979d6f10812ecc06d4d0c5d4eaca7183c","0xb5343413c1b5776b55ea3c7cdd1f3af1f6bd802ea95effe3f2b91a523817719d2ecc3f8d5f3cc2623ace7e35f99ca967","0x92085d1ed0ed28d8cabe3e7ff1905ed52c7ceb1eac5503760c52fb5ee3a726aba7c90b483c032acc3f166b083d7ec370","0x8ec679520455275cd957fca8122724d287db5df7d29f1702a322879b127bff215e5b71d9c191901465d19c86c8d8d404","0xb65eb2c63d8a30332eb24ee8a0c70156fc89325ebbb38bacac7cf3f8636ad8a472d81ccca80423772abc00192d886d8a","0xa9fe1c060b974bee4d590f2873b28635b61bfcf614e61ff88b1be3eee4320f4874e21e8d666d8ac8c9aba672efc6ecae","0xb3fe2a9a389c006a831dea7e777062df84b5c2803f9574d7fbe10b7e1c125817986af8b6454d6be9d931a5ac94cfe963","0x95418ad13b734b6f0d33822d9912c4c49b558f68d08c1b34a0127fcfa666bcae8e6fda8832d2c75bb9170794a20e4d7c","0xa9a7df761e7f18b79494bf429572140c8c6e9d456c4d4e336184f3f51525a65eb9582bea1e601bdb6ef8150b7ca736a5","0xa0de03b1e75edf7998c8c1ac69b4a1544a6fa675a1941950297917366682e5644a4bda9cdeedfaf9473d7fccd9080b0c","0xa61838af8d95c95edf32663a68f007d95167bf6e41b0c784a30b22d8300cfdd5703bd6d16e86396638f6db6ae7e42a85","0x8866d62084d905c145ff2d4102
5299d8b702ac1814a7dec4e277412c161bc9a62fed735536789cb43c88693c6b423882","0x91da22c378c81497fe363e7f695c0268443abee50f8a6625b8a41e865638a643f07b157ee566de09ba09846934b4e2d7","0x941d21dd57c9496aa68f0c0c05507405fdd413acb59bc668ce7e92e1936c68ec4b065c3c30123319884149e88228f0b2","0xa77af9b094bc26966ddf2bf9e1520c898194a5ccb694915950dadc204facbe3066d3d89f50972642d76b14884cfbaa21","0x8e76162932346869f4618bde744647f7ab52ab498ad654bdf2a4feeb986ac6e51370841e5acbb589e38b6e7142bb3049","0xb60979ace17d6937ece72e4f015da4657a443dd01cebc7143ef11c09e42d4aa8855999a65a79e2ea0067f31c9fc2ab0f","0xb3e2ffdd5ee6fd110b982fd4fad4b93d0fca65478f986d086eeccb0804960bfaa1919afa743c2239973ea65091fe57d2","0x8ce0ce05e7d7160d44574011da687454dbd3c8b8290aa671731b066e2c82f8cf2d63cb8e932d78c6122ec610e44660e6","0xab005dd8d297045c39e2f72fb1c48edb501ccf3575d3d04b9817b3afee3f0bb0f3f53f64bda37d1d9cde545aae999bae","0x95bd7edb4c4cd60e3cb8a72558845a3cce6bb7032ccdf33d5a49ebb6ddf203bc3c79e7b7e550735d2d75b04c8b2441e8","0x889953ee256206284094e4735dbbb17975bafc7c3cb94c9fbfee4c3e653857bfd49e818f64a47567f721b98411a3b454","0xb188423e707640ab0e75a061e0b62830cde8afab8e1ad3dae30db69ffae4e2fc005bababbdcbd7213b918ed4f70e0c14","0xa97e0fafe011abd70d4f99a0b36638b3d6e7354284588f17a88970ed48f348f88392779e9a038c6cbc9208d998485072","0x87db11014a91cb9b63e8dfaa82cdebca98272d89eb445ee1e3ff9dbaf2b3fad1a03b888cffc128e4fe208ed0dddece0f","0xaad2e40364edd905d66ea4ac9d51f9640d6fda9a54957d26ba233809851529b32c85660fa401dbee3679ec54fa6dd966","0x863e99336ca6edf03a5a259e59a2d0f308206e8a2fb320cfc0be06057366df8e0f94b33a28f574092736b3c5ada84270","0xb34bcc56a057589f34939a1adc51de4ff6a9f4fee9c7fa9aa131e28d0cf0759a0c871b640162acdfbf91f3f1b59a3703","0x935dd28f2896092995c5eff1618e5b6efe7a40178888d7826da9b0503c2d6e68a28e7fac1a334e166d0205f0695ef614","0xb842cd5f8f5de5ca6c68cb4a5c1d7b451984930eb4cc18fd0934d52fdc9c3d2d451b1c395594d73bc3451432bfba653f","0x9014537885ce2debad736bc1926b25fdab9f69b216bf024f589c49dc7e6478c71d595c3647c9f65ff980b14f4bb2283b","0x8e827c
cca1dd4cd21707140d10703177d722be0bbe5cac578db26f1ef8ad2909103af3c601a53795435b27bf95d0c9ed","0x8a0b8ad4d466c09d4f1e9167410dbe2edc6e0e6229d4b3036d30f85eb6a333a18b1c968f6ca6d6889bb08fecde017ef4","0x9241ee66c0191b06266332dc9161dede384c4bb4e116dbd0890f3c3790ec5566da4568243665c4725b718ac0f6b5c179","0xaeb4d5fad81d2b505d47958a08262b6f1b1de9373c2c9ba6362594194dea3e002ab03b8cbb43f867be83065d3d370f19","0x8781bc83bb73f7760628629fe19e4714b494dbed444c4e4e4729b7f6a8d12ee347841a199888794c2234f51fa26fc2b9","0xb58864f0acd1c2afa29367e637cbde1968d18589245d9936c9a489c6c495f54f0113ecdcbe4680ac085dd3c397c4d0c3","0x94a24284afaeead61e70f3e30f87248d76e9726759445ca18cdb9360586c60cc9f0ec1c397f9675083e0b56459784e2e","0xaed358853f2b54dcbddf865e1816c2e89be12e940e1abfa661e2ee63ffc24a8c8096be2072fa83556482c0d89e975124","0xb95374e6b4fc0765708e370bc881e271abf2e35c08b056a03b847e089831ef4fe3124b9c5849d9c276eb2e35b3daf264","0xb834cdbcfb24c8f84bfa4c552e7fadc0028a140952fd69ed13a516e1314a4cd35d4b954a77d51a1b93e1f5d657d0315d","0x8fb6d09d23bfa90e7443753d45a918d91d75d8e12ec7d016c0dfe94e5c592ba6aaf483d2f16108d190822d955ad9cdc3","0xaa315cd3c60247a6ad4b04f26c5404c2713b95972843e4b87b5a36a89f201667d70f0adf20757ebe1de1b29ae27dda50","0xa116862dca409db8beff5b1ccd6301cdd0c92ca29a3d6d20eb8b87f25965f42699ca66974dd1a355200157476b998f3b","0xb4c2f5fe173c4dc8311b60d04a65ce1be87f070ac42e13cd19c6559a2931c6ee104859cc2520edebbc66a13dc7d30693","0x8d4a02bf99b2260c334e7d81775c5cf582b00b0c982ce7745e5a90624919028278f5e9b098573bad5515ce7fa92a80c8","0x8543493bf564ce6d97bd23be9bff1aba08bd5821ca834f311a26c9139c92a48f0c2d9dfe645afa95fec07d675d1fd53b","0x9344239d13fde08f98cb48f1f87d34cf6abe8faecd0b682955382a975e6eed64e863fa19043290c0736261622e00045c","0xaa49d0518f343005ca72b9e6c7dcaa97225ce6bb8b908ebbe7b1a22884ff8bfb090890364e325a0d414ad180b8f161d1","0x907d7fd3e009355ab326847c4a2431f688627faa698c13c03ffdd476ecf988678407f029b8543a475dcb3dafdf2e7a9c","0x845f1f10c6c5dad2adc7935f5cd2e2b32f169a99091d4f1b05babe7317b9b1cdce29b5e62f947dc621b9acb
fe517a258","0x8f3be8e3b380ea6cdf9e9c237f5e88fd5a357e5ded80ea1fc2019810814de82501273b4da38916881125b6fa0cfd4459","0xb9c7f487c089bf1d20c822e579628db91ed9c82d6ca652983aa16d98b4270c4da19757f216a71b9c13ddee3e6e43705f","0x8ba2d8c88ad2b872db104ea8ddbb006ec2f3749fd0e19298a804bb3a5d94de19285cc7fb19fee58a66f7851d1a66c39f","0x9375ecd3ed16786fe161af5d5c908f56eeb467a144d3bbddfc767e90065b7c94fc53431adebecba2b6c9b5821184d36e","0xa49e069bfadb1e2e8bff6a4286872e2a9765d62f0eaa4fcb0e5af4bbbed8be3510fb19849125a40a8a81d1e33e81c3eb","0x9522cc66757b386aa6b88619525c8ce47a5c346d590bb3647d12f991e6c65c3ab3c0cfc28f0726b6756c892eae1672be","0xa9a0f1f51ff877406fa83a807aeb17b92a283879f447b8a2159653db577848cc451cbadd01f70441e351e9ed433c18bc","0x8ff7533dcff6be8714df573e33f82cf8e9f2bcaaa43e939c4759d52b754e502717950de4b4252fb904560fc31dce94a4","0x959724671e265a28d67c29d95210e97b894b360da55e4cf16e6682e7912491ed8ca14bfaa4dce9c25a25b16af580494f","0x92566730c3002f4046c737032487d0833c971e775de59fe02d9835c9858e2e3bc37f157424a69764596c625c482a2219","0xa84b47ceff13ed9c3e5e9cdf6739a66d3e7c2bd8a6ba318fefb1a9aecf653bb2981da6733ddb33c4b0a4523acc429d23","0xb4ddf571317e44f859386d6140828a42cf94994e2f1dcbcc9777f4eebbfc64fc1e160b49379acc27c4672b8e41835c5d","0x8ab95c94072b853d1603fdd0a43b30db617d13c1d1255b99075198e1947bfa5f59aed2b1147548a1b5e986cd9173d15c","0x89511f2eab33894fd4b3753d24249f410ff7263052c1fef6166fc63a79816656b0d24c529e45ccce6be28de6e375d916","0xa0866160ca63d4f2be1b4ea050dac6b59db554e2ebb4e5b592859d8df339b46fd7cb89aaed0951c3ee540aee982c238a","0x8fcc5cbba1b94970f5ff2eb1922322f5b0aa7d918d4b380c9e7abfd57afd8b247c346bff7b87af82efbce3052511cd1b","0x99aeb2a5e846b0a2874cca02c66ed40d5569eb65ab2495bc3f964a092e91e1517941f2688e79f8cca49cd3674c4e06dc","0xb7a096dc3bad5ca49bee94efd884aa3ff5615cf3825cf95fbe0ce132e35f46581d6482fa82666c7ef5f1643eaee8f1ca","0x94393b1da6eaac2ffd186b7725eca582f1ddc8cdd916004657f8a564a7c588175cb443fc6943b39029f5bbe0add3fad8","0x884b85fe012ccbcd849cb68c3ad832d83b3ef1c40c3954ffdc97f103b1ed582c801
e1a41d9950f6bddc1d11f19d5ec76","0xb00061c00131eded8305a7ce76362163deb33596569afb46fe499a7c9d7a0734c084d336b38d168024c2bb42b58e7660","0xa439153ac8e6ca037381e3240e7ba08d056c83d7090f16ed538df25901835e09e27de2073646e7d7f3c65056af6e4ce7","0x830fc9ca099097d1f38b90e6843dc86f702be9d20bdacc3e52cae659dc41df5b8d2c970effa6f83a5229b0244a86fe22","0xb81ea2ffaaff2bb00dd59a9ab825ba5eed4db0d8ac9c8ed1a632ce8f086328a1cddd045fbe1ace289083c1325881b7e7","0xb51ea03c58daf2db32c99b9c4789b183365168cb5019c72c4cc91ac30b5fb7311d3db76e6fa41b7cd4a8c81e2f6cdc94","0xa4170b2c6d09ca5beb08318730419b6f19215ce6c631c854116f904be3bc30dd85a80c946a8ab054d3e307afaa3f8fbc","0x897cc42ff28971ff54d2a55dd6b35cfb8610ac902f3c06e3a5cea0e0a257e870c471236a8e84709211c742a09c5601a6","0xa18f2e98d389dace36641621488664ecbb422088ab03b74e67009b8b8acacaaa24fdcf42093935f355207d934adc52a8","0x92adcfb678cc2ba19c866f3f2b988fdcb4610567f3ab436cc0cb9acaf5a88414848d71133ebdbec1983e38e6190f1b5f","0xa86d43c2ce01b366330d3b36b3ca85f000c3548b8297e48478da1ee7d70d8576d4650cba7852ed125c0d7cb6109aa7f3","0x8ed31ceed9445437d7732dce78a762d72ff32a7636bfb3fd7974b7ae15db414d8184a1766915244355deb354fbc5803b","0x9268f70032584f416e92225d65af9ea18c466ebc7ae30952d56a4e36fd9ea811dde0a126da9220ba3c596ec54d8a335e","0x9433b99ee94f2d3fbdd63b163a2bdf440379334c52308bd24537f7defd807145a062ff255a50d119a7f29f4b85d250e3","0x90ce664f5e4628a02278f5cf5060d1a34f123854634b1870906e5723ac9afd044d48289be283b267d45fcbf3f4656aaf","0xaaf21c4d59378bb835d42ae5c5e5ab7a3c8c36a59e75997989313197752b79a472d866a23683b329ea69b048b87fa13e","0xb83c0589b304cec9ede549fde54f8a7c2a468c6657da8c02169a6351605261202610b2055c639b9ed2d5b8c401fb8f56","0x9370f326ea0f170c2c05fe2c5a49189f20aec93b6b18a5572a818cd4c2a6adb359e68975557b349fb54f065d572f4c92","0xac3232fa5ce6f03fca238bef1ce902432a90b8afce1c85457a6bee5571c033d4bceefafc863af04d4e85ac72a4d94d51","0x80d9ea168ff821b22c30e93e4c7960ce3ad3c1e6deeebedd342a36d01bd942419b187e2f382dbfd8caa34cca08d06a48","0xa387a3c61676fb3381eefa2a45d82625635a666e999aba3
0e3b037ec9e040f414f9e1ad9652abd3bcad63f95d85038db","0xa1b229fe32121e0b391b0f6e0180670b9dc89d79f7337de4c77ea7ad0073e9593846f06797c20e923092a08263204416","0x92164a9d841a2b828cedf2511213268b698520f8d1285852186644e9a0c97512cafa4bfbe29af892c929ebccd102e998","0x82ee2fa56308a67c7db4fd7ef539b5a9f26a1c2cc36da8c3206ba4b08258fbb3cec6fe5cdbd111433fb1ba2a1e275927","0x8c77bfe9e191f190a49d46f05600603fa42345592539b82923388d72392404e0b29a493a15e75e8b068dddcd444c2928","0x80b927f93ccf79dcf5c5b20bcf5a7d91d7a17bc0401bb7cc9b53a6797feac31026eb114257621f5a64a52876e4474cc1","0xb6b68b6501c37804d4833d5a063dd108a46310b1400549074e3cac84acc6d88f73948b7ad48d686de89c1ec043ae8c1a","0xab3da00f9bdc13e3f77624f58a3a18fc3728956f84b5b549d62f1033ae4b300538e53896e2d943f160618e05af265117","0xb6830e87233b8eace65327fdc764159645b75d2fd4024bf8f313b2dd5f45617d7ecfb4a0b53ccafb5429815a9a1adde6","0xb9251cfe32a6dc0440615aadcd98b6b1b46e3f4e44324e8f5142912b597ee3526bea2431e2b0282bb58f71be5b63f65e","0xaf8d70711e81cdddfb39e67a1b76643292652584c1ce7ce4feb1641431ad596e75c9120e85f1a341e7a4da920a9cdd94","0x98cd4e996594e89495c078bfd52a4586b932c50a449a7c8dfdd16043ca4cda94dafbaa8ad1b44249c99bbcc52152506e","0xb9fc6d1c24f48404a4a64fbe3e43342738797905db46e4132aee5f086aaa4c704918ad508aaefa455cfe1b36572e6242","0xa365e871d30ba9291cedaba1be7b04e968905d003e9e1af7e3b55c5eb048818ae5b913514fb08b24fb4fbdccbb35d0b8","0x93bf99510971ea9af9f1e364f1234c898380677c8e8de9b0dd24432760164e46c787bc9ec42a7ad450500706cf247b2d","0xb872f825a5b6e7b9c7a9ddfeded3516f0b1449acc9b4fd29fc6eba162051c17416a31e5be6d3563f424d28e65bab8b8f","0xb06b780e5a5e8eb4f4c9dc040f749cf9709c8a4c9ef15e925f442b696e41e5095db0778a6c73bcd329b265f2c6955c8b","0x848f1a981f5fc6cd9180cdddb8d032ad32cdfa614fc750d690dbae36cc0cd355cbf1574af9b3ffc8b878f1b2fafb9544","0xa03f48cbff3e9e8a3a655578051a5ae37567433093ac500ed0021c6250a51b767afac9bdb194ee1e3eac38a08c0eaf45","0xb5be78ce638ff8c4aa84352b536628231d3f7558c5be3bf010b28feac3022e64691fa672f358c8b663904aebe24a54ed","0xa9d4da70ff676fa55d1728ba6ab
03b471fa38b08854d99e985d88c2d050102d8ccffbe1c90249a5607fa7520b15fe791","0x8fe9f7092ffb0b69862c8e972fb1ecf54308c96d41354ed0569638bb0364f1749838d6d32051fff1599112978c6e229c","0xae6083e95f37770ecae0df1e010456f165d96cfe9a7278c85c15cffd61034081ce5723e25e2bede719dc9341ec8ed481","0xa260891891103089a7afbd9081ea116cfd596fd1015f5b65e10b0961eb37fab7d09c69b7ce4be8bf35e4131848fb3fe4","0x8d729fa32f6eb9fd2f6a140bef34e8299a2f3111bffd0fe463aa8622c9d98bfd31a1df3f3e87cd5abc52a595f96b970e","0xa30ec6047ae4bc7da4daa7f4c28c93aedb1112cfe240e681d07e1a183782c9ff6783ac077c155af23c69643b712a533f","0xac830726544bfe7b5467339e5114c1a75f2a2a8d89453ce86115e6a789387e23551cd64620ead6283dfa4538eb313d86","0x8445c135b7a48068d8ed3e011c6d818cfe462b445095e2fbf940301e50ded23f272d799eea47683fc027430ce14613ef","0x95785411715c9ae9d8293ce16a693a2aa83e3cb1b4aa9f76333d0da2bf00c55f65e21e42e50e6c5772ce213dd7b4f7a0","0xb273b024fa18b7568c0d1c4d2f0c4e79ec509dafac8c5951f14192d63ddbcf2d8a7512c1c1b615cc38fa3e336618e0c5","0xa78b9d3ea4b6a90572eb27956f411f1d105fdb577ee2ffeec9f221da9b45db84bfe866af1f29597220c75e0c37a628d8","0xa4be2bf058c36699c41513c4d667681ce161a437c09d81383244fc55e1c44e8b1363439d0cce90a3e44581fb31d49493","0xb6eef13040f17dd4eba22aaf284d2f988a4a0c4605db44b8d2f4bf9567ac794550b543cc513c5f3e2820242dd704152e","0x87eb00489071fa95d008c5244b88e317a3454652dcb1c441213aa16b28cd3ecaa9b22fec0bdd483c1df71c37119100b1","0x92d388acdcb49793afca329cd06e645544d2269234e8b0b27d2818c809c21726bc9cf725651b951e358a63c83dedee24","0xae27e219277a73030da27ab5603c72c8bd81b6224b7e488d7193806a41343dff2456132274991a4722fdb0ef265d04cd","0x97583e08ecb82bbc27c0c8476d710389fa9ffbead5c43001bd36c1b018f29faa98de778644883e51870b69c5ffb558b5","0x90a799a8ce73387599babf6b7da12767c0591cadd36c20a7990e7c05ea1aa2b9645654ec65308ee008816623a2757a6a","0xa1b47841a0a2b06efd9ab8c111309cc5fc9e1d5896b3e42ed531f6057e5ade8977c29831ce08dbda40348386b1dcc06d","0xb92b8ef59bbddb50c9457691bc023d63dfcc54e0fd88bd5d27a09e0d98ac290fc90e6a8f6b88492043bf7c87fac8f3e4","0xa9d6240
b07d62e22ec8ab9b1f6007c975a77b7320f02504fc7c468b4ee9cfcfd945456ff0128bc0ef2174d9e09333f8d","0x8e96534c94693226dc32bca79a595ca6de503af635f802e86442c67e77564829756961d9b701187fe91318da515bf0e6","0xb6ba290623cd8dd5c2f50931c0045d1cfb0c30877bc8fe58cbc3ff61ee8da100045a39153916efa1936f4aee0892b473","0xb43baa7717fac02d4294f5b3bb5e58a65b3557747e3188b482410388daac7a9c177f762d943fd5dcf871273921213da8","0xb9cf00f8fb5e2ef2b836659fece15e735060b2ea39b8e901d3dcbdcf612be8bf82d013833718c04cd46ffaa70b85f42e","0x8017d0c57419e414cbba504368723e751ef990cc6f05dad7b3c2de6360adc774ad95512875ab8337d110bf39a42026fa","0xae7401048b838c0dcd4b26bb6c56d79d51964a0daba780970b6c97daee4ea45854ea0ac0e4139b3fe60dac189f84df65","0x887b237b0cd0f816b749b21db0b40072f9145f7896c36916296973f9e6990ede110f14e5976c906d08987c9836cca57f","0xa88c3d5770148aee59930561ca1223aceb2c832fb5417e188dca935905301fc4c6c2c9270bc1dff7add490a125eb81c6","0xb6cf9b02c0cd91895ad209e38c54039523f137b5848b9d3ad33ae43af6c20c98434952db375fe378de7866f2d0e8b18a","0x84ef3d322ff580c8ad584b1fe4fe346c60866eb6a56e982ba2cf3b021ecb1fdb75ecc6c29747adda86d9264430b3f816","0xa0561c27224baf0927ad144cb71e31e54a064c598373fcf0d66aebf98ab7af1d8e2f343f77baefff69a6da750a219e11","0xaa5cc43f5b8162b016f5e1b61214c0c9d15b1078911c650b75e6cdfb49b85ee04c6739f5b1687d15908444f691f732de","0xad4ac099b935589c7b8fdfdf3db332b7b82bb948e13a5beb121ebd7db81a87d278024a1434bcf0115c54ca5109585c3d","0x8a00466abf3f109a1dcd19e643b603d3af23d42794ef8ca2514dd507ecea44a031ac6dbc18bd02f99701168b25c1791e","0xb00b5900dfad79645f8bee4e5adc7b84eb22e5b1e67df77ccb505b7fc044a6c08a8ea5faca662414eb945f874f884cea","0x950e204e5f17112250b22ea6bb8423baf522fc0af494366f18fe0f949f51d6e6812074a80875cf1ed9c8e7420058d541","0x91e5cbf8bb1a1d50c81608c9727b414d0dd2fb467ebc92f100882a3772e54f94979cfdf8e373fdef7c7fcdd60fec9e00","0xa093f6a857b8caaff80599c2e89c962b415ecbaa70d8fd973155fa976a284c6b29a855f5f7a3521134d00d2972755188","0xb4d55a3551b00da54cc010f80d99ddd2544bde9219a3173dfaadf3848edc7e4056ab532fb75ac26f5f7141e7
24267663","0xa03ea050fc9b011d1b04041b5765d6f6453a93a1819cd9bd6328637d0b428f08526466912895dcc2e3008ee58822e9a7","0x99b12b3665e473d01bc6985844f8994fb65cb15745024fb7af518398c4a37ff215da8f054e8fdf3286984ae36a73ca5e","0x9972c7e7a7fb12e15f78d55abcaf322c11249cd44a08f62c95288f34f66b51f146302bce750ff4d591707075d9123bd2","0xa64b4a6d72354e596d87cda213c4fc2814009461570ccb27d455bbe131f8d948421a71925425b546d8cf63d5458cd64b","0x91c215c73b195795ede2228b7ed1f6e37892e0c6b0f4a0b5a16c57aa1100c84df9239054a173b6110d6c2b7f4bf1ce52","0x88807198910ec1303480f76a3683870246a995e36adaeadc29c22f0bdba8152fe705bd070b75de657b04934f7d0ccf80","0xb37c0026c7b32eb02cacac5b55cb5fe784b8e48b2945c64d3037af83ece556a117f0ff053a5968c2f5fa230e291c1238","0x94c768384ce212bc2387e91ce8b45e4ff120987e42472888a317abc9dcdf3563b62e7a61c8e98d7cdcbe272167d91fc6","0xa10c2564936e967a390cb14ef6e8f8b04ea9ece5214a38837eda09e79e0c7970b1f83adf017c10efd6faa8b7ffa2c567","0xa5085eed3a95f9d4b1269182ea1e0d719b7809bf5009096557a0674bde4201b0ddc1f0f16a908fc468846b3721748ce3","0x87468eb620b79a0a455a259a6b4dfbc297d0d53336537b771254dd956b145dc816b195b7002647ea218552e345818a3f","0xace2b77ffb87366af0a9cb5d27d6fc4a14323dbbf1643f5f3c4559306330d86461bb008894054394cbfaefeaa0bc2745","0xb27f56e840a54fbd793f0b7a7631aa4cee64b5947e4382b2dfb5eb1790270288884c2a19afebe5dc0c6ef335d4531c1c","0x876e438633931f7f895062ee16c4b9d10428875f7bc79a8e156a64d379a77a2c45bf5430c5ab94330f03da352f1e9006","0xa2512a252587d200d2092b44c914df54e04ff8bcef36bf631f84bde0cf5a732e3dc7f00f662842cfd74b0b0f7f24180e","0x827f1bc8f54a35b7a4bd8154f79bcc055e45faed2e74adf7cf21cca95df44d96899e847bd70ead6bb27b9c0ed97bbd8b","0xa0c92cf5a9ed843714f3aea9fe7b880f622d0b4a3bf66de291d1b745279accf6ba35097849691370f41732ba64b5966b","0xa63f5c1e222775658421c487b1256b52626c6f79cb55a9b7deb2352622cedffb08502042d622eb3b02c97f9c09f9c957","0x8cc093d52651e65fb390e186db6cc4de559176af4624d1c44cb9b0e836832419dacac7b8db0627b96288977b738d785d","0xaa7b6a17dfcec146134562d32a12f7bd7fe9522e300859202a02939e69dbd345ed7f
f164a184296268f9984f9312e8fc","0x8ac76721f0d2b679f023d06cbd28c85ae5f4b43c614867ccee88651d4101d4fd352dbdb65bf36bfc3ebc0109e4b0c6f9","0x8d350f7c05fc0dcd9a1170748846fb1f5d39453e4cb31e6d1457bed287d96fc393b2ecc53793ca729906a33e59c6834a","0xb9913510dfc5056d7ec5309f0b631d1ec53e3a776412ada9aefdaf033c90da9a49fdde6719e7c76340e86599b1f0eec2","0x94955626bf4ce87612c5cfffcf73bf1c46a4c11a736602b9ba066328dc52ad6d51e6d4f53453d4ed55a51e0aad810271","0xb0fcab384fd4016b2f1e53f1aafd160ae3b1a8865cd6c155d7073ecc1664e05b1d8bca1def39c158c7086c4e1103345e","0x827de3f03edfbde08570b72de6662c8bfa499b066a0a27ebad9b481c273097d17a5a0a67f01553da5392ec3f149b2a78","0xab7940384c25e9027c55c40df20bd2a0d479a165ced9b1046958353cd69015eeb1e44ed2fd64e407805ba42df10fc7bf","0x8ad456f6ff8cd58bd57567d931f923d0c99141978511b17e03cab7390a72b9f62498b2893e1b05c7c22dd274e9a31919","0xac75399e999effe564672db426faa17a839e57c5ef735985c70cd559a377adec23928382767b55ed5a52f7b11b54b756","0xb17f975a00b817299ac7af5f2024ea820351805df58b43724393bfb3920a8cd747a3bbd4b8286e795521489db3657168","0xa2bed800a6d95501674d9ee866e7314063407231491d794f8cf57d5be020452729c1c7cefd8c50dc1540181f5caab248","0x9743f5473171271ffdd3cc59a3ae50545901a7b45cd4bc3570db487865f3b73c0595bebabbfe79268809ee1862e86e4a","0xb7eab77c2d4687b60d9d7b04e842b3880c7940140012583898d39fcc22d9b9b0a9be2c2e3788b3e6f30319b39c338f09","0x8e2b8f797a436a1b661140e9569dcf3e1eea0a77c7ff2bc4ff0f3e49af04ed2de95e255df8765f1d0927fb456a9926b1","0x8aefea201d4a1f4ff98ffce94e540bb313f2d4dfe7e9db484a41f13fc316ed02b282e1acc9bc6f56cad2dc2e393a44c9","0xb950c17c0e5ca6607d182144aa7556bb0efe24c68f06d79d6413a973b493bfdf04fd147a4f1ab03033a32004cc3ea66f","0xb7b8dcbb179a07165f2dc6aa829fad09f582a71b05c3e3ea0396bf9e6fe73076f47035c031c2101e8e38e0d597eadd30","0xa9d77ed89c77ec1bf8335d08d41c3c94dcca9fd1c54f22837b4e54506b212aa38d7440126c80648ab7723ff18e65ed72","0xa819d6dfd4aef70e52b8402fe5d135f8082d40eb7d3bb5c4d7997395b621e2bb10682a1bad2c9caa33dd818550fc3ec6","0x8f6ee34128fac8bbf13ce2d68b2bb363eb4fd65b297075f8
8e1446ddeac242500eeb4ef0735e105882ff5ba8c44c139b","0xb4440e48255c1644bcecf3a1e9958f1ec4901cb5b1122ee5b56ffd02cad1c29c4266999dbb85aa2605c1b125490074d4","0xa43304a067bede5f347775d5811cf65a6380a8d552a652a0063580b5c5ef12a0867a39c7912fa219e184f4538eba1251","0xa891ad67a790089ffc9f6d53e6a3d63d3556f5f693e0cd8a7d0131db06fd4520e719cfcc3934f0a8f62a95f90840f1d4","0xaea6df8e9bb871081aa0fc5a9bafb00be7d54012c5baf653791907d5042a326aeee966fd9012a582cc16695f5baf7042","0x8ffa2660dc52ed1cd4eff67d6a84a8404f358a5f713d04328922269bee1e75e9d49afeec0c8ad751620f22352a438e25","0x87ec6108e2d63b06abed350f8b363b7489d642486f879a6c3aa90e5b0f335efc2ff2834eef9353951a42136f8e6a1b32","0x865619436076c2760d9e87ddc905023c6de0a8d56eef12c98a98c87837f2ca3f27fd26a2ad752252dbcbe2b9f1d5a032","0x980437dce55964293cb315c650c5586ffd97e7a944a83f6618af31c9d92c37b53ca7a21bb5bc557c151b9a9e217e7098","0x95d128fc369df4ad8316b72aea0ca363cbc7b0620d6d7bb18f7076a8717a6a46956ff140948b0cc4f6d2ce33b5c10054","0x8c7212d4a67b9ec70ebbca04358ad2d36494618d2859609163526d7b3acc2fc935ca98519380f55e6550f70a9bc76862","0x893a2968819401bf355e85eee0f0ed0406a6d4a7d7f172d0017420f71e00bb0ba984f6020999a3cdf874d3cd8ebcd371","0x9103c1af82dece25d87274e89ea0acd7e68c2921c4af3d8d7c82ab0ed9990a5811231b5b06113e7fa43a6bd492b4564f","0x99cfd87a94eab7d35466caa4ed7d7bb45e5c932b2ec094258fb14bf205659f83c209b83b2f2c9ccb175974b2a33e7746","0x874b6b93e4ee61be3f00c32dd84c897ccd6855c4b6251eb0953b4023634490ed17753cd3223472873cbc6095b2945075","0x84a32c0dc4ea60d33aac3e03e70d6d639cc9c4cc435c539eff915017be3b7bdaba33349562a87746291ebe9bc5671f24","0xa7057b24208928ad67914e653f5ac1792c417f413d9176ba635502c3f9c688f7e2ee81800d7e3dc0a340c464da2fd9c5","0xa03fb9ed8286aacfa69fbd5d953bec591c2ae4153400983d5dbb6cd9ea37fff46ca9e5cceb9d117f73e9992a6c055ad2","0x863b2de04e89936c9a4a2b40380f42f20aefbae18d03750fd816c658aee9c4a03df7b12121f795c85d01f415baaeaa59","0x8526eb9bd31790fe8292360d7a4c3eed23be23dd6b8b8f01d2309dbfdc0cfd33ad1568ddd7f8a610f3f85a9dfafc6a92","0xb46ab8c5091a493d6d4d60490c40
aa27950574a338ea5bbc045be3a114af87bdcb160a8c80435a9b7ad815f3cb56a3f3","0xaeadc47b41a8d8b4176629557646202f868b1d728b2dda58a347d937e7ffc8303f20d26d6c00b34c851b8aeec547885d","0xaebb19fc424d72c1f1822aa7adc744cd0ef7e55727186f8df8771c784925058c248406ebeeaf3c1a9ee005a26e9a10c6","0x8ff96e81c1a4a2ab1b4476c21018fae0a67e92129ee36120cae8699f2d7e57e891f5c624902cb1b845b944926a605cc3","0x8251b8d2c43fadcaa049a9e7aff838dae4fb32884018d58d46403ac5f3beb5c518bfd45f03b8abb710369186075eb71c","0xa8b2a64f865f51a5e5e86a66455c093407933d9d255d6b61e1fd81ffafc9538d73caaf342338a66ba8ee166372a3d105","0xaad915f31c6ba7fdc04e2aaac62e84ef434b7ee76a325f07dc430d12c84081999720181067b87d792efd0117d7ee1eab","0xa13db3bb60389883fd41d565c54fb5180d9c47ce2fe7a169ae96e01d17495f7f4fa928d7e556e7c74319c4c25d653eb2","0xa4491b0198459b3f552855d680a59214eb74e6a4d6c5fa3b309887dc50ebea2ecf6d26c040550f7dc478b452481466fb","0x8f017f13d4b1e3f0c087843582b52d5f8d13240912254d826dd11f8703a99a2f3166dfbdfdffd9a3492979d77524276b","0x96c3d5dcd032660d50d7cd9db2914f117240a63439966162b10c8f1f3cf74bc83b0f15451a43b31dbd85e4a7ce0e4bb1","0xb479ec4bb79573d32e0ec93b92bdd7ec8c26ddb5a2d3865e7d4209d119fd3499eaac527615ffac78c440e60ef3867ae0","0xb2c49c4a33aa94b52b6410b599e81ff15490aafa7e43c8031c865a84e4676354a9c81eb4e7b8be6825fdcefd1e317d44","0x906dc51d6a90c089b6704b47592805578a6eed106608eeb276832f127e1b8e858b72e448edcbefb497d152447e0e68ff","0xb0e81c63b764d7dfbe3f3fddc9905aef50f3633e5d6a4af6b340495124abedcff5700dfd1577bbbed7b6bf97d02719cb","0x9304c64701e3b4ed6d146e48a881f7d83a17f58357cca0c073b2bb593afd2d94f6e2a7a1ec511d0a67ad6ff4c3be5937","0xb6fdbd12ba05aa598d80b83f70a15ef90e5cba7e6e75fa038540ee741b644cd1f408a6cecfd2a891ef8d902de586c6b5","0xb80557871a6521b1b3c74a1ba083ae055b575df607f1f7b04c867ba8c8c181ea68f8d90be6031f4d25002cca27c44da2","0xaa7285b8e9712e06b091f64163f1266926a36607f9d624af9996856ed2aaf03a580cb22ce407d1ade436c28b44ca173f","0x8148d72b975238b51e6ea389e5486940d22641b48637d7dfadfa603a605bfc6d74a016480023945d0b85935e396aea5d","0x8a014933
a6aea2684b5762af43dcf4bdbb633cd0428d42d71167a2b6fc563ece5e618bff22f1db2ddb69b845b9a2db19","0x990d91740041db770d0e0eb9d9d97d826f09fd354b91c41e0716c29f8420e0e8aac0d575231efba12fe831091ec38d5a","0x9454d0d32e7e308ddec57cf2522fb1b67a2706e33fb3895e9e1f18284129ab4f4c0b7e51af25681d248d7832c05eb698","0xa5bd434e75bac105cb3e329665a35bce6a12f71dd90c15165777d64d4c13a82bceedb9b48e762bd24034e0fc9fbe45f4","0xb09e3b95e41800d4dc29c6ffdaab2cd611a0050347f6414f154a47ee20ee59bf8cf7181454169d479ebce1eb5c777c46","0xb193e341d6a047d15eea33766d656d807b89393665a783a316e9ba10518e5515c8e0ade3d6e15641d917a8a172a5a635","0xade435ec0671b3621dde69e07ead596014f6e1daa1152707a8c18877a8b067bde2895dd47444ffa69db2bbef1f1d8816","0xa7fd3d6d87522dfc56fb47aef9ce781a1597c56a8bbfd796baba907afdc872f753d732bfda1d3402aee6c4e0c189f52d","0xa298cb4f4218d0464b2fab393e512bbc477c3225aa449743299b2c3572f065bc3a42d07e29546167ed9e1b6b3b3a3af3","0xa9ee57540e1fd9c27f4f0430d194b91401d0c642456c18527127d1f95e2dba41c2c86d1990432eb38a692fda058fafde","0x81d6c1a5f93c04e6d8e5a7e0678c1fc89a1c47a5c920bcd36180125c49fcf7c114866b90e90a165823560b19898a7c16","0xa4b7a1ec9e93c899b9fd9aaf264c50e42c36c0788d68296a471f7a3447af4dbc81e4fa96070139941564083ec5b5b5a1","0xb3364e327d381f46940c0e11e29f9d994efc6978bf37a32586636c0070b03e4e23d00650c1440f448809e1018ef9f6d8","0x8056e0913a60155348300e3a62e28b5e30629a90f7dd4fe11289097076708110a1d70f7855601782a3cdc5bdb1ca9626","0xb4980fd3ea17bac0ba9ee1c470b17e575bb52e83ebdd7d40c93f4f87bebeaff1c8a679f9d3d09d635f068d37d5bd28bd","0x905a9299e7e1853648e398901dfcd437aa575c826551f83520df62984f5679cb5f0ea86aa45ed3e18b67ddc0dfafe809","0xab99553bf31a84f2e0264eb34a08e13d8d15e2484aa9352354becf9a15999c76cc568d68274b70a65e49703fc23540d0","0xa43681597bc574d2dae8964c9a8dc1a07613d7a1272bdcb818d98c85d44e16d744250c33f3b5e4d552d97396b55e601f","0xa54e5a31716fccb50245898c99865644405b8dc920ded7a11f3d19bdc255996054b268e16f2e40273f11480e7145f41e","0x8134f3ad5ef2ad4ba12a8a4e4d8508d91394d2bcdc38b7c8c8c0b0a820357ac9f79d286c65220f471eb1adca1
d98fc68","0x94e2f755e60471578ab2c1adb9e9cea28d4eec9b0e92e0140770bca7002c365fcabfe1e5fb4fe6cfe79a0413712aa3ef","0xad48f8d0ce7eb3cc6e2a3086ad96f562e5bed98a360721492ae2e74dc158586e77ec8c35d5fd5927376301b7741bad2b","0x8614f0630bdd7fbad3a31f55afd9789f1c605dc85e7dc67e2edfd77f5105f878bb79beded6e9f0b109e38ea7da67e8d5","0x9804c284c4c5e77dabb73f655b12181534ca877c3e1e134aa3f47c23b7ec92277db34d2b0a5d38d2b69e5d1c3008a3e3","0xa51b99c3088e473afdaa9e0a9f7e75a373530d3b04e44e1148da0726b95e9f5f0c7e571b2da000310817c36f84b19f7f","0xac4ff909933b3b76c726b0a382157cdc74ab851a1ac6cef76953c6444441804cc43abb883363f416592e8f6cfbc4550b","0xae7d915eb9fc928b65a29d6edbc75682d08584d0014f7bcf17d59118421ae07d26a02137d1e4de6938bcd1ab8ef48fad","0x852f7e453b1af89b754df6d11a40d5d41ea057376e8ecacd705aacd2f917457f4a093d6b9a8801837fa0f62986ad7149","0x92c6bf5ada5d0c3d4dd8058483de36c215fa98edab9d75242f3eff9db07c734ad67337da6f0eefe23a487bf75a600dee","0xa2b42c09d0db615853763552a48d2e704542bbd786aae016eb58acbf6c0226c844f5fb31e428cb6450b9db855f8f2a6f","0x880cc07968266dbfdcfbc21815cd69e0eddfee239167ac693fb0413912d816f2578a74f7716eecd6deefa68c6eccd394","0xb885b3ace736cd373e8098bf75ba66fa1c6943ca1bc4408cd98ac7074775c4478594f91154b8a743d9c697e1b29f5840","0xa51ce78de512bd87bfa0835de819941dffbf18bec23221b61d8096fc9436af64e0693c335b54e7bfc763f287bdca2db6","0xa3c76166a3bdb9b06ef696e57603b58871bc72883ee9d45171a30fe6e1d50e30bc9c51b4a0f5a7270e19a77b89733850","0xacefc5c6f8a1e7c24d7b41e0fc7f6f3dc0ede6cf3115ffb9a6e54b1d954cbca9bda8ad7a084be9be245a1b8e9770d141","0xb420ed079941842510e31cfad117fa11fb6b4f97dfbc6298cb840f27ebaceba23eeaf3f513bcffbf5e4aae946310182d","0x95c3bb5ef26c5ed2f035aa5d389c6b3c15a6705b9818a3fefaed28922158b35642b2e8e5a1a620fdad07e75ad4b43af4","0x825149f9081ecf07a2a4e3e8b5d21bade86c1a882475d51c55ee909330b70c5a2ac63771c8600c6f38df716af61a3ea1","0x873b935aae16d9f08adbc25353cee18af2f1b8d5f26dec6538d6bbddc515f2217ed7d235dcfea59ae61b428798b28637","0x9294150843a2bedcedb3bb74c43eb28e759cf9499582c5430bccefb574a8ddd4f11f9
929257ff4c153990f9970a2558f","0xb619563a811cc531da07f4f04e5c4c6423010ff9f8ed7e6ec9449162e3d501b269fb1c564c09c0429431879b0f45df02","0x91b509b87eb09f007d839627514658c7341bc76d468920fe8a740a8cb96a7e7e631e0ea584a7e3dc1172266f641d0f5c","0x8b8aceace9a7b9b4317f1f01308c3904d7663856946afbcea141a1c615e21ccad06b71217413e832166e9dd915fbe098","0x87b3b36e725833ea0b0f54753c3728c0dbc87c52d44d705ffc709f2d2394414c652d3283bab28dcce09799504996cee0","0xb2670aad5691cbf308e4a6a77a075c4422e6cbe86fdba24e9f84a313e90b0696afb6a067eebb42ba2d10340d6a2f6e51","0x876784a9aff3d54faa89b2bacd3ff5862f70195d0b2edc58e8d1068b3c9074c0da1cfa23671fe12f35e33b8a329c0ccd","0x8b48b9e758e8a8eae182f5cbec96f67d20cca6d3eee80a2d09208eb1d5d872e09ef23d0df8ebbb9b01c7449d0e3e3650","0xb79303453100654c04a487bdcadc9e3578bc80930c489a7069a52e8ca1dba36c492c8c899ce025f8364599899baa287d","0x961b35a6111da54ece6494f24dacd5ea46181f55775b5f03df0e370c34a5046ac2b4082925855325bb42bc2a2c98381d","0xa31feb1be3f5a0247a1f7d487987eb622e34fca817832904c6ee3ee60277e5847945a6f6ea1ac24542c72e47bdf647df","0xa12a2aa3e7327e457e1aae30e9612715dd2cfed32892c1cd6dcda4e9a18203af8a44afb46d03b2eed89f6b9c5a2c0c23","0xa08265a838e69a2ca2f80fead6ccf16f6366415b920c0b22ee359bcd8d4464ecf156f400a16a7918d52e6d733dd64211","0xb723d6344e938d801cca1a00032af200e541d4471fd6cbd38fb9130daa83f6a1dffbbe7e67fc20f9577f884acd7594b2","0xa6733d83ec78ba98e72ddd1e7ff79b7adb0e559e256760d0c590a986e742445e8cdf560d44b29439c26d87edd0b07c8c","0xa61c2c27d3f7b9ff4695a17afedf63818d4bfba390507e1f4d0d806ce8778d9418784430ce3d4199fd3bdbc2504d2af3","0x8332f3b63a6dc985376e8b1b25eeae68be6160fbe40053ba7bcf6f073204f682da72321786e422d3482fd60c9e5aa034","0xa280f44877583fbb6b860d500b1a3f572e3ee833ec8f06476b3d8002058e25964062feaa1e5bec1536d734a5cfa09145","0xa4026a52d277fcea512440d2204f53047718ebfcae7b48ac57ea7f6bfbc5de9d7304db9a9a6cbb273612281049ddaec5","0x95cdf69c831ab2fad6c2535ede9c07e663d2ddccc936b64e0843d2df2a7b1c31f1759c3c20f1e7a57b1c8f0dbb21b540","0x95c96cec88806469c277ab567863c5209027cecc06c701235
8e5f555689c0d9a5ffb219a464f086b45817e8536b86d2f","0xafe38d4684132a0f03d806a4c8df556bf589b25271fbc6fe2e1ed16de7962b341c5003755da758d0959d2e6499b06c68","0xa9b77784fda64987f97c3a23c5e8f61b918be0f7c59ba285084116d60465c4a2aaafc8857eb16823282cc83143eb9126","0xa830f05881ad3ce532a55685877f529d32a5dbe56cea57ffad52c4128ee0fad0eeaf0da4362b55075e77eda7babe70e5","0x992b3ad190d6578033c13ed5abfee4ef49cbc492babb90061e3c51ee4b5790cdd4c8fc1abff1fa2c00183b6b64f0bbbe","0xb1015424d9364aeff75de191652dc66484fdbec3e98199a9eb9671ec57bec6a13ff4b38446e28e4d8aedb58dd619cd90","0xa745304604075d60c9db36cada4063ac7558e7ec2835d7da8485e58d8422e817457b8da069f56511b02601289fbb8981","0xa5ba4330bc5cb3dbe0486ddf995632a7260a46180a08f42ae51a2e47778142132463cc9f10021a9ad36986108fefa1a9","0xb419e9fd4babcaf8180d5479db188bb3da232ae77a1c4ed65687c306e6262f8083070a9ac32220cddb3af2ec73114092","0xa49e23dc5f3468f3bf3a0bb7e4a114a788b951ff6f23a3396ae9e12cbff0abd1240878a3d1892105413dbc38818e807c","0xb7ecc7b4831f650202987e85b86bc0053f40d983f252e9832ef503aea81c51221ce93279da4aa7466c026b2d2070e55d","0x96a8c35cb87f84fa84dcd6399cc2a0fd79cc9158ef4bdde4bae31a129616c8a9f2576cd19baa3f497ca34060979aed7d","0x8681b2c00aa62c2b519f664a95dcb8faef601a3b961bb4ce5d85a75030f40965e2983871d41ea394aee934e859581548","0x85c229a07efa54a713d0790963a392400f55fbb1a43995a535dc6c929f20d6a65cf4efb434e0ad1cb61f689b8011a3bc","0x90856f7f3444e5ad44651c28e24cc085a5db4d2ffe79aa53228c26718cf53a6e44615f3c5cda5aa752d5f762c4623c66","0x978999b7d8aa3f28a04076f74d11c41ef9c89fdfe514936c4238e0f13c38ec97e51a5c078ebc6409e517bfe7ccb42630","0xa099914dd7ed934d8e0d363a648e9038eb7c1ec03fa04dbcaa40f7721c618c3ef947afef7a16b4d7ac8c12aa46637f03","0xab2a104fed3c83d16f2cda06878fa5f30c8c9411de71bfb67fd2fc9aa454dcbcf3d299d72f8cc12e919466a50fcf7426","0xa4471d111db4418f56915689482f6144efc4664cfb0311727f36c864648d35734351becc48875df96f4abd3cfcf820f9","0x83be11727cd30ea94ccc8fa31b09b81c9d6a9a5d3a4686af9da99587332fe78c1f94282f9755854bafd6033549afec91","0x88020ff971dc1a01a9e993cd50a5d
2131ffdcbb990c1a6aaa54b20d8f23f9546a70918ea57a21530dcc440c1509c24ad","0xae24547623465e87905eaffa1fa5d52bb7c453a8dbd89614fa8819a2abcedaf455c2345099b7324ae36eb0ad7c8ef977","0xb59b0c60997de1ee00b7c388bc7101d136c9803bf5437b1d589ba57c213f4f835a3e4125b54738e78abbc21b000f2016","0xa584c434dfe194546526691b68fa968c831c31da42303a1d735d960901c74011d522246f37f299555416b8cf25c5a548","0x80408ce3724f4837d4d52376d255e10f69eb8558399ae5ca6c11b78b98fe67d4b93157d2b9b639f1b5b64198bfe87713","0xabb941e8d406c2606e0ddc35c113604fdd9d249eacc51cb64e2991e551b8639ce44d288cc92afa7a1e7fc599cfc84b22","0xb223173f560cacb1c21dba0f1713839e348ad02cbfdef0626748604c86f89e0f4c919ed40b583343795bdd519ba952c8","0xaf1c70512ec3a19d98b8a1fc3ff7f7f5048a27d17d438d43f561974bbdd116fcd5d5c21040f3447af3f0266848d47a15","0x8a44809568ebe50405bede19b4d2607199159b26a1b33e03d180e6840c5cf59d991a4fb150d111443235d75ecad085b7","0xb06207cdca46b125a27b3221b5b50cf27af4c527dd7c80e2dbcebbb09778a96df3af67e50f07725239ce3583dad60660","0x993352d9278814ec89b26a11c4a7c4941bf8f0e6781ae79559d14749ee5def672259792db4587f85f0100c7bb812f933","0x9180b8a718b971fd27bc82c8582d19c4b4f012453e8c0ffeeeffe745581fc6c07875ab28be3af3fa3896d19f0c89ac5b","0x8b8e1263eb48d0fe304032dd5ea1f30e73f0121265f7458ba9054d3626894e8a5fef665340abd2ede9653045c2665938","0x99a2beee4a10b7941c24b2092192faf52b819afd033e4a2de050fd6c7f56d364d0cf5f99764c3357cf32399e60fc5d74","0x946a4aad7f8647ea60bee2c5fcdeb6f9a58fb2cfca70c4d10e458027a04846e13798c66506151be3df9454b1e417893f","0xa672a88847652d260b5472d6908d1d57e200f1e492d30dd1cecc441cdfc9b76e016d9bab560efd4d7f3c30801de884a9","0x9414e1959c156cde1eb24e628395744db75fc24b9df4595350aaad0bc38e0246c9b4148f6443ef68b8e253a4a6bcf11c","0x9316e9e4ec5fab4f80d6540df0e3a4774db52f1d759d2e5b5bcd3d7b53597bb007eb1887cb7dc61f62497d51ffc8d996","0x902d6d77bb49492c7a00bc4b70277bc28c8bf9888f4307bb017ac75a962decdedf3a4e2cf6c1ea9f9ba551f4610cbbd7","0xb07025a18b0e32dd5e12ec6a85781aa3554329ea12c4cd0d3b2c22e43d777ef6f89876dd90a9c8fb097ddf61cf18adc5","0xb355a849a
d3227caa4476759137e813505ec523cbc2d4105bc7148a4630f9e81918d110479a2d5f5e4cd9ccec9d9d3e3","0xb49532cfdf02ee760109881ad030b89c48ee3bb7f219ccafc13c93aead754d29bdafe345be54c482e9d5672bd4505080","0x9477802410e263e4f938d57fa8f2a6cac7754c5d38505b73ee35ea3f057aad958cb9722ba6b7b3cfc4524e9ca93f9cdc","0x9148ea83b4436339580f3dbc9ba51509e9ab13c03063587a57e125432dd0915f5d2a8f456a68f8fff57d5f08c8f34d6e","0xb00b6b5392b1930b54352c02b1b3b4f6186d20bf21698689bbfc7d13e86538a4397b90e9d5c93fd2054640c4dbe52a4f","0x926a9702500441243cd446e7cbf15dde16400259726794694b1d9a40263a9fc9e12f7bcbf12a27cb9aaba9e2d5848ddc","0xa0c6155f42686cbe7684a1dc327100962e13bafcf3db97971fc116d9f5c0c8355377e3d70979cdbd58fd3ea52440901c","0xa277f899f99edb8791889d0817ea6a96c24a61acfda3ad8c3379e7c62b9d4facc4b965020b588651672fd261a77f1bfc","0x8f528cebb866b501f91afa50e995234bef5bf20bff13005de99cb51eaac7b4f0bf38580cfd0470de40f577ead5d9ba0f","0x963fc03a44e9d502cc1d23250efef44d299befd03b898d07ce63ca607bb474b5cf7c965a7b9b0f32198b04a8393821f7","0xab087438d0a51078c378bf4a93bd48ef933ff0f1fa68d02d4460820df564e6642a663b5e50a5fe509527d55cb510ae04","0xb0592e1f2c54746bb076be0fa480e1c4bebc4225e1236bcda3b299aa3853e3afb401233bdbcfc4a007b0523a720fbf62","0x851613517966de76c1c55a94dc4595f299398a9808f2d2f0a84330ba657ab1f357701d0895f658c18a44cb00547f6f57","0xa2fe9a1dd251e72b0fe4db27be508bb55208f8f1616b13d8be288363ec722826b1a1fd729fc561c3369bf13950bf1fd6","0xb896cb2bc2d0c77739853bc59b0f89b2e008ba1f701c9cbe3bef035f499e1baee8f0ff1e794854a48c320586a2dfc81a","0xa1b60f98e5e5106785a9b81a85423452ee9ef980fa7fa8464f4366e73f89c50435a0c37b2906052b8e58e212ebd366cf","0xa853b0ebd9609656636df2e6acd5d8839c0fda56f7bf9288a943b06f0b67901a32b95e016ca8bc99bd7b5eab31347e72","0xb290fa4c1346963bd5225235e6bdf7c542174dab4c908ab483d1745b9b3a6015525e398e1761c90e4b49968d05e30eea","0xb0f65a33ad18f154f1351f07879a183ad62e5144ad9f3241c2d06533dad09cbb2253949daff1bb02d24d16a3569f7ef0","0xa00db59b8d4218faf5aeafcd39231027324408f208ec1f54d55a1c41228b463b88304d909d16b718cfc7842139
17b71e","0xb8d695dd33dc2c3bc73d98248c535b2770ad7fa31aa726f0aa4b3299efb0295ba9b4a51c71d314a4a1bd5872307534d1","0xb848057cca2ca837ee49c42b88422303e58ea7d2fc76535260eb5bd609255e430514e927cc188324faa8e657396d63ec","0x92677836061364685c2aaf0313fa32322746074ed5666fd5f142a7e8f87135f45cd10e78a17557a4067a51dfde890371","0xa854b22c9056a3a24ab164a53e5c5cf388616c33e67d8ebb4590cb16b2e7d88b54b1393c93760d154208b5ca822dc68f","0x86fff174920388bfab841118fb076b2b0cdec3fdb6c3d9a476262f82689fb0ed3f1897f7be9dbf0932bb14d346815c63","0x99661cf4c94a74e182752bcc4b98a8c2218a8f2765642025048e12e88ba776f14f7be73a2d79bd21a61def757f47f904","0x8a8893144d771dca28760cba0f950a5d634195fd401ec8cf1145146286caffb0b1a6ba0c4c1828d0a5480ce49073c64c","0x938a59ae761359ee2688571e7b7d54692848eb5dde57ffc572b473001ea199786886f8c6346a226209484afb61d2e526","0x923f68a6aa6616714cf077cf548aeb845bfdd78f2f6851d8148cba9e33a374017f2f3da186c39b82d14785a093313222","0xac923a93d7da7013e73ce8b4a2b14b8fd0cc93dc29d5de941a70285bdd19be4740fedfe0c56b046689252a3696e9c5bc","0xb49b32c76d4ec1a2c68d4989285a920a805993bc6fcce6dacd3d2ddae73373050a5c44ba8422a3781050682fa0ef6ba2","0x8a367941c07c3bdca5712524a1411bad7945c7c48ffc7103b1d4dff2c25751b0624219d1ccde8c3f70c465f954be5445","0xb838f029df455efb6c530d0e370bbbf7d87d61a9aea3d2fe5474c5fe0a39cf235ceecf9693c5c6c5820b1ba8f820bd31","0xa8983b7c715eaac7f13a001d2abc462dfc1559dab4a6b554119c271aa8fe00ffcf6b6949a1121f324d6d26cb877bcbae","0xa2afb24ad95a6f14a6796315fbe0d8d7700d08f0cfaf7a2abe841f5f18d4fecf094406cbd54da7232a159f9c5b6e805e","0x87e8e95ad2d62f947b2766ff405a23f7a8afba14e7f718a691d95369c79955cdebe24c54662553c60a3f55e6322c0f6f","0x87c2cbcecb754e0cc96128e707e5c5005c9de07ffd899efa3437cadc23362f5a1d3fcdd30a1f5bdc72af3fb594398c2a","0x91afd6ee04f0496dc633db88b9370d41c428b04fd991002502da2e9a0ef051bcd7b760e860829a44fbe5539fa65f8525","0x8c50e5d1a24515a9dd624fe08b12223a75ca55196f769f24748686315329b337efadca1c63f88bee0ac292dd0a587440","0x8a07e8f912a38d94309f317c32068e87f68f51bdfa082d96026f5f5f8a2211621f8a38
56dda8069386bf15fb2d28c18f","0x94ad1dbe341c44eeaf4dc133eed47d8dbfe752575e836c075745770a6679ff1f0e7883b6aa917462993a7f469d74cab5","0x8745f8bd86c2bb30efa7efb7725489f2654f3e1ac4ea95bd7ad0f3cfa223055d06c187a16192d9d7bdaea7b050c6a324","0x900d149c8d79418cda5955974c450a70845e02e5a4ecbcc584a3ca64d237df73987c303e3eeb79da1af83bf62d9e579f","0x8f652ab565f677fb1a7ba03b08004e3cda06b86c6f1b0b9ab932e0834acf1370abb2914c15b0d08327b5504e5990681c","0x9103097d088be1f75ab9d3da879106c2f597e2cc91ec31e73430647bdd5c33bcfd771530d5521e7e14df6acda44f38a6","0xb0fec7791cfb0f96e60601e1aeced9a92446b61fedab832539d1d1037558612d78419efa87ff5f6b7aab8fd697d4d9de","0xb9d2945bdb188b98958854ba287eb0480ef614199c4235ce5f15fc670b8c5ffe8eeb120c09c53ea8a543a022e6a321ac","0xa9461bb7d5490973ebaa51afc0bb4a5e42acdccb80e2f939e88b77ac28a98870e103e1042899750f8667a8cc9123bae9","0xa37fdf11d4bcb2aed74b9f460a30aa34afea93386fa4cdb690f0a71bc58f0b8df60bec56e7a24f225978b862626fa00e","0xa214420e183e03d531cf91661466ea2187d84b6e814b8b20b3730a9400a7d25cf23181bb85589ebc982cec414f5c2923","0xad09a45a698a6beb3e0915f540ef16e9af7087f53328972532d6b5dfe98ce4020555ece65c6cbad8bd6be8a4dfefe6fd","0xab6742800b02728c92d806976764cb027413d6f86edd08ad8bb5922a2969ee9836878cd39db70db0bd9a2646862acc4f","0x974ca9305bd5ea1dc1755dff3b63e8bfe9f744321046c1395659bcea2a987b528e64d5aa96ac7b015650b2253b37888d","0x84eee9d6bce039c52c2ebc4fccc0ad70e20c82f47c558098da4be2f386a493cbc76adc795b5488c8d11b6518c2c4fab8","0x875d7bda46efcb63944e1ccf760a20144df3b00d53282b781e95f12bfc8f8316dfe6492c2efbf796f1150e36e436e9df","0xb68a2208e0c587b5c31b5f6cb32d3e6058a9642e2d9855da4f85566e1412db528475892060bb932c55b3a80877ad7b4a","0xba006368ecab5febb6ab348644d9b63de202293085ed468df8bc24d992ae8ce468470aa37f36a73630c789fb9c819b30","0x90a196035150846cd2b482c7b17027471372a8ce7d914c4d82b6ea7fa705d8ed5817bd42d63886242585baf7d1397a1c","0xa223b4c85e0daa8434b015fd9170b5561fe676664b67064974a1e9325066ecf88fc81f97ab5011c59fad28cedd04b240","0x82e8ec43139cf15c6bbeed484b62e06cded8a39b5ce0389e4c
be9c9e9c02f2f0275d8d8d4e8dfec8f69a191bef220408","0x81a3fc07a7b68d92c6ee4b6d28f5653ee9ec85f7e2ee1c51c075c1b130a8c5097dc661cf10c5aff1c7114b1a6a19f11a","0x8ed2ef8331546d98819a5dd0e6c9f8cb2630d0847671314a28f277faf68da080b53891dd75c82cbcf7788b255490785d","0xacecabf84a6f9bbed6b2fc2e7e4b48f02ef2f15e597538a73aea8f98addc6badda15e4695a67ecdb505c1554e8f345ec","0xb8f51019b2aa575f8476e03dcadf86cc8391f007e5f922c2a36b2daa63f5a503646a468990cd5c65148d323942193051","0xaaa595a84b403ec65729bc1c8055a94f874bf9adddc6c507b3e1f24f79d3ad359595a672b93aab3394db4e2d4a7d8970","0x895144c55fcbd0f64d7dd69e6855cfb956e02b5658eadf0f026a70703f3643037268fdd673b0d21b288578a83c6338dd","0xa2e92ae6d0d237d1274259a8f99d4ea4912a299816350b876fba5ebc60b714490e198a916e1c38c6e020a792496fa23c","0xa45795fda3b5bb0ad1d3c628f6add5b2a4473a1414c1a232e80e70d1cfffd7f8a8d9861f8df2946999d7dbb56bf60113","0xb6659bf7f6f2fef61c39923e8c23b8c70e9c903028d8f62516d16755cd3fba2fe41c285aa9432dc75ab08f8a1d8a81fc","0xa735609a6bc5bfd85e58234fc439ff1f58f1ff1dd966c5921d8b649e21f006bf2b8642ad8a75063c159aaf6935789293","0xa3c622eb387c9d15e7bda2e3e84d007cb13a6d50d655c3f2f289758e49d3b37b9a35e4535d3cc53d8efd51f407281f19","0x8afe147b53ad99220f5ef9d763bfc91f9c20caecbcf823564236fb0e6ede49414c57d71eec4772c8715cc65a81af0047","0xb5f0203233cf71913951e9c9c4e10d9243e3e4a1f2cb235bf3f42009120ba96e04aa414c9938ea8873b63148478927e8","0x93c52493361b458d196172d7ba982a90a4f79f03aa8008edc322950de3ce6acf4c3977807a2ffa9e924047e02072b229","0xb9e72b805c8ac56503f4a86c82720afbd5c73654408a22a2ac0b2e5caccdfb0e20b59807433a6233bc97ae58cf14c70a","0xaf0475779b5cee278cca14c82da2a9f9c8ef222eb885e8c50cca2315fea420de6e04146590ed0dd5a29c0e0812964df5","0xb430ccab85690db02c2d0eb610f3197884ca12bc5f23c51e282bf3a6aa7e4a79222c3d8761454caf55d6c01a327595f9","0x830032937418b26ee6da9b5206f3e24dc76acd98589e37937e963a8333e5430abd6ce3dd93ef4b8997bd41440eed75d6","0x8820a6d73180f3fe255199f3f175c5eb770461ad5cfdde2fb11508041ed19b8c4ce66ad6ecebf7d7e836cc2318df47ca","0xaef1393e7d97278e77bbf52ef6e1c1
d5db721ccf75fe753cf47a881fa034ca61eaa5098ee5a344c156d2b14ff9e284ad","0x8a4a26c07218948c1196c45d927ef4d2c42ade5e29fe7a91eaebe34a29900072ce5194cf28d51f746f4c4c649daf4396","0x84011dc150b7177abdcb715efbd8c201f9cb39c36e6069af5c50a096021768ba40cef45b659c70915af209f904ede3b6","0xb1bd90675411389bb66910b21a4bbb50edce5330850c5ab0b682393950124252766fc81f5ecfc72fb7184387238c402e","0x8dfdcd30583b696d2c7744655f79809f451a60c9ad5bf1226dc078b19f4585d7b3ef7fa9d54e1ac09520d95cbfd20928","0xb351b4dc6d98f75b8e5a48eb7c6f6e4b78451991c9ba630e5a1b9874c15ac450cd409c1a024713bf2cf82dc400e025ef","0xa462b8bc97ac668b97b28b3ae24b9f5de60e098d7b23ecb600d2194cd35827fb79f77c3e50d358f5bd72ee83fef18fa0","0xa183753265c5f7890270821880cce5f9b2965b115ba783c6dba9769536f57a04465d7da5049c7cf8b3fcf48146173c18","0xa8a771b81ed0d09e0da4d79f990e58eabcd2be3a2680419502dd592783fe52f657fe55125b385c41d0ba3b9b9cf54a83","0xa71ec577db46011689d073245e3b1c3222a9b1fe6aa5b83629adec5733dd48617ebea91346f0dd0e6cdaa86e4931b168","0xa334b8b244f0d598a02da6ae0f918a7857a54dce928376c4c85df15f3b0f2ba3ac321296b8b7c9dd47d770daf16c8f8c","0xa29037f8ef925c417c90c4df4f9fb27fb977d04e2b3dd5e8547d33e92ab72e7a00f5461de21e28835319eae5db145eb7","0xb91054108ae78b00e3298d667b913ebc44d8f26e531eae78a8fe26fdfb60271c97efb2dee5f47ef5a3c15c8228138927","0x926c13efbe90604f6244be9315a34f72a1f8d1aab7572df431998949c378cddbf2fe393502c930fff614ff06ae98a0ce","0x995c758fd5600e6537089b1baa4fbe0376ab274ff3e82a17768b40df6f91c2e443411de9cafa1e65ea88fb8b87d504f4","0x9245ba307a7a90847da75fca8d77ec03fdfc812c871e7a2529c56a0a79a6de16084258e7a9ac4ae8a3756f394336e21c","0x99e0cfa2bb57a7e624231317044c15e52196ecce020db567c8e8cb960354a0be9862ee0c128c60b44777e65ac315e59f","0xad4f6b3d27bbbb744126601053c3dc98c07ff0eb0b38a898bd80dce778372846d67e5ab8fb34fb3ad0ef3f235d77ba7f","0xa0f12cae3722bbbca2e539eb9cc7614632a2aefe51410430070a12b5bc5314ecec5857b7ff8f41e9980cac23064f7c56","0xb487f1bc59485848c98222fd3bc36c8c9bb3d2912e2911f4ceca32c840a7921477f9b1fe00877e05c96c75d3eecae061","0xa6033db539
25654e18ecb3ce715715c36165d7035db9397087ac3a0585e587998a53973d011ac6d48af439493029cee6","0xa6b4d09cd01c70a3311fd131d3710ccf97bde3e7b80efd5a8c0eaeffeb48cca0f951ced905290267b115b06d46f2693b","0xa9dff1df0a8f4f218a98b6f818a693fb0d611fed0fc3143537cbd6578d479af13a653a8155e535548a2a0628ae24fa58","0xa58e469f65d366b519f9a394cacb7edaddac214463b7b6d62c2dbc1316e11c6c5184ce45c16de2d77f990dcdd8b55430","0x989e71734f8119103586dc9a3c5f5033ddc815a21018b34c1f876cdfc112efa868d5751bf6419323e4e59fa6a03ece1c","0xa2da00e05036c884369e04cf55f3de7d659cd5fa3f849092b2519dd263694efe0f051953d9d94b7e121f0aee8b6174d7","0x968f3c029f57ee31c4e1adea89a7f92e28483af9a74f30fbdb995dc2d40e8e657dff8f8d340d4a92bf65f54440f2859f","0x932778df6f60ac1639c1453ef0cbd2bf67592759dcccb3e96dcc743ff01679e4c7dd0ef2b0833dda548d32cb4eba49e2","0xa805a31139f8e0d6dae1ac87d454b23a3dc9fc653d4ca18d4f8ebab30fc189c16e73981c2cb7dd6f8c30454a5208109d","0xa9ba0991296caa2aaa4a1ceacfb205544c2a2ec97088eace1d84ee5e2767656a172f75d2f0c4e16a3640a0e0dec316e0","0xb1e49055c968dced47ec95ae934cf45023836d180702e20e2df57e0f62fb85d7ac60d657ba3ae13b8560b67210449459","0xa94e1da570a38809c71e37571066acabff7bf5632737c9ab6e4a32856924bf6211139ab3cedbf083850ff2d0e0c0fcfc","0x88ef1bb322000c5a5515b310c838c9af4c1cdbb32eab1c83ac3b2283191cd40e9573747d663763a28dad0d64adc13840","0xa987ce205f923100df0fbd5a85f22c9b99b9b9cbe6ddfa8dfda1b8fe95b4f71ff01d6c5b64ca02eb24edb2b255a14ef0","0x84fe8221a9e95d9178359918a108de4763ebfa7a6487facb9c963406882a08a9a93f492f8e77cf9e7ea41ae079c45993","0xaa1cf3dc7c5dcfa15bbbc811a4bb6dbac4fba4f97fb1ed344ab60264d7051f6eef19ea9773441d89929ee942ed089319","0x8f6a7d610d59d9f54689bbe6a41f92d9f6096cde919c1ab94c3c7fcecf0851423bc191e5612349e10f855121c0570f56","0xb5af1fa7894428a53ea520f260f3dc3726da245026b6d5d240625380bfb9c7c186df0204bb604efac5e613a70af5106e","0xa5bce6055ff812e72ce105f147147c7d48d7a2313884dd1f488b1240ee320f13e8a33f5441953a8e7a3209f65b673ce1","0xb9b55b4a1422677d95821e1d042ab81bbf0bf087496504021ec2e17e238c2ca6b44fb3b635a5c9eac0871a724b8
d47c3","0x941c38e533ce4a673a3830845b56786585e5fe49c427f2e5c279fc6db08530c8f91db3e6c7822ec6bb4f956940052d18","0xa38e191d66c625f975313c7007bbe7431b5a06ed2da1290a7d5d0f2ec73770d476efd07b8e632de64597d47df175cbb0","0x94ba76b667abf055621db4c4145d18743a368d951565632ed4e743dd50dd3333507c0c34f286a5c5fdbf38191a2255cd","0xa5ca38c60be5602f2bfa6e00c687ac96ac36d517145018ddbee6f12eb0faa63dd57909b9eeed26085fe5ac44e55d10ab","0xb00fea3b825e60c1ed1c5deb4b551aa65a340e5af36b17d5262c9cd2c508711e4dc50dc2521a2c16c7c901902266e64a","0x971b86fc4033485e235ccb0997a236206ba25c6859075edbcdf3c943116a5030b7f75ebca9753d863a522ba21a215a90","0xb3b31f52370de246ee215400975b674f6da39b2f32514fe6bd54e747752eedca22bb840493b44a67df42a3639c5f901f","0xaffbbfac9c1ba7cbfa1839d2ae271dd6149869b75790bf103230637da41857fc326ef3552ff31c15bda0694080198143","0xa95d42aa7ef1962520845aa3688f2752d291926f7b0d73ea2ee24f0612c03b43f2b0fe3c9a9a99620ffc8d487b981bc2","0x914a266065caf64985e8c5b1cb2e3f4e3fe94d7d085a1881b1fefa435afef4e1b39a98551d096a62e4f5cc1a7f0fdc2e","0x81a0b4a96e2b75bc1bf2dbd165d58d55cfd259000a35504d1ffb18bc346a3e6f07602c683723864ffb980f840836fd8d","0x91c1556631cddd4c00b65b67962b39e4a33429029d311c8acf73a18600e362304fb68bccb56fde40f49e95b7829e0b87","0x8befbacc19e57f7c885d1b7a6028359eb3d80792fe13b92a8400df21ce48deb0bb60f2ddb50e3d74f39f85d7eab23adc","0x92f9458d674df6e990789690ec9ca73dacb67fc9255b58c417c555a8cc1208ace56e8e538f86ba0f3615573a0fbac00d","0xb4b1b3062512d6ae7417850c08c13f707d5838e43d48eb98dd4621baf62eee9e82348f80fe9b888a12874bfa538771f8","0xa13c4a3ac642ede37d9c883f5319e748d2b938f708c9d779714108a449b343f7b71a6e3ef4080fee125b416762920273","0xaf44983d5fc8cceee0551ef934e6e653f2d3efa385e5c8a27a272463a6f333e290378cc307c2b664eb923c78994e706e","0xa389fd6c59fe2b4031cc244e22d3991e541bd203dd5b5e73a6159e72df1ab41d49994961500dcde7989e945213184778","0x8d2141e4a17836c548de9598d7b298b03f0e6c73b7364979a411c464e0628e21cff6ac3d6decdba5d1c4909eff479761","0x980b22ef53b7bdf188a3f14bc51b0dbfdf9c758826daa3cbc1e3986022406a8aa9a6a79
e400567120b88c67faa35ce5f","0xa28882f0a055f96df3711de5d0aa69473e71245f4f3e9aa944e9d1fb166e02caa50832e46da6d3a03b4801735fd01b29","0x8db106a37d7b88f5d995c126abb563934dd8de516af48e85695d02b1aea07f79217e3cdd03c6f5ca57421830186c772b","0xb5a7e50da0559a675c472f7dfaee456caab6695ab7870541b2be8c2b118c63752427184aad81f0e1afc61aef1f28c46f","0x9962118780e20fe291d10b64f28d09442a8e1b5cffd0f3dd68d980d0614050a626c616b44e9807fbee7accecae00686a","0xb38ddf33745e8d2ad6a991aefaf656a33c5f8cbe5d5b6b6fd03bd962153d8fd0e01b5f8f96d80ae53ab28d593ab1d4e7","0x857dc12c0544ff2c0c703761d901aba636415dee45618aba2e3454ff9cbc634a85c8b05565e88520ff9be2d097c8b2b1","0xa80d465c3f8cc63af6d74a6a5086b626c1cb4a8c0fee425964c3bd203d9d7094e299f81ce96d58afc20c8c9a029d9dae","0x89e1c8fbde8563763be483123a3ed702efac189c6d8ab4d16c85e74bbaf856048cc42d5d6e138633a38572ba5ec3f594","0x893a594cf495535f6d216508f8d03c317dcf03446668cba688da90f52d0111ac83d76ad09bf5ea47056846585ee5c791","0xaadbd8be0ae452f7f9450c7d2957598a20cbf10139a4023a78b4438172d62b18b0de39754dd2f8862dbd50a3a0815e53","0xae7d39670ecca3eb6db2095da2517a581b0e8853bdfef619b1fad9aacd443e7e6a40f18209fadd44038a55085c5fe8b2","0x866ef241520eacb6331593cfcb206f7409d2f33d04542e6e52cba5447934e02d44c471f6c9a45963f9307e9809ab91d9","0xb1a09911ad3864678f7be79a9c3c3eb5c84a0a45f8dcb52c67148f43439aeaaa9fd3ed3471276b7e588b49d6ebe3033a","0xadd07b7f0dbb34049cd8feeb3c18da5944bf706871cfd9f14ff72f6c59ad217ebb1f0258b13b167851929387e4e34cfe","0xae048892d5c328eefbdd4fba67d95901e3c14d974bfc0a1fc68155ca9f0d59e61d7ba17c6c9948b120cf35fd26e6fee9","0x9185b4f3b7da0ddb4e0d0f09b8a9e0d6943a4611e43f13c3e2a767ed8592d31e0ba3ebe1914026a3627680274291f6e5","0xa9c022d4e37b0802284ce3b7ee9258628ab4044f0db4de53d1c3efba9de19d15d65cc5e608dbe149c21c2af47d0b07b5","0xb24dbd5852f8f24921a4e27013b6c3fa8885b973266cb839b9c388efad95821d5d746348179dcc07542bd0d0aefad1ce","0xb5fb4f279300876a539a27a441348764908bc0051ebd66dc51739807305e73db3d2f6f0f294ffb91b508ab150eaf8527","0xace50841e718265b290c3483ed4b0fdd1175338c5f1f7530ae9
a0e75d5f80216f4de37536adcbc8d8c95982e88808cd0","0xb19cadcde0f63bd1a9c24bd9c2806f53c14c0b9735bf351601498408ba503ddbd2037c891041cbba47f58b8c483f3b21","0xb6061e63558d312eb891b97b39aa552fa218568d79ee26fe6dd5b864aea9e3216d8f2e2f3b093503be274766dac41426","0x89730fdb2876ab6f0fe780d695f6e12090259027e789b819956d786e977518057e5d1d7f5ab24a3ae3d5d4c97773bd2b","0xb6fa841e81f9f2cad0163a02a63ae96dc341f7ae803b616efc6e1da2fbea551c1b96b11ad02c4afbdf6d0cc9f23da172","0x8fb66187182629c861ddb6896d7ed3caf2ad050c3dba8ab8eb0d7a2c924c3d44c48d1a148f9e33fb1f061b86972f8d21","0x86022ac339c1f84a7fa9e05358c1a5b316b4fc0b83dbe9c8c7225dc514f709d66490b539359b084ce776e301024345fa","0xb50b9c321468da950f01480bb62b6edafd42f83c0001d6e97f2bd523a1c49a0e8574fb66380ea28d23a7c4d54784f9f0","0xa31c05f7032f30d1dac06678be64d0250a071fd655e557400e4a7f4c152be4d5c7aa32529baf3e5be7c4bd49820054f6","0xb95ac0848cd322684772119f5b682d90a66bbf9dac411d9d86d2c34844bbd944dbaf8e47aa41380455abd51687931a78","0xae4a6a5ce9553b65a05f7935e61e496a4a0f6fd8203367a2c627394c9ce1e280750297b74cdc48fd1d9a31e93f97bef4","0xa22daf35f6e9b05e52e0b07f7bd1dbbebd2c263033fb0e1b2c804e2d964e2f11bc0ece6aca6af079dd3a9939c9c80674","0x902150e0cb1f16b9b59690db35281e28998ce275acb313900da8b2d8dfd29fa1795f8ca3ff820c31d0697de29df347c1","0xb17b5104a5dc665cdd7d47e476153d715eb78c6e5199303e4b5445c21a7fa7cf85fe7cfd08d7570f4e84e579b005428c","0xa03f49b81c15433f121680aa02d734bb9e363af2156654a62bcb5b2ba2218398ccb0ff61104ea5d7df5b16ea18623b1e","0x802101abd5d3c88876e75a27ffc2f9ddcce75e6b24f23dba03e5201281a7bd5cc7530b6a003be92d225093ca17d3c3bb","0xa4d183f63c1b4521a6b52226fc19106158fc8ea402461a5cccdaa35fee93669df6a8661f45c1750cd01308149b7bf08e","0x8d17c22e0c8403b69736364d460b3014775c591032604413d20a5096a94d4030d7c50b9fe3240e31d0311efcf9816a47","0x947225acfcce5992eab96276f668c3cbe5f298b90a59f2bb213be9997d8850919e8f496f182689b5cbd54084a7332482","0x8df6f4ed216fc8d1905e06163ba1c90d336ab991a18564b0169623eb39b84e627fa267397da15d3ed754d1f3423bff07","0x83480007a88f1a36dea464c32b849a3
a999316044f12281e2e1c25f07d495f9b1710b4ba0d88e9560e72433addd50bc2","0xb3019d6e591cf5b33eb972e49e06c6d0a82a73a75d78d383dd6f6a4269838289e6e07c245f54fed67f5c9bb0fd5e1c5f","0x92e8ce05e94927a9fb02debadb99cf30a26172b2705003a2c0c47b3d8002bf1060edb0f6a5750aad827c98a656b19199","0xac2aff801448dbbfc13cca7d603fd9c69e82100d997faf11f465323b97255504f10c0c77401e4d1890339d8b224f5803","0xb0453d9903d08f508ee27e577445dc098baed6cde0ac984b42e0f0efed62760bd58d5816cf1e109d204607b7b175e30c","0xae68dc4ba5067e825d46d2c7c67f1009ceb49d68e8d3e4c57f4bcd299eb2de3575d42ea45e8722f8f28497a6e14a1cfe","0xb22486c2f5b51d72335ce819bbafb7fa25eb1c28a378a658f13f9fc79cd20083a7e573248d911231b45a5cf23b561ca7","0x89d1201d1dbd6921867341471488b4d2fd0fc773ae1d4d074c78ae2eb779a59b64c00452c2a0255826fca6b3d03be2b1","0xa2998977c91c7a53dc6104f5bc0a5b675e5350f835e2f0af69825db8af4aeb68435bdbcc795f3dd1f55e1dd50bc0507f","0xb0be4937a925b3c05056ed621910d535ccabf5ab99fd3b9335080b0e51d9607d0fd36cb5781ff340018f6acfca4a9736","0xaea145a0f6e0ba9df8e52e84bb9c9de2c2dc822f70d2724029b153eb68ee9c17de7d35063dcd6a39c37c59fdd12138f7","0x91cb4545d7165ee8ffbc74c874baceca11fdebbc7387908d1a25877ca3c57f2c5def424dab24148826832f1e880bede0","0xb3b579cb77573f19c571ad5eeeb21f65548d7dff9d298b8d7418c11f3e8cd3727c5b467f013cb87d6861cfaceee0d2e3","0xb98a1eeec2b19fecc8378c876d73645aa52fb99e4819903735b2c7a885b242787a30d1269a04bfb8573d72d9bbc5f0f0","0x940c1f01ed362bd588b950c27f8cc1d52276c71bb153d47f07ec85b038c11d9a8424b7904f424423e714454d5e80d1cd","0xaa343a8ecf09ce11599b8cf22f7279cf80f06dbf9f6d62cb05308dbbb39c46fd0a4a1240b032665fbb488a767379b91b","0x87c3ac72084aca5974599d3232e11d416348719e08443acaba2b328923af945031f86432e170dcdd103774ec92e988c9","0x91d6486eb5e61d2b9a9e742c20ec974a47627c6096b3da56209c2b4e4757f007e793ebb63b2b246857c9839b64dc0233","0xaebcd3257d295747dd6fc4ff910d839dd80c51c173ae59b8b2ec937747c2072fa85e3017f9060aa509af88dfc7529481","0xb3075ba6668ca04eff19efbfa3356b92f0ab12632dcda99cf8c655f35b7928c304218e0f9799d68ef9f809a1492ff7db","0x93ba7468bb3
25639ec2abd4d55179c69fd04eaaf39fc5340709227bbaa4ad0a54ea8b480a1a3c8d44684e3be0f8d1980","0xa6aef86c8c0d92839f38544d91b767c582568b391071228ff5a5a6b859c87bf4f81a7d926094a4ada1993ddbd677a920","0x91dcd6d14207aa569194aa224d1e5037b999b69ade52843315ca61ba26abe9a76412c9e88259bc5cf5d7b95b97d9c3bc","0xb3b483d31c88f78d49bd065893bc1e3d2aa637e27dedb46d9a7d60be7660ce7a10aaaa7deead362284a52e6d14021178","0x8e5730070acf8371461ef301cc4523e8e672aa0e3d945d438a0e0aa6bdf8cb9c685dcf38df429037b0c8aff3955c6f5b","0xb8c6d769890a8ee18dc4f9e917993315877c97549549b34785a92543cbeec96a08ae3a28d6e809c4aacd69de356c0012","0x95ca86cd384eaceaa7c077c5615736ca31f36824bd6451a16142a1edc129fa42b50724aeed7c738f08d7b157f78b569e","0x94df609c6d71e8eee7ab74226e371ccc77e01738fe0ef1a6424435b4570fe1e5d15797b66ed0f64eb88d4a3a37631f0e","0x89057b9783212add6a0690d6bb99097b182738deff2bd9e147d7fd7d6c8eacb4c219923633e6309ad993c24572289901","0x83a0f9f5f265c5a0e54defa87128240235e24498f20965009fef664f505a360b6fb4020f2742565dfc7746eb185bcec0","0x91170da5306128931349bc3ed50d7df0e48a68b8cc8420975170723ac79d8773e4fa13c5f14dc6e3fafcad78379050b1","0xb7178484d1b55f7e56a4cc250b6b2ec6040437d96bdfddfa7b35ed27435860f3855c2eb86c636f2911b012eb83b00db8","0xac0b00c4322d1e4208e09cd977b4e54d221133ff09551f75b32b0b55d0e2be80941dda26257b0e288c162e63c7e9cf68","0x9690ed9e7e53ed37ff362930e4096b878b12234c332fd19d5d064824084245952eda9f979e0098110d6963e468cf513e","0xb6fa547bb0bb83e5c5be0ed462a8783fba119041c136a250045c09d0d2af330c604331e7de960df976ff76d67f8000cd","0x814603907c21463bcf4e59cfb43066dfe1a50344ae04ef03c87c0f61b30836c3f4dea0851d6fa358c620045b7f9214c8","0x9495639e3939fad2a3df00a88603a5a180f3c3a0fe4d424c35060e2043e0921788003689887b1ed5be424d9a89bb18bb","0xaba4c02d8d57f2c92d5bc765885849e9ff8393d6554f5e5f3e907e5bfac041193a0d8716d7861104a4295d5a03c36b03","0x8ead0b56c1ca49723f94a998ba113b9058059321da72d9e395a667e6a63d5a9dac0f5717cec343f021695e8ced1f72af","0xb43037f7e3852c34ed918c5854cd74e9d5799eeddfe457d4f93bb494801a064735e326a76e1f5e50a339844a2f4a
8ec9","0x99db8422bb7302199eb0ff3c3d08821f8c32f53a600c5b6fb43e41205d96adae72be5b460773d1280ad1acb806af9be8","0x8a9be08eae0086c0f020838925984df345c5512ff32e37120b644512b1d9d4fecf0fd30639ca90fc6cf334a86770d536","0x81b43614f1c28aa3713a309a88a782fb2bdfc4261dd52ddc204687791a40cf5fd6a263a8179388596582cccf0162efc2","0xa9f3a8b76912deb61d966c75daf5ddb868702ebec91bd4033471c8e533183df548742a81a2671de5be63a502d827437d","0x902e2415077f063e638207dc7e14109652e42ab47caccd6204e2870115791c9defac5425fd360b37ac0f7bd8fe7011f8","0xaa18e4fdc1381b59c18503ae6f6f2d6943445bd00dd7d4a2ad7e5adad7027f2263832690be30d456e6d772ad76f22350","0xa348b40ba3ba7d81c5d4631f038186ebd5e5f314f1ea737259151b07c3cc8cf0c6ed4201e71bcc1c22fefda81a20cde6","0xaa1306f7ac1acbfc47dc6f7a0cb6d03786cec8c8dc8060388ccda777bca24bdc634d03e53512c23dba79709ff64f8620","0x818ccfe46e700567b7f3eb400e5a35f6a5e39b3db3aa8bc07f58ace35d9ae5a242faf8dbccd08d9a9175bbce15612155","0xb7e3da2282b65dc8333592bb345a473f03bd6df69170055fec60222de9897184536bf22b9388b08160321144d0940279","0xa4d976be0f0568f4e57de1460a1729129252b44c552a69fceec44e5b97c96c711763360d11f9e5bf6d86b4976bf40d69","0x85d185f0397c24c2b875b09b6328a23b87982b84ee880f2677a22ff4c9a1ba9f0fea000bb3f7f66375a00d98ebafce17","0xb4ccbb8c3a2606bd9b87ce022704663af71d418351575f3b350d294f4efc68c26f9a2ce49ff81e6ff29c3b63d746294e","0x93ffd3265fddb63724dfde261d1f9e22f15ecf39df28e4d89e9fea03221e8e88b5dd9b77628bacaa783c6f91802d47cc","0xb1fd0f8d7a01378e693da98d03a2d2fda6b099d03454b6f2b1fa6472ff6bb092751ce6290059826b74ac0361eab00e1e","0xa89f440c71c561641589796994dd2769616b9088766e983c873fae0716b95c386c8483ab8a4f367b6a68b72b7456dd32","0xaf4fe92b01d42d03dd5d1e7fa55e96d4bbcb7bf7d4c8c197acd16b3e0f3455807199f683dcd263d74547ef9c244b35cc","0xa8227f6e0a344dfe76bfbe7a1861be32c4f4bed587ccce09f9ce2cf481b2dda8ae4f566154bc663d15f962f2d41761bd","0xa7b361663f7495939ed7f518ba45ea9ff576c4e628995b7aea026480c17a71d63fc2c922319f0502eb7ef8f14a406882","0x8ddcf382a9f39f75777160967c07012cfa89e67b19714a7191f0c68eaf263935e5504e11
04aaabd0899348c972a8d3c6","0x98c95b9f6f5c91f805fb185eedd06c6fc4457d37dd248d0be45a6a168a70031715165ea20606245cbdf8815dc0ac697f","0x805b44f96e001e5909834f70c09be3efcd3b43632bcac5b6b66b6d227a03a758e4b1768ce2a723045681a1d34562aaeb","0xb0e81b07cdc45b3dca60882676d9badb99f25c461b7efe56e3043b80100bb62d29e1873ae25eb83087273160ece72a55","0xb0c53f0abe78ee86c7b78c82ae1f7c070bb0b9c45c563a8b3baa2c515d482d7507bb80771e60b38ac13f78b8af92b4a9","0xa7838ef6696a9e4d2e5dfd581f6c8d6a700467e8fd4e85adabb5f7a56f514785dd4ab64f6f1b48366f7d94728359441b","0x88c76f7700a1d23c30366a1d8612a796da57b2500f97f88fdf2d76b045a9d24e7426a8ffa2f4e86d3046937a841dad58","0xad8964baf98c1f02e088d1d9fcb3af6b1dfa44cdfe0ed2eae684e7187c33d3a3c28c38e8f4e015f9c04d451ed6f85ff6","0x90e9d00a098317ececaa9574da91fc149eda5b772dedb3e5a39636da6603aa007804fa86358550cfeff9be5a2cb7845e","0xa56ff4ddd73d9a6f5ab23bb77efa25977917df63571b269f6a999e1ad6681a88387fcc4ca3b26d57badf91b236503a29","0x97ad839a6302c410a47e245df84c01fb9c4dfef86751af3f9340e86ff8fc3cd52fa5ff0b9a0bd1d9f453e02ca80658a6","0xa4c8c44cbffa804129e123474854645107d1f0f463c45c30fd168848ebea94880f7c0c5a45183e9eb837f346270bdb35","0xa72e53d0a1586d736e86427a93569f52edd2f42b01e78aee7e1961c2b63522423877ae3ac1227a2cf1e69f8e1ff15bc3","0x8559f88a7ef13b4f09ac82ae458bbae6ab25671cfbf52dae7eac7280d6565dd3f0c3286aec1a56a8a16dc3b61d78ce47","0x8221503f4cdbed550876c5dc118a3f2f17800c04e8be000266633c83777b039a432d576f3a36c8a01e8fd18289ebc10b","0x99bfbe5f3e46d4d898a578ba86ed26de7ed23914bd3bcdf3c791c0bcd49398a52419077354a5ab75cea63b6c871c6e96","0xaa134416d8ff46f2acd866c1074af67566cfcf4e8be8d97329dfa0f603e1ff208488831ce5948ac8d75bfcba058ddcaa","0xb02609d65ebfe1fe8e52f21224a022ea4b5ea8c1bd6e7b9792eed8975fc387cdf9e3b419b8dd5bcce80703ab3a12a45f","0xa4f14798508698fa3852e5cac42a9db9797ecee7672a54988aa74037d334819aa7b2ac7b14efea6b81c509134a6b7ad2","0x884f01afecbcb987cb3e7c489c43155c416ed41340f61ecb651d8cba884fb9274f6d9e7e4a46dd220253ae561614e44c","0xa05523c9e71dce1fe5307cc71bd721feb3e1a0f57a7d17c7d1c9
fb080d44527b7dbaa1f817b1af1c0b4322e37bc4bb1e","0x8560aec176a4242b39f39433dd5a02d554248c9e49d3179530815f5031fee78ba9c71a35ceeb2b9d1f04c3617c13d8f0","0x996aefd402748d8472477cae76d5a2b92e3f092fc834d5222ae50194dd884c9fb8b6ed8e5ccf8f6ed483ddbb4e80c747","0x8fd09900320000cbabc40e16893e2fcf08815d288ec19345ad7b6bb22f7d78a52b6575a3ca1ca2f8bc252d2eafc928ec","0x939e51f73022bc5dc6862a0adf8fb8a3246b7bfb9943cbb4b27c73743926cc20f615a036c7e5b90c80840e7f1bfee0e7","0xa0a6258700cadbb9e241f50766573bf9bdb7ad380b1079dc3afb4054363d838e177b869cad000314186936e40359b1f2","0x972699a4131c8ed27a2d0e2104d54a65a7ff1c450ad9da3a325c662ab26869c21b0a84d0700b98c8b5f6ce3b746873d7","0xa454c7fe870cb8aa6491eafbfb5f7872d6e696033f92e4991d057b59d70671f2acdabef533e229878b60c7fff8f748b1","0xa167969477214201f09c79027b10221e4707662e0c0fde81a0f628249f2f8a859ce3d30a7dcc03b8ecca8f7828ad85c7","0x8ff6b7265175beb8a63e1dbf18c9153fb2578c207c781282374f51b40d57a84fd2ef2ea2b9c6df4a54646788a62fd17f","0xa3d7ebeccde69d73d8b3e76af0da1a30884bb59729503ff0fb0c3bccf9221651b974a6e72ea33b7956fc3ae758226495","0xb71ef144c9a98ce5935620cb86c1590bd4f48e5a2815d25c0cdb008fde628cf628c31450d3d4f67abbfeb16178a74cfd","0xb5e0a16d115134f4e2503990e3f2035ed66b9ccf767063fe6747870d97d73b10bc76ed668550cb82eedc9a2ca6f75524","0xb30ffaaf94ee8cbc42aa2c413175b68afdb207dbf351fb20be3852cb7961b635c22838da97eaf43b103aff37e9e725cc","0x98aa7d52284f6c1f22e272fbddd8c8698cf8f5fbb702d5de96452141fafb559622815981e50b87a72c2b1190f59a7deb","0x81fbacda3905cfaf7780bb4850730c44166ed26a7c8d07197a5d4dcd969c09e94a0461638431476c16397dd7bdc449f9","0x95e47021c1726eac2e5853f570d6225332c6e48e04c9738690d53e07c6b979283ebae31e2af1fc9c9b3e59f87e5195b1","0xac024a661ba568426bb8fce21780406537f518075c066276197300841e811860696f7588188bc01d90bace7bc73d56e3","0xa4ebcaf668a888dd404988ab978594dee193dad2d0aec5cdc0ccaf4ec9a7a8228aa663db1da8ddc52ec8472178e40c32","0xa20421b8eaf2199d93b083f2aff37fb662670bd18689d046ae976d1db1fedd2c2ff897985ecc6277b396db7da68bcb27","0x8bc33d4b40197fd4d49d1de47489d10b
90d9b346828f53a82256f3e9212b0cbc6930b895e879da9cec9fedf026aadb3e","0xaaafdd1bec8b757f55a0433eddc0a39f818591954fd4e982003437fcceb317423ad7ee74dbf17a2960380e7067a6b4e2","0xaad34277ebaed81a6ec154d16736866f95832803af28aa5625bf0461a71d02b1faba02d9d9e002be51c8356425a56867","0x976e9c8b150d08706079945bd0e84ab09a648ecc6f64ded9eb5329e57213149ae409ae93e8fbd8eda5b5c69f5212b883","0x8097fae1653247d2aed4111533bc378171d6b2c6d09cbc7baa9b52f188d150d645941f46d19f7f5e27b7f073c1ebd079","0x83905f93b250d3184eaba8ea7d727c4464b6bdb027e5cbe4f597d8b9dc741dcbea709630bd4fd59ce24023bec32fc0f3","0x8095030b7045cff28f34271386e4752f9a9a0312f8df75de4f424366d78534be2b8e1720a19cb1f9a2d21105d790a225","0xa7b7b73a6ae2ed1009c49960374b0790f93c74ee03b917642f33420498c188a169724945a975e5adec0a1e83e07fb1b2","0x856a41c54df393b6660b7f6354572a4e71c8bfca9cabaffb3d4ef2632c015e7ee2bc10056f3eccb3dbed1ad17d939178","0xa8f7a55cf04b38cd4e330394ee6589da3a07dc9673f74804fdf67b364e0b233f14aec42e783200a2e4666f7c5ff62490","0x82c529f4e543c6bca60016dc93232c115b359eaee2798a9cf669a654b800aafe6ab4ba58ea8b9cdda2b371c8d62fa845","0x8caab020c1baddce77a6794113ef1dfeafc5f5000f48e97f4351b588bf02f1f208101745463c480d37f588d5887e6d8c","0x8fa91b3cc400f48b77b6fd77f3b3fbfb3f10cdff408e1fd22d38f77e087b7683adad258804409ba099f1235b4b4d6fea","0x8aa02787663d6be9a35677d9d8188b725d5fcd770e61b11b64e3def8808ea5c71c0a9afd7f6630c48634546088fcd8e2","0xb5635b7b972e195cab878b97dea62237c7f77eb57298538582a330b1082f6207a359f2923864630136d8b1f27c41b9aa","0x8257bb14583551a65975946980c714ecd6e5b629672bb950b9caacd886fbd22704bc9e3ba7d30778adab65dc74f0203a","0xab5fe1cd12634bfa4e5c60d946e2005cbd38f1063ec9a5668994a2463c02449a0a185ef331bd86b68b6e23a8780cb3ba","0xa7d3487da56cda93570cc70215d438204f6a2709bfb5fda6c5df1e77e2efc80f4235c787e57fbf2c74aaff8cbb510a14","0xb61cff7b4c49d010e133319fb828eb900f8a7e55114fc86b39c261a339c74f630e1a7d7e1350244ada566a0ff3d46c4b","0x8d4d1d55d321d278db7a85522ccceca09510374ca81d4d73e3bb5249ace7674b73900c35a531ec4fa6448fabf7ad00dc","0x966492248aee
24f0f56c8cfca3c8ec6ba3b19abb69ae642041d4c3be8523d22c65c4dafcab4c58989ccc4e0bd2f77919","0xb20c320a90cb220b86e1af651cdc1e21315cd215da69f6787e28157172f93fc8285dcd59b039c626ed8ca4633cba1a47","0xaae9e6b22f018ceb5c0950210bb8182cb8cb61014b7e14581a09d36ebd1bbfebdb2b82afb7fdb0cf75e58a293d9c456d","0x875547fb67951ad37b02466b79f0c9b985ccbc500cfb431b17823457dc79fb9597ec42cd9f198e15523fcd88652e63a4","0x92afce49773cb2e20fb21e4f86f18e0959ebb9c33361547ddb30454ee8e36b1e234019cbdca0e964cb292f7f77df6b90","0x8af85343dfe1821464c76ba11c216cbef697b5afc69c4d821342e55afdac047081ec2e3f7b09fc14b518d9a23b78c003","0xb7de4a1648fd63f3a918096ea669502af5357438e69dac77cb8102b6e6c15c76e033cfaa80dafc806e535ede5c1a20aa","0xac80e9b545e8bd762951d96c9ce87f629d01ffcde07efc2ef7879ca011f1d0d8a745abf26c9d452541008871304fac00","0xa4cf0f7ed724e481368016c38ea5816698a5f68eb21af4d3c422d2ba55f96a33e427c2aa40de1b56a7cfac7f7cf43ab0","0x899b0a678bb2db2cae1b44e75a661284844ebcdd87abf308fedeb2e4dbe5c5920c07db4db7284a7af806a2382e8b111a","0xaf0588a2a4afce2b1b13c1230816f59e8264177e774e4a341b289a101dcf6af813638fed14fb4d09cb45f35d5d032609","0xa4b8df79e2be76e9f5fc5845f06fe745a724cf37c82fcdb72719b77bdebea3c0e763f37909373e3a94480cc5e875cba0","0x83e42c46d88930c8f386b19fd999288f142d325e2ebc86a74907d6d77112cb0d449bc511c95422cc810574031a8cbba9","0xb5e39534070de1e5f6e27efbdd3dc917d966c2a9b8cf2d893f964256e95e954330f2442027dc148c776d63a95bcde955","0x958607569dc28c075e658cd4ae3927055c6bc456eef6212a6fea8205e48ed8777a8064f584cda38fe5639c371e2e7fba","0x812adf409fa63575113662966f5078a903212ffb65c9b0bbe62da0f13a133443a7062cb8fd70f5e5dd5559a32c26d2c8","0xa679f673e5ce6a3cce7fa31f22ee3785e96bcb55e5a776e2dd3467bef7440e3555d1a9b87cb215e86ee9ed13a090344b","0xafedbb34508b159eb25eb2248d7fe328f86ef8c7d84c62d5b5607d74aae27cc2cc45ee148eb22153b09898a835c58df4","0xb75505d4f6b67d31e665cfaf5e4acdb5838ae069166b7fbcd48937c0608a59e40a25302fcc1873d2e81c1782808c70f0","0xb62515d539ec21a155d94fc00ea3c6b7e5f6636937bce18ed5b618c12257fb82571886287fd5d1da495296c663ebc
512","0xab8e1a9446bbdd588d1690243b1549d230e6149c28f59662b66a8391a138d37ab594df38e7720fae53217e5c3573b5be","0xb31e8abf4212e03c3287bb2c0a153065a7290a16764a0bac8f112a72e632185a654bb4e88fdd6053e6c7515d9719fadb","0xb55165477fe15b6abd2d0f4fddaa9c411710dcc4dd712daba3d30e303c9a3ee5415c256f9dc917ecf18c725b4dbab059","0xa0939d4f57cacaae549b78e87cc234de4ff6a35dc0d9cd5d7410abc30ebcd34c135e008651c756e5a9d2ca79c40ef42b","0x8cf10e50769f3443340844aad4d56ec790850fed5a41fcbd739abac4c3015f0a085a038fbe7fae9f5ad899cce5069f6b","0x924055e804d82a99ea4bb160041ea4dc14b568abf379010bc1922fde5d664718c31d103b8b807e3a1ae809390e708c73","0x8ec0f9d26f71b0f2e60a179e4fd1778452e2ffb129d50815e5d7c7cb9415fa69ae5890578086e8ef6bfde35ad2a74661","0x98c7f12b15ec4426b59f737f73bf5faea4572340f4550b7590dfb7f7ffedb2372e3e555977c63946d579544c53210ad0","0x8a935f7a955c78f69d66f18eee0092e5e833fa621781c9581058e219af4d7ceee48b84e472e159dda6199715fb2f9acf","0xb78d4219f95a2dbfaa7d0c8a610c57c358754f4f43c2af312ab0fe8f10a5f0177e475332fb8fd23604e474fc2abeb051","0x8d086a14803392b7318c28f1039a17e3cfdcece8abcaca3657ec3d0ac330842098a85c0212f889fabb296dfb133ce9aa","0xa53249f417aac82f2c2a50c244ce21d3e08a5e5a8bd33bec2a5ab0d6cd17793e34a17edfa3690899244ce201e2fb9986","0x8619b0264f9182867a1425be514dc4f1ababc1093138a728a28bd7e4ecc99b9faaff68c23792264bc6e4dce5f52a5c52","0x8c171edbbbde551ec19e31b2091eb6956107dd9b1f853e1df23bff3c10a3469ac77a58335eee2b79112502e8e163f3de","0xa9d19ec40f0ca07c238e9337c6d6a319190bdba2db76fb63902f3fb459aeeb50a1ac30db5b25ee1b4201f3ca7164a7f4","0xb9c6ec14b1581a03520b8d2c1fbbc31fb8ceaef2c0f1a0d0080b6b96e18442f1734bea7ef7b635d787c691de4765d469","0x8cb437beb4cfa013096f40ccc169a713dc17afee6daa229a398e45fd5c0645a9ad2795c3f0cd439531a7151945d7064d","0xa6e8740cc509126e146775157c2eb278003e5bb6c48465c160ed27888ca803fa12eee1f6a8dd7f444f571664ed87fdc1","0xb75c1fecc85b2732e96b3f23aefb491dbd0206a21d682aee0225838dc057d7ed3b576176353e8e90ae55663f79e986e4","0xad8d249b0aea9597b08358bce6c77c1fd552ef3fbc197d6a1cfe44e5e6f89b628b12a6fb0
4d5dcfcbacc51f46e4ae7bb","0xb998b2269932cbd58d04b8e898d373ac4bb1a62e8567484f4f83e224061bc0f212459f1daae95abdbc63816ae6486a55","0x827988ef6c1101cddc96b98f4a30365ff08eea2471dd949d2c0a9b35c3bbfa8c07054ad1f4c88c8fbf829b20bb5a9a4f","0x8692e638dd60babf7d9f2f2d2ce58e0ac689e1326d88311416357298c6a2bffbfebf55d5253563e7b3fbbf5072264146","0xa685d75b91aea04dbc14ab3c1b1588e6de96dae414c8e37b8388766029631b28dd860688079b12d09cd27f2c5af11adf","0xb57eced93eec3371c56679c259b34ac0992286be4f4ff9489d81cf9712403509932e47404ddd86f89d7c1c3b6391b28c","0xa1c8b4e42ebcbd8927669a97f1b72e236fb19249325659e72be7ddaaa1d9e81ca2abb643295d41a8c04a2c01f9c0efd7","0x877c33de20d4ed31674a671ba3e8f01a316581e32503136a70c9c15bf0b7cb7b1cba6cd4eb641fad165fb3c3c6c235fd","0xa2a469d84ec478da40838f775d11ad38f6596eb41caa139cc190d6a10b5108c09febae34ffdafac92271d2e73c143693","0x972f817caedb254055d52e963ed28c206848b6c4cfdb69dbc961c891f8458eaf582a6d4403ce1177d87bc2ea410ef60a","0xaccbd739e138007422f28536381decc54bb6bd71d93edf3890e54f9ef339f83d2821697d1a4ac1f5a98175f9a9ecb9b5","0x8940f8772e05389f823b62b3adc3ed541f91647f0318d7a0d3f293aeeb421013de0d0a3664ea53dd24e5fbe02d7efef6","0x8ecce20f3ef6212edef07ec4d6183fda8e0e8cad2c6ccd0b325e75c425ee1faba00b5c26b4d95204238931598d78f49d","0x97cc72c36335bd008afbed34a3b0c7225933faba87f7916d0a6d2161e6f82e0cdcda7959573a366f638ca75d30e9dab1","0x9105f5de8699b5bdb6bd3bb6cc1992d1eac23929c29837985f83b22efdda92af64d9c574aa9640475087201bbbe5fd73","0x8ffb33c4f6d05c413b9647eb6933526a350ed2e4278ca2ecc06b0e8026d8dbe829c476a40e45a6df63a633090a3f82ef","0x8bfc6421fdc9c2d2aaa68d2a69b1a2728c25b84944cc3e6a57ff0c94bfd210d1cbf4ff3f06702d2a8257024d8be7de63","0xa80e1dc1dddfb41a70220939b96dc6935e00b32fb8be5dff4eed1f1c650002ff95e4af481c43292e3827363b7ec4768a","0x96f714ebd54617198bd636ba7f7a7f8995a61db20962f2165078d9ed8ee764d5946ef3cbdc7ebf8435bb8d5dd4c1deac","0x8cdb0890e33144d66391d2ae73f5c71f5a861f72bc93bff6cc399fc25dd1f9e17d8772592b44593429718784802ac377","0x8ccf9a7f80800ee770b92add734ed45a73ecc31e2af0e04364eef
c6056a8223834c7c0dc9dfc52495bdec6e74ce69994","0xaa0875f423bd68b5f10ba978ddb79d3b96ec093bfbac9ff366323193e339ed7c4578760fb60f60e93598bdf1e5cc4995","0xa9214f523957b59c7a4cb61a40251ad72aba0b57573163b0dc0f33e41d2df483fb9a1b85a5e7c080e9376c866790f8cb","0xb6224b605028c6673a536cc8ff9aeb94e7a22e686fda82cf16068d326469172f511219b68b2b3affb7933af0c1f80d07","0xb6d58968d8a017c6a34e24c2c09852f736515a2c50f37232ac6b43a38f8faa7572cc31dade543b594b61b5761c4781d0","0x8a97cefe5120020c38deeb861d394404e6c993c6cbd5989b6c9ebffe24f46ad11b4ba6348e2991cbf3949c28cfc3c99d","0x95bf046f8c3a9c0ce2634be4de3713024daec3fc4083e808903b25ce3ac971145af90686b451efcc72f6b22df0216667","0xa6a4e2f71b8fa28801f553231eff2794c0f10d12e7e414276995e21195abc9c2983a8997e41af41e78d19ff6fbb2680b","0x8e5e62a7ca9c2f58ebaab63db2ff1fb1ff0877ae94b7f5e2897f273f684ae639dff44cc65718f78a9c894787602ab26a","0x8542784383eec4f565fcb8b9fc2ad8d7a644267d8d7612a0f476fc8df3aff458897a38003d506d24142ad18f93554f2b","0xb7db68ba4616ea072b37925ec4fb39096358c2832cc6d35169e032326b2d6614479f765ae98913c267105b84afcb9bf2","0x8b31dbb9457d23d416c47542c786e07a489af35c4a87dadb8ee91bea5ac4a5315e65625d78dad2cf8f9561af31b45390","0xa8545a1d91ac17257732033d89e6b7111db8242e9c6ebb0213a88906d5ef407a2c6fdb444e29504b06368b6efb4f4839","0xb1bd85d29ebb28ccfb05779aad8674906b267c2bf8cdb1f9a0591dd621b53a4ee9f2942687ee3476740c0b4a7621a3ae","0xa2b54534e152e46c50d91fff03ae9cd019ff7cd9f4168b2fe7ac08ef8c3bbc134cadd3f9d6bd33d20ae476c2a8596c8a","0xb19b571ff4ae3e9f5d95acda133c455e72c9ea9973cae360732859836c0341c4c29ab039224dc5bc3deb824e031675d8","0x940b5f80478648bac025a30f3efeb47023ce20ee98be833948a248bca6979f206bb28fc0f17b90acf3bb4abd3d14d731","0x8f106b40588586ac11629b96d57808ad2808915d89539409c97414aded90b4ff23286a692608230a52bff696055ba5d6","0xae6bda03aa10da3d2abbc66d764ca6c8d0993e7304a1bdd413eb9622f3ca1913baa6da1e9f4f9e6cf847f14f44d6924d","0xa18e7796054a340ef826c4d6b5a117b80927afaf2ebd547794c400204ae2caf277692e2eabb55bc2f620763c9e9da66d","0x8d2d25180dc2c65a4844d3e66819ccfcf
48858f0cc89e1c77553b463ec0f7feb9a4002ce26bc618d1142549b9850f232","0x863f413a394de42cc8166c1c75d513b91d545fff1de6b359037a742c70b008d34bf8e587afa2d62c844d0c6f0ea753e7","0x83cd0cf62d63475e7fcad18a2e74108499cdbf28af2113cfe005e3b5887794422da450b1944d0a986eb7e1f4c3b18f25","0xb4f8b350a6d88fea5ab2e44715a292efb12eb52df738c9b2393da3f1ddee68d0a75b476733ccf93642154bceb208f2b8","0xb3f52aaa4cd4221cb9fc45936cc67fd3864bf6d26bf3dd86aa85aa55ecfc05f5e392ecce5e7cf9406b4b1c4fce0398c8","0xb33137084422fb643123f40a6df2b498065e65230fc65dc31791c330e898c51c3a65ff738930f32c63d78f3c9315f85b","0x91452bfa75019363976bb7337fe3a73f1c10f01637428c135536b0cdc7da5ce558dae3dfc792aa55022292600814a8ef","0xad6ba94c787cd4361ca642c20793ea44f1f127d4de0bb4a77c7fbfebae0fcadbf28e2cb6f0c12c12a07324ec8c19761d","0x890aa6248b17f1501b0f869c556be7bf2b1d31a176f9978bb97ab7a6bd4138eed32467951c5ef1871944b7f620542f43","0x82111db2052194ee7dd22ff1eafffac0443cf969d3762cceae046c9a11561c0fdce9c0711f88ac01d1bed165f8a7cee3","0xb1527b71df2b42b55832f72e772a466e0fa05743aacc7814f4414e4bcc8d42a4010c9e0fd940e6f254cafedff3cd6543","0x922370fa49903679fc565f09c16a5917f8125e72acfeb060fcdbadbd1644eb9f4016229756019c93c6d609cda5d5d174","0xaa4c7d98a96cab138d2a53d4aee8ebff6ef903e3b629a92519608d88b3bbd94de5522291a1097e6acf830270e64c8ee1","0xb3dc21608a389a72d3a752883a382baaafc61ecc44083b832610a237f6a2363f24195acce529eb4aed4ef0e27a12b66e","0x94619f5de05e07b32291e1d7ab1d8b7337a2235e49d4fb5f3055f090a65e932e829efa95db886b32b153bdd05a53ec8c","0xade1e92722c2ffa85865d2426fb3d1654a16477d3abf580cfc45ea4b92d5668afc9d09275d3b79283e13e6b39e47424d","0xb7201589de7bed094911dd62fcd25c459a8e327ac447b69f541cdba30233063e5ddffad0b67e9c3e34adcffedfd0e13d","0x809d325310f862d6549e7cb40f7e5fc9b7544bd751dd28c4f363c724a0378c0e2adcb5e42ec8f912f5f49f18f3365c07","0xa79c20aa533de7a5d671c99eb9eb454803ba54dd4f2efa3c8fec1a38f8308e9905c71e9282955225f686146388506ff6","0xa85eeacb5e8fc9f3ed06a3fe2dc3108ab9f8c5877b148c73cf26e4e979bf5795edbe2e63a8d452565fd1176ed40402b2","0x97ef55662f8a1
ec0842b22ee21391227540adf7708f491436044f3a2eb18c471525e78e1e14fa292507c99d74d7437c6","0x93110d64ed5886f3d16ce83b11425576a3a7a9bb831cd0de3f9a0b0f2270a730d68136b4ef7ff035ede004358f419b5c","0xac9ed0a071517f0ae4f61ce95916a90ba9a77a3f84b0ec50ef7298acdcd44d1b94525d191c39d6bd1bb68f4471428760","0x98abd6a02c7690f5a339adf292b8c9368dfc12e0f8069cf26a5e0ce54b4441638f5c66ea735142f3c28e00a0024267e6","0xb51efb73ba6d44146f047d69b19c0722227a7748b0e8f644d0fc9551324cf034c041a2378c56ce8b58d06038fb8a78de","0x8f115af274ef75c1662b588b0896b97d71f8d67986ae846792702c4742ab855952865ce236b27e2321967ce36ff93357","0xb3c4548f14d58b3ab03c222da09e4381a0afe47a72d18d50a94e0008797f78e39e99990e5b4757be62310d400746e35a","0xa9b1883bd5f31f909b8b1b6dcb48c1c60ed20aa7374b3ffa7f5b2ed036599b5bef33289d23c80a5e6420d191723b92f7","0x85d38dffd99487ae5bb41ab4a44d80a46157bbbe8ef9497e68f061721f74e4da513ccc3422936b059575975f6787c936","0xadf870fcb96e972c033ab7a35d28ae79ee795f82bc49c3bd69138f0e338103118d5529c53f2d72a9c0d947bf7d312af2","0xab4c7a44e2d9446c6ff303eb49aef0e367a58b22cc3bb27b4e69b55d1d9ee639c9234148d2ee95f9ca8079b1457d5a75","0xa386420b738aba2d7145eb4cba6d643d96bda3f2ca55bb11980b318d43b289d55a108f4bc23a9606fb0bccdeb3b3bb30","0x847020e0a440d9c4109773ecca5d8268b44d523389993b1f5e60e541187f7c597d79ebd6e318871815e26c96b4a4dbb1","0xa530aa7e5ca86fcd1bec4b072b55cc793781f38a666c2033b510a69e110eeabb54c7d8cbcb9c61fee531a6f635ffa972","0x87364a5ea1d270632a44269d686b2402da737948dac27f51b7a97af80b66728b0256547a5103d2227005541ca4b7ed04","0x8816fc6e16ea277de93a6d793d0eb5c15e9e93eb958c5ef30adaf8241805adeb4da8ce19c3c2167f971f61e0b361077d","0x8836a72d301c42510367181bb091e4be377777aed57b73c29ef2ce1d475feedd7e0f31676284d9a94f6db01cc4de81a2","0xb0d9d8b7116156d9dde138d28aa05a33e61f8a85839c1e9071ccd517b46a5b4b53acb32c2edd7150c15bc1b4bd8db9e3","0xae931b6eaeda790ba7f1cd674e53dc87f6306ff44951fa0df88d506316a5da240df9794ccbd7215a6470e6b31c5ea193","0x8c6d5bdf87bd7f645419d7c6444e244fe054d437ed1ba0c122fde7800603a5fadc061e5b836cb22a6cfb2b466f20f0
13","0x90d530c6d0cb654999fa771b8d11d723f54b8a8233d1052dc1e839ea6e314fbed3697084601f3e9bbb71d2b4eaa596df","0xb0d341a1422588c983f767b1ed36c18b141774f67ef6a43cff8e18b73a009da10fc12120938b8bba27f225bdfd3138f9","0xa131b56f9537f460d304e9a1dd75702ace8abd68cb45419695cb8dee76998139058336c87b7afd6239dc20d7f8f940cc","0xaa6c51fa28975f709329adee1bbd35d49c6b878041841a94465e8218338e4371f5cb6c17f44a63ac93644bf28f15d20f","0x88440fb584a99ebd7f9ea04aaf622f6e44e2b43bbb49fb5de548d24a238dc8f26c8da2ccf03dd43102bda9f16623f609","0x9777b8695b790e702159a4a750d5e7ff865425b95fa0a3c15495af385b91c90c00a6bd01d1b77bffe8c47d01baae846f","0x8b9d764ece7799079e63c7f01690c8eff00896a26a0d095773dea7a35967a8c40db7a6a74692f0118bf0460c26739af4","0x85808c65c485520609c9e61fa1bb67b28f4611d3608a9f7a5030ee61c3aa3c7e7dc17fff48af76b4aecee2cb0dbd22ac","0xad2783a76f5b3db008ef5f7e67391fda4e7e36abde6b3b089fc4835b5c339370287935af6bd53998bed4e399eda1136d","0x96f18ec03ae47c205cc4242ca58e2eff185c9dca86d5158817e2e5dc2207ab84aadda78725f8dc080a231efdc093b940","0x97de1ab6c6cc646ae60cf7b86df73b9cf56cc0cd1f31b966951ebf79fc153531af55ca643b20b773daa7cab784b832f7","0x870ba266a9bfa86ef644b1ef025a0f1b7609a60de170fe9508de8fd53170c0b48adb37f19397ee8019b041ce29a16576","0xad990e888d279ac4e8db90619d663d5ae027f994a3992c2fbc7d262b5990ae8a243e19157f3565671d1cb0de17fe6e55","0x8d9d5adcdd94c5ba3be4d9a7428133b42e485f040a28d16ee2384758e87d35528f7f9868de9bd23d1a42a594ce50a567","0x85a33ed75d514ece6ad78440e42f7fcdb59b6f4cff821188236d20edae9050b3a042ce9bc7d2054296e133d033e45022","0x92afd2f49a124aaba90de59be85ff269457f982b54c91b06650c1b8055f9b4b0640fd378df02a00e4fc91f7d226ab980","0x8c0ee09ec64bd831e544785e3d65418fe83ed9c920d9bb4d0bf6dd162c1264eb9d6652d2def0722e223915615931581c","0x8369bedfa17b24e9ad48ebd9c5afea4b66b3296d5770e09b00446c5b0a8a373d39d300780c01dcc1c6752792bccf5fd0","0x8b9e960782576a59b2eb2250d346030daa50bbbec114e95cdb9e4b1ba18c3d34525ae388f859708131984976ca439d94","0xb682bface862008fea2b5a07812ca6a28a58fd151a1d54c708fc2f8572916e0d678a9cb8dc
1c10c0470025c8a605249e","0xa38d5e189bea540a824b36815fc41e3750760a52be0862c4cac68214febdc1a754fb194a7415a8fb7f96f6836196d82a","0xb9e7fbda650f18c7eb8b40e42cc42273a7298e65e8be524292369581861075c55299ce69309710e5b843cb884de171bd","0xb6657e5e31b3193874a1bace08f42faccbd3c502fb73ad87d15d18a1b6c2a146f1baa929e6f517db390a5a47b66c0acf","0xae15487312f84ed6265e4c28327d24a8a0f4d2d17d4a5b7c29b974139cf93223435aaebe3af918f5b4bb20911799715f","0x8bb4608beb06bc394e1a70739b872ce5a2a3ffc98c7547bf2698c893ca399d6c13686f6663f483894bccaabc3b9c56ad","0xb58ac36bc6847077584308d952c5f3663e3001af5ecf2e19cb162e1c58bd6c49510205d453cffc876ca1dc6b8e04a578","0x924f65ced61266a79a671ffb49b300f0ea44c50a0b4e3b02064faa99fcc3e4f6061ea8f38168ab118c5d47bd7804590e","0x8d67d43b8a06b0ff4fafd7f0483fa9ed1a9e3e658a03fb49d9d9b74e2e24858dc1bed065c12392037b467f255d4e5643","0xb4d4f87813125a6b355e4519a81657fa97c43a6115817b819a6caf4823f1d6a1169683fd68f8d025cdfa40ebf3069acb","0xa7fd4d2c8e7b59b8eed3d4332ae94b77a89a2616347402f880bc81bde072220131e6dbec8a605be3a1c760b775375879","0x8d4a7d8fa6f55a30df37bcf74952e2fa4fd6676a2e4606185cf154bdd84643fd01619f8fb8813a564f72e3f574f8ce30","0x8086fb88e6260e9a9c42e9560fde76315ff5e5680ec7140f2a18438f15bc2cc7d7d43bfb5880b180b738c20a834e6134","0x916c4c54721de03934fee6f43de50bb04c81f6f8dd4f6781e159e71c40c60408aa54251d457369d133d4ba3ed7c12cb4","0x902e5bf468f11ed9954e2a4a595c27e34abe512f1d6dc08bbca1c2441063f9af3dc5a8075ab910a10ff6c05c1c644a35","0xa1302953015e164bf4c15f7d4d35e3633425a78294406b861675667eec77765ff88472306531e5d3a4ec0a2ff0dd6a9e","0x87874461df3c9aa6c0fa91325576c0590f367075f2f0ecfeb34afe162c04c14f8ce9d608c37ac1adc8b9985bc036e366","0x84b50a8a61d3cc609bfb0417348133e698fe09a6d37357ce3358de189efcf35773d78c57635c2d26c3542b13cc371752","0xacaed2cff8633d12c1d12bb7270c54d65b0b0733ab084fd47f81d0a6e1e9b6f300e615e79538239e6160c566d8bb8d29","0x889e6a0e136372ca4bac90d1ab220d4e1cad425a710e8cdd48b400b73bb8137291ceb36a39440fa84305783b1d42c72f","0x90952e5becec45b2b73719c228429a2c364991cf1d5a9d6845ae5b
38018c2626f4308daa322cab1c72e0f6c621bb2b35","0x8f5a97a801b6e9dcd66ccb80d337562c96f7914e7169e8ff0fda71534054c64bf2a9493bb830623d612cfe998789be65","0x84f3df8b9847dcf1d63ca470dc623154898f83c25a6983e9b78c6d2d90a97bf5e622445be835f32c1e55e6a0a562ea78","0x91d12095cd7a88e7f57f254f02fdb1a1ab18984871dead2f107404bcf8069fe68258c4e6f6ebd2477bddf738135400bb","0xb771a28bc04baef68604d4723791d3712f82b5e4fe316d7adc2fc01b935d8e644c06d59b83bcb542afc40ebafbee0683","0x872f6341476e387604a7e93ae6d6117e72d164e38ebc2b825bc6df4fcce815004d7516423c190c1575946b5de438c08d","0x90d6b4aa7d40a020cdcd04e8b016d041795961a8e532a0e1f4041252131089114a251791bf57794cadb7d636342f5d1c","0x899023ba6096a181448d927fed7a0fe858be4eac4082a42e30b3050ee065278d72fa9b9d5ce3bc1372d4cbd30a2f2976","0xa28f176571e1a9124f95973f414d5bdbf5794d41c3839d8b917100902ac4e2171eb940431236cec93928a60a77ede793","0x838dbe5bcd29c4e465d02350270fa0036cd46f8730b13d91e77afb7f5ed16525d0021d3b2ae173a76c378516a903e0cb","0x8e105d012dd3f5d20f0f1c4a7e7f09f0fdd74ce554c3032e48da8cce0a77260d7d47a454851387770f5c256fa29bcb88","0x8f4df0f9feeb7a487e1d138d13ea961459a6402fd8f8cabb226a92249a0d04ded5971f3242b9f90d08da5ff66da28af6","0xad1cfda4f2122a20935aa32fb17c536a3653a18617a65c6836700b5537122af5a8206befe9eaea781c1244c43778e7f1","0x832c6f01d6571964ea383292efc8c8fa11e61c0634a25fa180737cc7ab57bc77f25e614aac9a2a03d98f27b3c1c29de2","0x903f89cc13ec6685ac7728521898781fecb300e9094ef913d530bf875c18bcc3ceed7ed51e7b482d45619ab4b025c2e9","0xa03c474bb915aad94f171e8d96f46abb2a19c9470601f4c915512ec8b9e743c3938450a2a5b077b4618b9df8809e1dc1","0x83536c8456f306045a5f38ae4be2e350878fa7e164ea408d467f8c3bc4c2ee396bd5868008c089183868e4dfad7aa50b","0x88f26b4ea1b236cb326cd7ad7e2517ec8c4919598691474fe15d09cabcfc37a8d8b1b818f4d112432ee3a716b0f37871","0xa44324e3fe96e9c12b40ded4f0f3397c8c7ee8ff5e96441118d8a6bfad712d3ac990b2a6a23231a8f691491ac1fd480f","0xb0de4693b4b9f932191a21ee88629964878680152a82996c0019ffc39f8d9369bbe2fe5844b68d6d9589ace54af947e4","0x8e5d8ba948aea5fd26035351a960e87f0d
23efddd8e13236cc8e4545a3dda2e9a85e6521efb8577e03772d3637d213d9","0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556","0x8731176363ad7658a2862426ee47a5dce9434216cef60e6045fa57c40bb3ce1e78dac4510ae40f1f31db5967022ced32","0xb10c9a96745722c85bdb1a693100104d560433d45b9ac4add54c7646a7310d8e9b3ca9abd1039d473ae768a18e489845","0xa2ac374dfbb464bf850b4a2caf15b112634a6428e8395f9c9243baefd2452b4b4c61b0cb2836d8eae2d57d4900bf407e","0xb69fe3ded0c4f5d44a09a0e0f398221b6d1bf5dbb8bc4e338b93c64f1a3cac1e4b5f73c2b8117158030ec03787f4b452","0x8852cdbaf7d0447a8c6f211b4830711b3b5c105c0f316e3a6a18dcfbb9be08bd6f4e5c8ae0c3692da08a2dfa532f9d5c","0x93bbf6d7432a7d98ade3f94b57bf9f4da9bc221a180a370b113066dd42601bb9e09edd79e2e6e04e00423399339eebda","0xa80941c391f1eeafc1451c59e4775d6a383946ff22997aeaadf806542ba451d3b0f0c6864eeba954174a296efe2c1550","0xa045fe2bb011c2a2f71a0181a8f457a3078470fb74c628eab8b59aef69ffd0d649723bf74d6885af3f028bc5a104fb39","0xb9d8c35911009c4c8cad64692139bf3fc16b78f5a19980790cb6a7aea650a25df4231a4437ae0c351676a7e42c16134f","0x94c79501ded0cfcbab99e1841abe4a00a0252b3870e20774c3da16c982d74c501916ec28304e71194845be6e3113c7ab","0x900a66418b082a24c6348d8644ddb1817df5b25cb33044a519ef47cc8e1f7f1e38d2465b7b96d32ed472d2d17f8414c6","0xb26f45d393b8b2fcb29bdbb16323dc7f4b81c09618519ab3a39f8ee5bd148d0d9f3c0b5dfab55b5ce14a1cb9206d777b","0xaa1a87735fc493a80a96a9a57ca40a6d9c32702bfcaa9869ce1a116ae65d69cefe2f3e79a12454b4590353e96f8912b4","0xa922b188d3d0b69b4e4ea2a2aa076566962844637da12c0832105d7b31dea4a309eee15d12b7a336be3ea36fcbd3e3b7","0x8f3841fcf4105131d8c4d9885e6e11a46c448226401cf99356c291fadb864da9fa9d30f3a73c327f23f9fd99a11d633e","0x9791d1183fae270e226379af6c497e7da803ea854bb20afa74b253239b744c15f670ee808f708ede873e78d79a626c9a","0xa4cad52e3369491ada61bf28ada9e85de4516d21c882e5f1cd845bea9c06e0b2887b0c5527fcff6fc28acd3c04f0a796","0xb9ac86a900899603452bd11a7892a9bfed8054970bfcbeaa8c9d1930db891169e38d6977f5258c25734f96c8462eee3b","0xa3a154c28e5580
656a859f4efc2f5ebfa7eaa84ca40e3f134fa7865e8581586db74992dbfa4036aa252fba103773ddde","0x95cc2a0c1885a029e094f5d737e3ecf4d26b99036453a8773c77e360101f9f98676ee246f6f732a377a996702d55691f","0x842651bbe99720438d8d4b0218feb60481280c05beb17750e9ca0d8c0599a60f873b7fbdcc7d8835ba9a6d57b16eec03","0x81ee54699da98f5620307893dcea8f64670609fa20e5622265d66283adeac122d458b3308c5898e6c57c298db2c8b24f","0xb97868b0b2bc98032d68352a535a1b341b9ff3c7af4e3a7f3ebc82d3419daa1b5859d6aedc39994939623c7cd878bd9b","0xb60325cd5d36461d07ef253d826f37f9ee6474a760f2fff80f9873d01fd2b57711543cdc8d7afa1c350aa753c2e33dea","0x8c205326c11d25a46717b780c639d89714c7736c974ae71287e3f4b02e6605ac2d9b4928967b1684f12be040b7bf2dd3","0x95a392d82db51e26ade6c2ccd3396d7e40aff68fa570b5951466580d6e56dda51775dce5cf3a74a7f28c3cb2eb551c4d","0x8f2cc8071eb56dffb70bda6dd433b556221dc8bba21c53353c865f00e7d4d86c9e39f119ea9a8a12ef583e9a55d9a6b6","0x9449a71af9672aaf8856896d7e3d788b22991a7103f75b08c0abbcc2bfe60fda4ed8ce502cea4511ff0ea52a93e81222","0x857090ab9fdb7d59632d068f3cc8cf27e61f0d8322d30e6b38e780a1f05227199b4cd746aac1311c36c659ef20931f28","0x98a891f4973e7d9aaf9ac70854608d4f7493dffc7e0987d7be9dd6029f6ea5636d24ef3a83205615ca1ff403750058e1","0xa486e1365bbc278dd66a2a25d258dc82f46b911103cb16aab3945b9c95ae87b386313a12b566df5b22322ede0afe25ad","0xa9a1eb399ed95d396dccd8d1ac718043446f8b979ec62bdce51c617c97a312f01376ab7fb87d27034e5f5570797b3c33","0xb7abc3858d7a74bb446218d2f5a037e0fae11871ed9caf44b29b69c500c1fa1dcfad64c9cdccc9d80d5e584f06213deb","0x8cfb09fe2e202faa4cebad932b1d35f5ca204e1c2a0c740a57812ac9a6792130d1312aabd9e9d4c58ca168bfebd4c177","0xa90a305c2cd0f184787c6be596fa67f436afd1f9b93f30e875f817ac2aae8bdd2e6e656f6be809467e6b3ad84adb86b1","0x80a9ef993c2b009ae172cc8f7ec036f5734cf4f4dfa06a7db4d54725e7fbfae5e3bc6f22687bdbb6961939d6f0c87537","0x848ade1901931e72b955d7db1893f07003e1708ff5d93174bac5930b9a732640f0578839203e9b77eb27965c700032d3","0x93fdf4697609c5ae9c33b9ca2f5f1af44abeb2b98dc4fdf732cf7388de086f410730dc384d9b7a7f447bb009653c838
1","0x89ce3fb805aea618b5715c0d22a9f46da696b6fa86794f56fdf1d44155a33d42daf1920bcbe36cbacf3cf4c92df9cbc7","0x829ce2c342cf82aa469c65f724f308f7a750bd1494adc264609cd790c8718b8b25b5cab5858cf4ee2f8f651d569eea67","0xaf2f0cee7bf413204be8b9df59b9e4991bc9009e0d6dbe6815181df0ec2ca93ab8f4f3135b1c14d8f53d74bff0bd6f27","0xb87998cecf7b88cde93d1779f10a521edd5574a2fbd240102978639ec57433ba08cdb53849038a329cebbe74657268d2","0xa64542a1261a6ed3d720c2c3a802303aad8c4c110c95d0f12e05c1065e66f42da494792b6bfc5b9272363f3b1d457f58","0x86a6fd042e4f282fadf07a4bfee03fc96a3aea49f7a00f52bf249a20f1ec892326855410e61f37fbb27d9305eb2fc713","0x967ea5bc403b6db269682f7fd0df90659350d7e1aa66bc4fab4c9dfcd75ed0bba4b52f1cebc5f34dc8ba810793727629","0xa52990f9f3b8616ce3cdc2c74cd195029e6a969753dcf2d1630438700e7d6ebde36538532b3525ac516f5f2ce9dd27a3","0xa64f7ff870bab4a8bf0d4ef6f5c744e9bf1021ed08b4c80903c7ad318e80ba1817c3180cc45cb5a1cae1170f0241655f","0xb00f706fa4de1f663f021e8ad3d155e84ce6084a409374b6e6cd0f924a0a0b51bebaaaf1d228c77233a73b0a5a0df0e9","0x8b882cc3bff3e42babdb96df95fb780faded84887a0a9bab896bef371cdcf169d909f5658649e93006aa3c6e1146d62e","0x9332663ef1d1dcf805c3d0e4ce7a07d9863fb1731172e766b3cde030bf81682cc011e26b773fb9c68e0477b4ae2cfb79","0xa8aa8151348dbd4ef40aaeb699b71b4c4bfd3218560c120d85036d14f678f6736f0ec68e80ce1459d3d35feccc575164","0xa16cd8b729768f51881c213434aa28301fa78fcb554ddd5f9012ee1e4eae7b5cb3dd88d269d53146dea92d10790faf0b","0x86844f0ef9d37142faf3b1e196e44fbe280a3ba4189aa05c356778cb9e3b388a2bff95eed305ada8769935c9974e4c57","0xae2eec6b328fccf3b47bcdac32901ac2744a51beb410b04c81dea34dee4912b619466a4f5e2780d87ecefaebbe77b46d","0x915df4c38d301c8a4eb2dc5b1ba0ffaad67cbb177e0a80095614e9c711f4ef24a4cef133f9d982a63d2a943ba6c8669d","0xae6a2a4dedfc2d1811711a8946991fede972fdf2a389b282471280737536ffc0ac3a6d885b1f8bda0366eb0b229b9979","0xa9b628c63d08b8aba6b1317f6e91c34b2382a6c85376e8ef2410a463c6796740ae936fc4e9e0737cb9455d1daa287bd8","0x848e30bf7edf2546670b390d5cf9ab71f98fcb6add3c0b582cb34996c26a446dee5d1bde4fd
cde4fc80c10936e117b29","0x907d6096c7c8c087d1808dd995d5d2b9169b3768c3f433475b50c2e2bd4b082f4d543afd8b0b0ddffa9c66222a72d51d","0xa59970a2493b07339124d763ac9d793c60a03354539ecbcf6035bc43d1ea6e35718202ae6d7060b7d388f483d971573c","0xb9cfef2af9681b2318f119d8611ff6d9485a68d8044581b1959ab1840cbca576dbb53eec17863d2149966e9feb21122f","0xad47271806161f61d3afa45cdfe2babceef5e90031a21779f83dc8562e6076680525b4970b2f11fe9b2b23c382768323","0x8e425a99b71677b04fe044625d338811fbb8ee32368a424f6ab2381c52e86ee7a6cecedf777dc97181519d41c351bc22","0x86b55b54d7adefc12954a9252ee23ae83efe8b5b4b9a7dc307904413e5d69868c7087a818b2833f9b004213d629be8ad","0xa14fda6b93923dd11e564ae4457a66f397741527166e0b16a8eb91c6701c244fd1c4b63f9dd3515193ec88fa6c266b35","0xa9b17c36ae6cd85a0ed7f6cabc5b47dc8f80ced605db327c47826476dc1fb8f8669aa7a7dc679fbd4ee3d8e8b4bd6a6f","0x82a0829469c1458d959c821148f15dacae9ea94bf56c59a6ab2d4dd8b3d16d73e313b5a3912a6c1f131d73a8f06730c4","0xb22d56d549a53eaef549595924bdb621ff807aa4513feedf3fdcbf7ba8b6b9cfa4481c2f67fc642db397a6b794a8b63a","0x974c59c24392e2cb9294006cbe3c52163e255f3bd0c2b457bdc68a6338e6d5b6f87f716854492f8d880a6b896ccf757c","0xb70d247ba7cad97c50b57f526c2ba915786e926a94e8f8c3eebc2e1be6f4255411b9670e382060049c8f4184302c40b2","0xad80201fe75ef21c3ddbd98cf23591e0d7a3ba1036dfe77785c32f44755a212c31f0ceb0a0b6f5ee9b6dc81f358d30c3","0x8c656e841f9bb90b9a42d425251f3fdbc022a604d75f5845f479ed4be23e02aaf9e6e56cde351dd7449c50574818a199","0x8b88dd3fa209d3063b7c5b058f7249ee9900fbc2287d16da61a0704a0a1d71e45d9c96e1cda7fdf9654534ec44558b22","0x961da00cc8750bd84d253c08f011970ae1b1158ad6778e8ed943d547bceaf52d6d5a212a7de3bf2706688c4389b827d2","0xa5dd379922549a956033e3d51a986a4b1508e575042b8eaa1df007aa77cf0b8c2ab23212f9c075702788fa9c53696133","0xac8fcfde3a349d1e93fc8cf450814e842005c545c4844c0401bc80e6b96cdb77f29285a14455e167c191d4f312e866cd","0xac63d79c799783a8466617030c59dd5a8f92ee6c5204676fd8d881ce5f7f8663bdbeb0379e480ea9b6340ab0dc88e574","0x805874fde19ce359041ae2bd52a39e2841acabfd31f965792f2737d
7137f36d4e4722ede8340d8c95afa6af278af8acb","0x8d2f323a228aa8ba7b7dc1399138f9e6b41df1a16a7069003ab8104b8b68506a45141bc5fe66acf430e23e13a545190b","0xa1610c721a2d9af882bb6b39bea97cff1527a3aea041d25934de080214ae77c959e79957164440686d15ab301e897d4d","0xaba16d29a47fc36f12b654fde513896723e2c700c4190f11b26aa4011da57737ad717daa02794aa3246e4ae5f0b0cc3a","0xa406db2f15fdd135f346cc4846623c47edd195e80ba8c7cb447332095314d565e4040694ca924696bb5ee7f8996ea0ba","0x8b30e2cd9b47d75ba57b83630e40f832249af6c058d4f490416562af451993eec46f3e1f90bc4d389e4c06abd1b32a46","0xaacf9eb7036e248e209adbfc3dd7ce386569ea9b312caa4b240726549db3c68c4f1c8cbf8ed5ea9ea60c7e57c9df3b8e","0xb20fcac63bf6f5ee638a42d7f89be847f348c085ddcbec3fa318f4323592d136c230495f188ef2022aa355cc2b0da6f9","0x811eff750456a79ec1b1249d76d7c1547065b839d8d4aaad860f6d4528eb5b669473dcceeeea676cddbc3980b68461b7","0xb52d14ae33f4ab422f953392ae76a19c618cc31afc96290bd3fe2fb44c954b5c92c4789f3f16e8793f2c0c1691ade444","0xa7826dafeeba0db5b66c4dfcf2b17fd7b40507a5a53ac2e42942633a2cb30b95ba1739a6e9f3b7a0e0f1ec729bf274e2","0x8acfd83ddf7c60dd7c8b20c706a3b972c65d336b8f9b3d907bdd8926ced271430479448100050b1ef17578a49c8fa616","0xaf0c69f65184bb06868029ad46f8465d75c36814c621ac20a5c0b06a900d59305584f5a6709683d9c0e4b6cd08d650a6","0xb6cc8588191e00680ee6c3339bd0f0a17ad8fd7f4be57d5d7075bede0ea593a19e67f3d7c1a20114894ee5bfcab71063","0xa82fd4f58635129dbb6cc3eb9391cf2d28400018b105fc41500fbbd12bd890b918f97d3d359c29dd3b4c4e34391dfab0","0x92fc544ed65b4a3625cf03c41ddff7c039bc22d22c0d59dcc00efd5438401f2606adb125a1d5de294cca216ec8ac35a3","0x906f67e4a32582b71f15940523c0c7ce370336935e2646bdaea16a06995256d25e99df57297e39d6c39535e180456407","0x97510337ea5bbd5977287339197db55c60533b2ec35c94d0a460a416ae9f60e85cee39be82abeeacd5813cf54df05862","0x87e6894643815c0ea48cb96c607266c5ee4f1f82ba5fe352fb77f9b6ed14bfc2b8e09e80a99ac9047dfcf62b2ae26795","0xb6fd55dd156622ad7d5d51b7dde75e47bd052d4e542dd6449e72411f68275775c846dde301e84613312be8c7bce58b07","0xb98461ac71f554b2f03a94e429b255af89e
ec917e208a8e60edf5fc43b65f1d17a20de3f31d2ce9f0cb573c25f2f4d98","0x96f0dea40ca61cefbee41c4e1fe9a7d81fbe1f49bb153d083ab70f5d0488a1f717fd28cedcf6aa18d07cce2c62801898","0x8d7c3ab310184f7dc34b6ce4684e4d29a31e77b09940448ea4daac730b7eb308063125d4dd229046cf11bfd521b771e0","0x96f0564898fe96687918bbf0a6adead99cf72e3a35ea3347e124af9d006221f8e82e5a9d2fe80094d5e8d48e610f415e","0xad50fcb92c2675a398cf07d4c40a579e44bf8d35f27cc330b57e54d5ea59f7d898af0f75dccfe3726e5471133d70f92b","0x828beed62020361689ae7481dd8f116902b522fb0c6c122678e7f949fdef70ead011e0e6bffd25678e388744e17cdb69","0x8349decac1ca16599eee2efc95bcaabf67631107da1d34a2f917884bd70dfec9b4b08ab7bc4379d6c73b19c0b6e54fb8","0xb2a6a2e50230c05613ace9e58bb2e98d94127f196f02d9dddc53c43fc68c184549ca12d713cb1b025d8260a41e947155","0x94ff52181aadae832aed52fc3b7794536e2a31a21fc8be3ea312ca5c695750d37f08002f286b33f4023dba1e3253ecfa","0xa21d56153c7e5972ee9a319501be4faff199fdf09bb821ea9ce64aa815289676c00f105e6f00311b3a5b627091b0d0fc","0xa27a60d219f1f0c971db73a7f563b371b5c9fc3ed1f72883b2eac8a0df6698400c9954f4ca17d7e94e44bd4f95532afb","0xa2fc56fae99b1f18ba5e4fe838402164ce82f8a7f3193d0bbd360c2bac07c46f9330c4c7681ffb47074c6f81ee6e7ac6","0xb748e530cd3afb96d879b83e89c9f1a444f54e55372ab1dcd46a0872f95ce8f49cf2363fc61be82259e04f555937ed16","0x8bf8993e81080c7cbba1e14a798504af1e4950b2f186ab3335b771d6acaee4ffe92131ae9c53d74379d957cb6344d9cd","0x96774d0ef730d22d7ab6d9fb7f90b9ead44285219d076584a901960542756700a2a1603cdf72be4708b267200f6c36a9","0xb47703c2ab17be1e823cc7bf3460db1d6760c0e33862c90ca058845b2ff234b0f9834ddba2efb2ee1770eb261e7d8ffd","0x84319e67c37a9581f8b09b5e4d4ae88d0a7fb4cbb6908971ab5be28070c3830f040b1de83ee663c573e0f2f6198640e4","0x96811875fa83133e0b3c0e0290f9e0e28bca6178b77fdf5350eb19344d453dbd0d71e55a0ef749025a5a2ca0ad251e81","0x81a423423e9438343879f2bfd7ee9f1c74ebebe7ce3cfffc8a11da6f040cc4145c3b527bd3cf63f9137e714dbcb474ef","0xb8c3535701ddbeec2db08e17a4fa99ba6752d32ece5331a0b8743676f421fcb14798afc7c783815484f14693d2f70db8","0x81aee980c876949
bf40782835eec8817d535f6f3f7e00bf402ddd61101fdcd60173961ae90a1cf7c5d060339a18c959d","0x87e67b928d97b62c49dac321ce6cb680233f3a394d4c9a899ac2e8db8ccd8e00418e66cdfd68691aa3cb8559723b580c","0x8eac204208d99a2b738648df96353bbb1b1065e33ee4f6bba174b540bbbd37d205855e1f1e69a6b7ff043ca377651126","0x848e6e7a54ad64d18009300b93ea6f459ce855971dddb419b101f5ac4c159215626fadc20cc3b9ab1701d8f6dfaddd8b","0x88aa123d9e0cf309d46dddb6acf634b1ade3b090a2826d6e5e78669fa1220d6df9a6697d7778cd9b627db17eea846126","0x9200c2a629b9144d88a61151b661b6c4256cc5dadfd1e59a8ce17a013c2d8f7e754aabe61663c3b30f1bc47784c1f8cf","0xb6e1a2827c3bdda91715b0e1b1f10dd363cef337e7c80cac1f34165fc0dea7c8b69747e310563db5818390146ce3e231","0x92c333e694f89f0d306d54105b2a5dcc912dbe7654d9e733edab12e8537350815be472b063e56cfde5286df8922fdecb","0xa6fac04b6d86091158ebb286586ccfec2a95c9786e14d91a9c743f5f05546073e5e3cc717635a0c602cad8334e922346","0xa581b4af77feebc1fb897d49b5b507c6ad513d8f09b273328efbb24ef0d91eb740d01b4d398f2738125dacfe550330cd","0x81c4860cccf76a34f8a2bc3f464b7bfd3e909e975cce0d28979f457738a56e60a4af8e68a3992cf273b5946e8d7f76e2","0x8d1eaa09a3180d8af1cbaee673db5223363cc7229a69565f592fa38ba0f9d582cedf91e15dabd06ebbf2862fc0feba54","0x9832f49b0147f4552402e54593cfa51f99540bffada12759b71fcb86734be8e500eea2d8b3d036710bdf04c901432de9","0x8bdb0e8ec93b11e5718e8c13cb4f5de545d24829fd76161216340108098dfe5148ed25e3b57a89a516f09fa79043734d","0xab96f06c4b9b0b2c0571740b24fca758e6976315053a7ecb20119150a9fa416db2d3a2e0f8168b390bb063f0c1caf785","0xab777f5c52acd62ecf4d1f168b9cc8e1a9b45d4ec6a8ff52c583e867c2239aba98d7d3af977289b367edce03d9c2dfb1","0xa09d3ce5e748da84802436951acc3d3ea5d8ec1d6933505ed724d6b4b0d69973ab0930daec9c6606960f6e541e4a3ce2","0x8ef94f7be4d85d5ad3d779a5cf4d7b2fc3e65c52fb8e1c3c112509a4af77a0b5be994f251e5e40fabeeb1f7d5615c22b","0xa7406a5bf5708d9e10922d3c5c45c03ef891b8d0d74ec9f28328a72be4cdc05b4f2703fa99366426659dfca25d007535","0xb7f52709669bf92a2e070bfe740f422f0b7127392c5589c7f0af71bb5a8428697c762d3c0d74532899da24ea7d8695c2
","0xb9dfb0c8df84104dbf9239ccefa4672ef95ddabb8801b74997935d1b81a78a6a5669a3c553767ec19a1281f6e570f4ff","0xae4d5c872156061ce9195ac640190d8d71dd406055ee43ffa6f9893eb24b870075b74c94d65bc1d5a07a6573282b5520","0xafe6bd3eb72266d333f1807164900dcfa02a7eb5b1744bb3c86b34b3ee91e3f05e38fa52a50dc64eeb4bdb1dd62874b8","0x948043cf1bc2ef3c01105f6a78dc06487f57548a3e6ef30e6ebc51c94b71e4bf3ff6d0058c72b6f3ecc37efd7c7fa8c0","0xa22fd17c2f7ffe552bb0f23fa135584e8d2d8d75e3f742d94d04aded2a79e22a00dfe7acbb57d44e1cdb962fb22ae170","0x8cd0f4e9e4fb4a37c02c1bde0f69359c43ab012eb662d346487be0c3758293f1ca560122b059b091fddce626383c3a8f","0x90499e45f5b9c81426f3d735a52a564cafbed72711d9279fdd88de8038e953bc48c57b58cba85c3b2e4ce56f1ddb0e11","0x8c30e4c034c02958384564cac4f85022ef36ab5697a3d2feaf6bf105049675bbf23d01b4b6814711d3d9271abff04cac","0x81f7999e7eeea30f3e1075e6780bbf054f2fb6f27628a2afa4d41872a385b4216dd5f549da7ce6cf39049b2251f27fb7","0xb36a7191f82fc39c283ffe53fc1f5a9a00b4c64eee7792a8443475da9a4d226cf257f226ea9d66e329af15d8f04984ec","0xaad4da528fdbb4db504f3041c747455baff5fcd459a2efd78f15bdf3aea0bdb808343e49df88fe7a7c8620009b7964a3","0x99ebd8c6dd5dd299517fb6381cfc2a7f443e6e04a351440260dd7c2aee3f1d8ef06eb6c18820b394366ecdfd2a3ce264","0x8873725b81871db72e4ec3643084b1cdce3cbf80b40b834b092767728605825c19b6847ad3dcf328438607e8f88b4410","0xb008ee2f895daa6abd35bd39b6f7901ae4611a11a3271194e19da1cdcc7f1e1ea008fe5c5440e50d2c273784541ad9c5","0x9036feafb4218d1f576ef89d0e99124e45dacaa6d816988e34d80f454d10e96809791d5b78f7fd65f569e90d4d7238c5","0x92073c1d11b168e4fa50988b0288638b4868e48bbc668c5a6dddf5499875d53be23a285acb5e4bad60114f6cf6c556e9","0x88c87dfcb8ba6cbfe7e1be081ccfadbd589301db2cb7c99f9ee5d7db90aa297ed1538d5a867678a763f2deede5fd219a","0xb42a562805c661a50f5dea63108002c0f27c0da113da6a9864c9feb5552225417c0356c4209e8e012d9bcc9d182c7611","0x8e6317d00a504e3b79cd47feb4c60f9df186467fe9ca0f35b55c0364db30528f5ff071109dabb2fc80bb9cd4949f0c24","0xb7b1ea6a88694f8d2f539e52a47466695e39e43a5eb9c6f23bca15305fe52939d8755cc3ac9d
6725e60f82f994a3772f","0xa3cd55161befe795af93a38d33290fb642b8d80da8b786c6e6fb02d393ea308fbe87f486994039cbd7c7b390414594b6","0xb416d2d45b44ead3b1424e92c73c2cf510801897b05d1724ff31cbd741920cd858282fb5d6040fe1f0aa97a65bc49424","0x950ee01291754feace97c2e933e4681e7ddfbc4fcd079eb6ff830b0e481d929c93d0c7fb479c9939c28ca1945c40da09","0x869bd916aee8d86efe362a49010382674825d49195b413b4b4018e88ce43fe091b475d0b863ff0ba2259400f280c2b23","0x9782f38cd9c9d3385ec286ebbc7cba5b718d2e65a5890b0a5906b10a89dc8ed80d417d71d7c213bf52f2af1a1f513ea7","0x91cd33bc2628d096269b23faf47ee15e14cb7fdc6a8e3a98b55e1031ea0b68d10ba30d97e660f7e967d24436d40fad73","0x8becc978129cc96737034c577ae7225372dd855da8811ae4e46328e020c803833b5bdbc4a20a93270e2b8bd1a2feae52","0xa36b1d8076783a9522476ce17f799d78008967728ce920531fdaf88303321bcaf97ecaa08e0c01f77bc32e53c5f09525","0xb4720e744943f70467983aa34499e76de6d59aa6fadf86f6b787fdce32a2f5b535b55db38fe2da95825c51002cfe142d","0x91ad21fc502eda3945f6de874d1b6bf9a9a7711f4d61354f9e5634fc73f9c06ada848de15ab0a75811d3250be862827d","0x84f78e2ebf5fc077d78635f981712daf17e2475e14c2a96d187913006ad69e234746184a51a06ef510c9455b38acb0d7","0x960aa7906e9a2f11db64a26b5892ac45f20d2ccb5480f4888d89973beb6fa0dfdc06d68d241ff5ffc7f1b82b1aac242d","0xa99365dcd1a00c66c9db6924b97c920f5c723380e823b250db85c07631b320ec4e92e586f7319e67a522a0578f7b6d6c","0xa25d92d7f70cf6a88ff317cfec071e13774516da664f5fac0d4ecaa65b8bf4eb87a64a4d5ef2bd97dfae98d388dbf5cc","0xa7af47cd0041295798f9779020a44653007444e8b4ef0712982b06d0dcdd434ec4e1f7c5f7a049326602cb605c9105b7","0xaefe172eac5568369a05980931cc476bebd9dea573ba276d59b9d8c4420784299df5a910033b7e324a6c2dfc62e3ef05","0xb69bc9d22ffa645baa55e3e02522e9892bb2daa7fff7c15846f13517d0799766883ee09ae0869df4139150c5b843ca8a","0x95a10856140e493354fdd12722c7fdded21b6a2ffbc78aa2697104af8ad0c8e2206f44b0bfee077ef3949d46bbf7c16b","0x891f2fcd2c47cbea36b7fa715968540c233313f05333f09d29aba23c193f462ed490dd4d00969656e89c53155fdfe710","0xa6c33e18115e64e385c843dde34e8a228222795c7ca90bc2cc085705
d609025f3351d9be61822c69035a49fb3e48f2d5","0xb87fb12f12c0533b005adad0487f03393ff682e13575e3cb57280c3873b2c38ba96a63c49eef7a442753d26b7005230b","0xb905c02ba451bfd411c135036d92c27af3b0b1c9c2f1309d6948544a264b125f39dd41afeff4666b12146c545adc168a","0x8b29c513f43a78951cf742231cf5457a6d9d55edf45df5481a0f299a418d94effef561b15d2c1a01d1b8067e7153fda9","0xb9941cccd51dc645920d2781c81a317e5a33cb7cf76427b60396735912cb6d2ca9292bb4d36b6392467d390d2c58d9f3","0xa8546b627c76b6ef5c93c6a98538d8593dbe21cb7673fd383d5401b0c935eea0bdeeefeb1af6ad41bad8464fb87bbc48","0xaa286b27de2812de63108a1aec29d171775b69538dc6198640ac1e96767c2b83a50391f49259195957d457b493b667c9","0xa932fb229f641e9abbd8eb2bd874015d97b6658ab6d29769fc23b7db9e41dd4f850382d4c1f08af8f156c5937d524473","0xa1412840fcc86e2aeec175526f2fb36e8b3b8d21a78412b7266daf81e51b3f68584ed8bd42a66a43afdd8c297b320520","0x89c78be9efb624c97ebca4fe04c7704fa52311d183ffd87737f76b7dadc187c12c982bd8e9ed7cd8beb48cdaafd2fd01","0xa3f5ddec412a5bec0ce15e3bcb41c6214c2b05d4e9135a0d33c8e50a78eaba71e0a5a6ea8b45854dec5c2ed300971fc2","0x9721f9cec7a68b7758e3887548790de49fa6a442d0396739efa20c2f50352a7f91d300867556d11a703866def2d5f7b5","0xa23764e140a87e5991573521af039630dd28128bf56eed2edbed130fd4278e090b60cf5a1dca9de2910603d44b9f6d45","0xa1a6494a994215e48ab55c70efa8ffdddce6e92403c38ae7e8dd2f8288cad460c6c7db526bbdf578e96ca04d9fe12797","0xb1705ea4cb7e074efe0405fc7b8ee2ec789af0426142f3ec81241cacd4f7edcd88e39435e4e4d8e7b1df64f3880d6613","0x85595d061d677116089a6064418b93eb44ff79e68d12bd9625078d3bbc440a60d0b02944eff6054433ee34710ae6fbb4","0x9978d5e30bedb7526734f9a1febd973a70bfa20890490e7cc6f2f9328feab1e24f991285dbc3711d892514e2d7d005ad","0xaf30243c66ea43b9f87a061f947f7bce745f09194f6e95f379c7582b9fead920e5d6957eaf05c12ae1282ada4670652f","0xa1930efb473f88001e47aa0b2b2a7566848cccf295792e4544096ecd14ee5d7927c173a8576b405bfa2eec551cd67eb5","0xb0446d1c590ee5a45f7e22d269c044f3848c97aec1d226b44bfd0e94d9729c28a38bccddc3a1006cc5fe4e3c24f001f2","0xb8a8380172df3d84b06176df916cf557966d
4f2f716d3e9437e415d75b646810f79f2b2b71d857181b7fc944018883a3","0xa563afec25b7817bfa26e19dc9908bc00aa8fc3d19be7d6de23648701659009d10e3e4486c28e9c6b13d48231ae29ac5","0xa5a8e80579de886fb7d6408f542791876885947b27ad6fa99a8a26e381f052598d7b4e647b0115d4b5c64297e00ce28e","0x8f87afcc7ad33c51ac719bade3cd92da671a37a82c14446b0a2073f4a0a23085e2c8d31913ed2d0be928f053297de8f6","0xa43c455ce377e0bc434386c53c752880687e017b2f5ae7f8a15c044895b242dffde4c92fb8f8bb50b18470b17351b156","0x8368f8b12a5bceb1dba25adb3a2e9c7dc9b1a77a1f328e5a693f5aec195cd1e06b0fe9476b554c1c25dac6c4a5b640a3","0x919878b27f3671fc78396f11531c032f3e2bd132d04cc234fa4858676b15fb1db3051c0b1db9b4fc49038216f11321ce","0xb48cd67fb7f1242696c1f877da4bdf188eac676cd0e561fbac1a537f7b8229aff5a043922441d603a26aae56a15faee4","0xa3e0fdfd4d29ea996517a16f0370b54787fefe543c2fe73bfc6f9e560c1fd30dad8409859e2d7fa2d44316f24746c712","0x8bb156ade8faf149df7bea02c140c7e392a4742ae6d0394d880a849127943e6f26312033336d3b9fdc0092d71b5efe87","0x8845e5d5cc555ca3e0523244300f2c8d7e4d02aaebcb5bd749d791208856c209a6f84dd99fd55968c9f0ab5f82916707","0xa3e90bb5c97b07789c2f32dff1aec61d0a2220928202f5ad5355ae71f8249237799d6c8a22602e32e572cb12eabe0c17","0xb150bcc391884c996149dc3779ce71f15dda63a759ee9cc05871f5a8379dcb62b047098922c0f26c7bd04deb394c33f9","0x95cd4ad88d51f0f2efcfd0c2df802fe252bb9704d1afbf9c26a248df22d55da87bdfaf41d7bc6e5df38bd848f0b13f42","0xa05a49a31e91dff6a52ac8b9c2cfdd646a43f0d488253f9e3cfbce52f26667166bbb9b608fc358763a65cbf066cd6d05","0xa59c3c1227fdd7c2e81f5e11ef5c406da44662987bac33caed72314081e2eed66055d38137e01b2268e58ec85dd986c0","0xb7020ec3bd73a99861f0f1d88cf5a19abab1cbe14b7de77c9868398c84bb8e18dbbe9831838a96b6d6ca06e82451c67b","0x98d1ff2525e9718ee59a21d8900621636fcd873d9a564b8dceb4be80a194a0148daf1232742730b3341514b2e5a5436c","0x886d97b635975fc638c1b6afc493e5998ca139edba131b75b65cfe5a8e814f11bb678e0eeee5e6e5cd913ad3f2fefdfc","0x8fb9fd928d38d5d813b671c924edd56601dd7163b686c13f158645c2f869d9250f3859aa5463a39258c90fef0f41190a","0xaac35e1cd655c94d
ec3580bb3800bd9c2946c4a9856f7d725af15fbea6a2d8ca51c8ad2772abed60ee0e3fb9cb24046b","0xb8d71fa0fa05ac9e443c9b4929df9e7f09a919be679692682e614d24227e04894bfc14a5c73a62fb927fedff4a0e4aa7","0xa45a19f11fbbb531a704badbb813ed8088ab827c884ee4e4ebf363fa1132ff7cfa9d28be9c85b143e4f7cdbc94e7cf1a","0x82b54703a4f295f5471b255ab59dce00f0fe90c9fb6e06b9ee48b15c91d43f4e2ef4a96c3118aeb03b08767be58181bb","0x8283264c8e6d2a36558f0d145c18576b6600ff45ff99cc93eca54b6c6422993cf392668633e5df396b9331e873d457e5","0x8c549c03131ead601bc30eb6b9537b5d3beb7472f5bb1bcbbfd1e9f3704477f7840ab3ab7f7dc13bbbbcdff886a462d4","0xafbb0c520ac1b5486513587700ad53e314cb74bfbc12e0b5fbdcfdaac36d342e8b59856196a0d84a25cff6e6e1d17e76","0x89e4c22ffb51f2829061b3c7c1983c5c750cad158e3a825d46f7cf875677da5d63f653d8a297022b5db5845c9271b32b","0xafb27a86c4c2373088c96b9adf4433f2ebfc78ac5c526e9f0510670b6e4e5e0057c0a4f75b185e1a30331b9e805c1c15","0xa18e16b57445f88730fc5d3567bf5a176861dc14c7a08ed2996fe80eed27a0e7628501bcb78a1727c5e9ac55f29c12c4","0x93d61bf88b192d6825cf4e1120af1c17aa0f994d158b405e25437eaeefae049f7b721a206e7cc8a04fdc29d3c42580a1","0xa99f2995a2e3ed2fd1228d64166112038de2f516410aa439f4c507044e2017ea388604e2d0f7121256fadf7fbe7023d1","0x914fd91cffc23c32f1c6d0e98bf660925090d873367d543034654389916f65f552e445b0300b71b61b721a72e9a5983c","0xb42a578a7787b71f924e7def425d849c1c777156b1d4170a8ee7709a4a914e816935131afd9a0412c4cb952957b20828","0x82fb30590e84b9e45db1ec475a39971cf554dc01bcc7050bc89265740725c02e2be5a972168c5170c86ae83e5b0ad2c0","0xb14f8d8e1e93a84976289e0cf0dfa6f3a1809e98da16ee5c4932d0e1ed6bf8a07697fdd4dd86a3df84fb0003353cdcc0","0x85d7a2f4bda31aa2cb208b771fe03291a4ebdaf6f1dc944c27775af5caec412584c1f45bc741fca2a6a85acb3f26ad7d","0xaf02e56ce886ff2253bc0a68faad76f25ead84b2144e5364f3fb9b648f03a50ee9dc0b2c33ebacf7c61e9e43201ef9ef","0x87e025558c8a0b0abd06dfc350016847ea5ced7af2d135a5c9eec9324a4858c4b21510fb0992ec52a73447f24945058e","0x80fff0bafcd058118f5e7a4d4f1ae0912efeb281d2cbe4d34ba8945cc3dbe5d8baf47fb077343b90b8d895c90b297aca"
,"0xb6edcf3a40e7b1c3c0148f47a263cd819e585a51ef31c2e35a29ce6f04c53e413f743034c0d998d9c00a08ba00166f31","0xabb87ed86098c0c70a76e557262a494ff51a30fb193f1c1a32f8e35eafa34a43fcc07aa93a3b7a077d9e35afa07b1a3d","0xa280214cd3bb0fb7ecd2d8bcf518cbd9078417f2b91d2533ec2717563f090fb84f2a5fcfdbbeb2a2a1f8a71cc5aa5941","0xa63083ca7238ea2b57d15a475963cf1d4f550d8cd76db290014a0461b90351f1f26a67d674c837b0b773b330c7c3d534","0xa8fa39064cb585ece5263e2f42f430206476bf261bd50f18d2b694889bd79d04d56410664cecad62690e5c5a20b3f6ff","0x85ba52ce9d700a5dcf6c5b00559acbe599d671ce5512467ff4b6179d7fad550567ce2a9c126a50964e3096458ea87920","0xb913501e1008f076e5eac6d883105174f88b248e1c9801e568fefaffa1558e4909364fc6d9512aa4d125cbd7cc895f05","0x8eb33b5266c8f2ed4725a6ad147a322e44c9264cf261c933cbbe230a43d47fca0f29ec39756b20561dabafadd5796494","0x850ebc8b661a04318c9db5a0515066e6454fa73865aa4908767a837857ecd717387f614acb614a88e075d4edc53a2f5a","0xa08d6b92d866270f29f4ce23a3f5d99b36b1e241a01271ede02817c8ec3f552a5c562db400766c07b104a331835c0c64","0x8131804c89bb3e74e9718bfc4afa547c1005ff676bd4db9604335032b203390cfa54478d45c6c78d1fe31a436ed4be9f","0x9106d94f23cc1eacec8316f16d6f0a1cc160967c886f51981fdb9f3f12ee1182407d2bb24e5b873de58cb1a3ee915a6b","0xa13806bfc3eae7a7000c9d9f1bd25e10218d4e67f59ae798b145b098bca3edad2b1040e3fc1e6310e612fb8818f459ac","0x8c69fbca502046cb5f6db99900a47b34117aef3f4b241690cdb3b84ca2a2fc7833e149361995dc41fa78892525bce746","0x852c473150c91912d58ecb05769222fa18312800c3f56605ad29eec9e2d8667b0b81c379048d3d29100ed2773bb1f3c5","0xb1767f6074426a00e01095dbb1795beb4e4050c6411792cbad6537bc444c3165d1058bafd1487451f9c5ddd209e0ae7e","0x80c600a5fe99354ce59ff0f84c760923dc8ff66a30bf47dc0a086181785ceb01f9b951c4e66df800ea6d705e8bc47055","0xb5cf19002fbc88a0764865b82afcb4d64a50196ea361e5c71dff7de084f4dcbbc34ec94a45cc9e0247bd51da565981aa","0x93e67a254ea8ce25e112d93cc927fadaa814152a2c4ec7d9a56eaa1ed47aec99b7e9916b02e64452cc724a6641729bbb","0xace70b32491bda18eee4a4d041c3bc9effae9340fe7e6c2f5ad975ee0874c17f1a7da7c96bd85
fccff9312c518fac6e9","0xab4cfa02065017dd7f1aadc66f2c92f78f0f11b8597c03a5d69d82cb2eaf95a4476a836ac102908f137662472c8d914b","0xa40b8cd8deb8ae503d20364d64cab7c2801b7728a9646ed19c65edea6a842756a2f636283494299584ad57f4bb12cd0b","0x8594e11d5fc2396bcd9dbf5509ce4816dbb2b7305168021c426171fb444d111da5a152d6835ad8034542277011c26c0e","0x8024de98c26b4c994a66628dc304bb737f4b6859c86ded552c5abb81fd4c6c2e19d5a30beed398a694b9b2fdea1dd06a","0x8843f5872f33f54df8d0e06166c1857d733995f67bc54abb8dfa94ad92407cf0179bc91b0a50bbb56cdc2b350d950329","0xb8bab44c7dd53ef9edf497dcb228e2a41282c90f00ba052fc52d57e87b5c8ab132d227af1fcdff9a12713d1f980bcaae","0x982b4d7b29aff22d527fd82d2a52601d95549bfb000429bb20789ed45e5abf1f4b7416c7b7c4b79431eb3574b29be658","0x8eb1f571b6a1878e11e8c1c757e0bc084bab5e82e897ca9be9b7f4b47b91679a8190bf0fc8f799d9b487da5442415857","0xa6e74b588e5af935c8b243e888582ef7718f8714569dd4992920740227518305eb35fab674d21a5551cca44b3e511ef2","0xa30fc2f3a4cb4f50566e82307de73cd7bd8fe2c1184e9293c136a9b9e926a018d57c6e4f308c95b9eb8299e94d90a2a1","0xa50c5869ca5d2b40722c056a32f918d47e0b65ca9d7863ca7d2fb4a7b64fe523fe9365cf0573733ceaadebf20b48fff8","0x83bbdd32c04d17581418cf360749c7a169b55d54f2427390defd9f751f100897b2d800ce6636c5bbc046c47508d60c8c","0xa82904bdf614de5d8deaff688c8a5e7ac5b3431687acbcda8fa53960b7c417a39c8b2e462d7af91ce6d79260f412db8e","0xa4362e31ff4b05d278b033cf5eebea20de01714ae16d4115d04c1da4754269873afc8171a6f56c5104bfd7b0db93c3e7","0xb5b8daa63a3735581e74a021b684a1038cea77168fdb7fdf83c670c2cfabcfc3ab2fc7359069b5f9048188351aef26b5","0xb48d723894b7782d96ac8433c48faca1bdfa5238019c451a7f47d958097cce3ae599b876cf274269236b9d6ff8b6d7ca","0x98ffff6a61a3a6205c7820a91ca2e7176fab5dba02bc194c4d14942ac421cb254183c705506ab279e4f8db066f941c6c","0xae7db24731da2eaa6efc4f7fcba2ecc26940ddd68038dce43acf2cee15b72dc4ef42a7bfdd32946d1ed78786dd7696b3","0xa656db14f1de9a7eb84f6301b4acb2fbf78bfe867f48a270e416c974ab92821eb4df1cb881b2d600cfed0034ac784641","0xaa315f8ecba85a5535e9a49e558b15f39520fce5d4bf43131bfbf2e2c
9dfccc829074f9083e8d49f405fb221d0bc4c3c","0x90bffba5d9ff40a62f6c8e9fc402d5b95f6077ed58d030c93e321b8081b77d6b8dac3f63a92a7ddc01585cf2c127d66c","0xabdd733a36e0e0f05a570d0504e73801bf9b5a25ff2c78786f8b805704997acb2e6069af342538c581144d53149fa6d3","0xb4a723bb19e8c18a01bd449b1bb3440ddb2017f10bb153da27deb7a6a60e9bb37619d6d5435fbb1ba617687838e01dd0","0x870016b4678bab3375516db0187a2108b2e840bae4d264b9f4f27dbbc7cc9cac1d7dc582d7a04d6fd1ed588238e5e513","0x80d33d2e20e8fc170aa3cb4f69fffb72aeafb3b5bb4ea0bc79ab55da14142ca19b2d8b617a6b24d537366e3b49cb67c3","0xa7ee76aec273aaae03b3b87015789289551969fb175c11557da3ab77e39ab49d24634726f92affae9f4d24003050d974","0x8415ea4ab69d779ebd42d0fe0c6aef531d6a465a5739e429b1fcf433ec45aa8296c527e965a20f0ec9f340c9273ea3cf","0x8c7662520794e8b4405d0b33b5cac839784bc86a5868766c06cbc1fa306dbe334978177417b31baf90ce7b0052a29c56","0x902b2abecc053a3dbdea9897ee21e74821f3a1b98b2d560a514a35799f4680322550fd3a728d4f6d64e1de98033c32b8","0xa05e84ed9ecab8d508d670c39f2db61ad6e08d2795ec32a3c9d0d3737ef3801618f4fc2a95f90ec2f068606131e076c5","0x8b9208ff4d5af0c2e3f53c9375da666773ac57197dfabb0d25b1c8d0588ba7f3c15ee9661bb001297f322ea2fbf6928b","0xa3c827741b34a03254d4451b5ab74a96f2b9f7fb069e2f5adaf54fd97cc7a4d516d378db5ca07da87d8566d6eef13726","0x8509d8a3f4a0ed378e0a1e28ea02f6bf1d7f6c819c6c2f5297c7df54c895b848f841653e32ba2a2c22c2ff739571acb8","0xa0ce988b7d3c40b4e496aa83a09e4b5472a2d98679622f32bea23e6d607bc7de1a5374fb162bce0549a67dad948519be","0xaa8a3dd12bd60e3d2e05f9c683cdcb8eab17fc59134815f8d197681b1bcf65108cba63ac5c58ee632b1e5ed6bba5d474","0x8b955f1d894b3aefd883fb4b65f14cd37fc2b9db77db79273f1700bef9973bf3fd123897ea2b7989f50003733f8f7f21","0xac79c00ddac47f5daf8d9418d798d8af89fc6f1682e7e451f71ea3a405b0d36af35388dd2a332af790bc83ca7b819328","0xa0d44dd2a4438b809522b130d0938c3fe7c5c46379365dbd1810a170a9aa5818e1c783470dd5d0b6d4ac7edbb7330910","0xa30b69e39ad43dd540a43c521f05b51b5f1b9c4eed54b8162374ae11eac25da4f5756e7b70ce9f3c92c2eeceee7431ed","0xac43220b762c299c7951222ea19761ab938bf
38e4972deef58ed84f4f9c68c230647cf7506d7cbfc08562fcca55f0485","0xb28233b46a8fb424cfa386a845a3b5399d8489ceb83c8f3e05c22c934798d639c93718b7b68ab3ce24c5358339e41cbb","0xac30d50ee8ce59a10d4b37a3a35e62cdb2273e5e52232e202ca7d7b8d09d28958ee667fae41a7bb6cdc6fe8f6e6c9c85","0xb199842d9141ad169f35cc7ff782b274cbaa645fdb727761e0a89edbf0d781a15f8218b4bf4eead326f2903dd88a9cc1","0x85e018c7ddcad34bb8285a737c578bf741ccd547e68c734bdb3808380e12c5d4ef60fc896b497a87d443ff9abd063b38","0x8c856e6ba4a815bdb891e1276f93545b7072f6cb1a9aa6aa5cf240976f29f4dee01878638500a6bf1daf677b96b54343","0xb8a47555fa8710534150e1a3f13eab33666017be6b41005397afa647ea49708565f2b86b77ad4964d140d9ced6b4d585","0x8cd1f1db1b2f4c85a3f46211599caf512d5439e2d8e184663d7d50166fd3008f0e9253272f898d81007988435f715881","0xb1f34b14612c973a3eceb716dc102b82ab18afef9de7630172c2780776679a7706a4874e1df3eaadf541fb009731807f","0xb25464af9cff883b55be2ff8daf610052c02df9a5e147a2cf4df6ce63edcdee6dc535c533590084cc177da85c5dc0baa","0x91c3c4b658b42d8d3448ae1415d4541d02379a40dc51e36a59bd6e7b9ba3ea51533f480c7c6e8405250ee9b96a466c29","0x86dc027b95deb74c36a58a1333a03e63cb5ae22d3b29d114cfd2271badb05268c9d0c819a977f5e0c6014b00c1512e3a","0xae0e6ff58eb5fa35da5107ebeacf222ab8f52a22bb1e13504247c1dfa65320f40d97b0e6b201cb6613476687cb2f0681","0x8f13415d960b9d7a1d93ef28afc2223e926639b63bdefce0f85e945dfc81670a55df288893a0d8b3abe13c5708f82f91","0x956f67ca49ad27c1e3a68c1faad5e7baf0160c459094bf6b7baf36b112de935fdfd79fa4a9ea87ea8de0ac07272969f4","0x835e45e4a67df9fb51b645d37840b3a15c171d571a10b03a406dd69d3c2f22df3aa9c5cbe1e73f8d767ce01c4914ea9a","0x919b938e56d4b32e2667469d0bdccb95d9dda3341aa907683ee70a14bbbe623035014511c261f4f59b318b610ac90aa3","0x96b48182121ccd9d689bf1dfdc228175564cd68dc904a99c808a7f0053a6f636c9d953e12198bdf2ea49ea92772f2e18","0xac5e5a941d567fa38fdbcfa8cf7f85bb304e3401c52d88752bcd516d1fa9bac4572534ea2205e38423c1df065990790f","0xac0bd594fb85a8d4fc26d6df0fa81f11919401f1ecf9168b891ec7f061a2d9368af99f7fd8d9b43b2ce361e7b8482159","0x83d92c69ca540d298
fe80d8162a1c7af3fa9b49dfb69e85c1d136a3ec39fe419c9fa78e0bb6d96878771fbd37fe92e40","0xb35443ae8aa66c763c2db9273f908552fe458e96696b90e41dd509c17a5c04ee178e3490d9c6ba2dc0b8f793c433c134","0x923b2d25aa45b2e580ffd94cbb37dc8110f340f0f011217ee1bd81afb0714c0b1d5fb4db86006cdd2457563276f59c59","0x96c9125d38fca1a61ac21257b696f8ac3dae78def50285e44d90ea293d591d1c58f703540a7e4e99e070afe4646bbe15","0xb57946b2332077fbcdcb406b811779aefd54473b5559a163cd65cb8310679b7e2028aa55c12a1401fdcfcac0e6fae29a","0x845daedc5cf972883835d7e13c937b63753c2200324a3b8082a6c4abb4be06c5f7c629d4abe4bfaf1d80a1f073eb6ce6","0x91a55dfd0efefcd03dc6dacc64ec93b8d296cb83c0ee72400a36f27246e7f2a60e73b7b70ba65819e9cfb73edb7bd297","0x8874606b93266455fe8fdd25df9f8d2994e927460af06f2e97dd4d2d90db1e6b06d441b72c2e76504d753badca87fb37","0x8ee99e6d231274ff9252c0f4e84549da173041299ad1230929c3e3d32399731c4f20a502b4a307642cac9306ccd49d3c","0x8836497714a525118e20849d6933bb8535fb6f72b96337d49e3133d936999c90a398a740f42e772353b5f1c63581df6d","0xa6916945e10628f7497a6cdc5e2de113d25f7ade3e41e74d3de48ccd4fce9f2fa9ab69645275002e6f49399b798c40af","0x9597706983107eb23883e0812e1a2c58af7f3499d50c6e29b455946cb9812fde1aa323d9ed30d1c0ffd455abe32303cd","0xa24ee89f7f515cc33bdbdb822e7d5c1877d337f3b2162303cfc2dae028011c3a267c5cb4194afa63a4856a6e1c213448","0x8cd25315e4318801c2776824ae6e7d543cb85ed3bc2498ba5752df2e8142b37653cf9e60104d674be3aeb0a66912e97a","0xb5085ecbe793180b40dbeb879f4c976eaaccaca3a5246807dced5890e0ed24d35f3f86955e2460e14fb44ff5081c07ba","0x960188cc0b4f908633a6840963a6fa2205fc42c511c6c309685234911c5304ef4c304e3ae9c9c69daa2fb6a73560c256","0xa32d0a70bf15d569b4cda5aebe3e41e03c28bf99cdd34ffa6c5d58a097f322772acca904b3a47addb6c7492a7126ebac","0x977f72d06ad72d4aa4765e0f1f9f4a3231d9f030501f320fe7714cc5d329d08112789fa918c60dd7fdb5837d56bb7fc6","0x99fa038bb0470d45852bb871620d8d88520adb701712fcb1f278fed2882722b9e729e6cdce44c82caafad95e37d0e6f7","0xb855e8f4fc7634ada07e83b6c719a1e37acb06394bc8c7dcab7747a8c54e5df3943915f021364bd019fdea103864e55f",
"0x88bc2cd7458532e98c596ef59ea2cf640d7cc31b4c33cef9ed065c078d1d4eb49677a67de8e6229cc17ea48bace8ee5a","0xaaa78a3feaa836d944d987d813f9b9741afb076e6aca1ffa42682ab06d46d66e0c07b8f40b9dbd63e75e81efa1ef7b08","0xb7b080420cc4d808723b98b2a5b7b59c81e624ab568ecdfdeb8bf3aa151a581b6f56e983ef1b6f909661e25db40b0c69","0xabee85c462ac9a2c58e54f06c91b3e5cd8c5f9ab5b5deb602b53763c54826ed6deb0d6db315a8d7ad88733407e8d35e2","0x994d075c1527407547590df53e9d72dd31f037c763848d1662eebd4cefec93a24328c986802efa80e038cb760a5300f5","0xab8777640116dfb6678e8c7d5b36d01265dfb16321abbfc277da71556a34bb3be04bc4ae90124ed9c55386d2bfb3bda0","0x967e3a828bc59409144463bcf883a3a276b5f24bf3cbfdd7a42343348cba91e00b46ac285835a9b91eef171202974204","0x875a9f0c4ffe5bb1d8da5e3c8e41d0397aa6248422a628bd60bfae536a651417d4e8a7d2fb98e13f2dad3680f7bd86d3","0xacaa330c3e8f95d46b1880126572b238dbb6d04484d2cd4f257ab9642d8c9fc7b212188b9c7ac9e0fd135c520d46b1bf","0xaceb762edbb0f0c43dfcdb01ea7a1ac5918ca3882b1e7ebc4373521742f1ed5250d8966b498c00b2b0f4d13212e6dd0b","0x81d072b4ad258b3646f52f399bced97c613b22e7ad76373453d80b1650c0ca87edb291a041f8253b649b6e5429bb4cff","0x980a47d27416ac39c7c3a0ebe50c492f8c776ea1de44d5159ac7d889b6d554357f0a77f0e5d9d0ff41aae4369eba1fc2","0x8b4dfd5ef5573db1476d5e43aacfb5941e45d6297794508f29c454fe50ea622e6f068b28b3debe8635cf6036007de2e3","0xa60831559d6305839515b68f8c3bc7abbd8212cc4083502e19dd682d56ca37c9780fc3ce4ec2eae81ab23b221452dc57","0x951f6b2c1848ced9e8a2339c65918e00d3d22d3e59a0a660b1eca667d18f8430d737884e9805865ef3ed0fe1638a22d9","0xb02e38fe790b492aa5e89257c4986c9033a8b67010fa2add9787de857d53759170fdd67715ca658220b4e14b0ca48124","0xa51007e4346060746e6b0e4797fc08ef17f04a34fe24f307f6b6817edbb8ce2b176f40771d4ae8a60d6152cbebe62653","0xa510005b05c0b305075b27b243c9d64bcdce85146b6ed0e75a3178b5ff9608213f08c8c9246f2ca6035a0c3e31619860","0xaaff4ef27a7a23be3419d22197e13676d6e3810ceb06a9e920d38125745dc68a930f1741c9c2d9d5c875968e30f34ab5","0x864522a9af9857de9814e61383bebad1ba9a881696925a0ea6bfc6eff520d42c506bbe5685a994
6ed710e889765be4a0","0xb63258c080d13f3b7d5b9f3ca9929f8982a6960bdb1b0f8676f4dca823971601672f15e653917bf5d3746bb220504913","0xb51ce0cb10869121ae310c7159ee1f3e3a9f8ad498827f72c3d56864808c1f21fa2881788f19ece884d3f705cd7bd0c5","0x95d9cecfc018c6ed510e441cf84c712d9909c778c16734706c93222257f64dcd2a9f1bd0b400ca271e22c9c487014274","0x8beff4d7d0140b86380ff4842a9bda94c2d2be638e20ac68a4912cb47dbe01a261857536375208040c0554929ced1ddc","0x891ff49258749e2b57c1e9b8e04b12c77d79c3308b1fb615a081f2aacdfb4b39e32d53e069ed136fdbd43c53b87418fa","0x9625cad224e163d387738825982d1e40eeff35fe816d10d7541d15fdc4d3eee48009090f3faef4024b249205b0b28f72","0x8f3947433d9bd01aa335895484b540a9025a19481a1c40b4f72dd676bfcf332713714fd4010bde936eaf9470fd239ed0","0xa00ec2d67789a7054b53f0e858a8a232706ccc29a9f3e389df7455f1a51a2e75801fd78469a13dbc25d28399ae4c6182","0xa3f65884506d4a62b8775a0ea0e3d78f5f46bc07910a93cd604022154eabdf1d73591e304d61edc869e91462951975e1","0xa14eef4fd5dfac311713f0faa9a60415e3d30b95a4590cbf95f2033dffb4d16c02e7ceff3dcd42148a4e3bc49cce2dd4","0x8afa11c0eef3c540e1e3460bc759bb2b6ea90743623f88e62950c94e370fe4fd01c22b6729beba4dcd4d581198d9358f","0xafb05548a69f0845ffcc5f5dc63e3cdb93cd270f5655173b9a950394b0583663f2b7164ba6df8d60c2e775c1d9f120af","0x97f179e01a947a906e1cbeafa083960bc9f1bade45742a3afee488dfb6011c1c6e2db09a355d77f5228a42ccaa7bdf8e","0x8447fca4d35f74b3efcbd96774f41874ca376bf85b79b6e66c92fa3f14bdd6e743a051f12a7fbfd87f319d1c6a5ce217","0xa57ca39c23617cd2cf32ff93b02161bd7baf52c4effb4679d9d5166406e103bc8f3c6b5209e17c37dbb02deb8bc72ddd","0x9667c7300ff80f0140be002b0e36caab07aaee7cce72679197c64d355e20d96196acaf54e06e1382167d081fe6f739c1","0x828126bb0559ce748809b622677267ca896fa2ee76360fd2c02990e6477e06a667241379ca7e65d61a5b64b96d7867de","0x8b8835dea6ba8cf61c91f01a4b3d2f8150b687a4ee09b45f2e5fc8f80f208ae5d142d8e3a18153f0722b90214e60c5a7","0xa98e8ff02049b4da386e3ee93db23bbb13dfeb72f1cfde72587c7e6d962780b7671c63e8ac3fbaeb1a6605e8d79e2f29","0x87a4892a0026d7e39ef3af632172b88337cb03669dea564bcdb70653b5
2d744730ebb5d642e20cb627acc9dbb547a26b","0x877352a22fc8052878a57effc159dac4d75fe08c84d3d5324c0bab6d564cdf868f33ceee515eee747e5856b62cfa0cc7","0x8b801ba8e2ff019ee62f64b8cb8a5f601fc35423eb0f9494b401050103e1307dc584e4e4b21249cd2c686e32475e96c3","0xa9e7338d6d4d9bfec91b2af28a8ed13b09415f57a3a00e5e777c93d768fdb3f8e4456ae48a2c6626b264226e911a0e28","0x99c05fedf40ac4726ed585d7c1544c6e79619a0d3fb6bda75a08c7f3c0008e8d5e19ed4da48de3216135f34a15eba17c","0xa61cce8a1a8b13a4a650fdbec0eeea8297c352a8238fb7cac95a0df18ed16ee02a3daa2de108fa122aca733bd8ad7855","0xb97f37da9005b440b4cb05870dd881bf8491fe735844f2d5c8281818583b38e02286e653d9f2e7fa5e74c3c3eb616540","0xa72164a8554da8e103f692ac5ebb4aece55d5194302b9f74b6f2a05335b6e39beede0bf7bf8c5bfd4d324a784c5fb08c","0xb87e8221c5341cd9cc8bb99c10fe730bc105550f25ed4b96c0d45e6142193a1b2e72f1b3857373a659b8c09be17b3d91","0xa41fb1f327ef91dcb7ac0787918376584890dd9a9675c297c45796e32d6e5985b12f9b80be47fc3a8596c245f419d395","0x90dafa3592bdbb3465c92e2a54c2531822ba0459d45d3e7a7092fa6b823f55af28357cb51896d4ec2d66029c82f08e26","0xa0a9adc872ebc396557f484f1dd21954d4f4a21c4aa5eec543f5fa386fe590839735c01f236574f7ff95407cd12de103","0xb8c5c940d58be7538acf8672852b5da3af34f82405ef2ce8e4c923f1362f97fc50921568d0fd2fe846edfb0823e62979","0x85aaf06a8b2d0dac89dafd00c28533f35dbd074978c2aaa5bef75db44a7b12aeb222e724f395513b9a535809a275e30b","0x81f3cbe82fbc7028c26a6c1808c604c63ba023a30c9f78a4c581340008dbda5ec07497ee849a2183fcd9124f7936af32","0xa11ac738de75fd60f15a34209d3825d5e23385796a4c7fc5931822f3f380af977dd0f7b59fbd58eed7777a071e21b680","0x85a279c493de03db6fa6c3e3c1b1b29adc9a8c4effc12400ae1128da8421954fa8b75ad19e5388fe4543b76fb0812813","0x83a217b395d59ab20db6c4adb1e9713fc9267f5f31a6c936042fe051ce8b541f579442f3dcf0fa16b9e6de9fd3518191","0x83a0b86e7d4ed8f9ccdc6dfc8ff1484509a6378fa6f09ed908e6ab9d1073f03011dc497e14304e4e3d181b57de06a5ab","0xa63ad69c9d25704ce1cc8e74f67818e5ed985f8f851afa8412248b2df5f833f83b95b27180e9e7273833ed0d07113d3b","0x99b1bc2021e63b561fe44ddd0af81fcc8627a9
1bfeecbbc989b642bc859abc0c8d636399701aad7bbaf6a385d5f27d61","0xb53434adb66f4a807a6ad917c6e856321753e559b1add70824e5c1e88191bf6993fccb9b8b911fc0f473fb11743acacd","0x97ed3b9e6fb99bf5f945d4a41f198161294866aa23f2327818cdd55cb5dc4c1a8eff29dd8b8d04902d6cd43a71835c82","0xb1e808260e368a18d9d10bdea5d60223ba1713b948c782285a27a99ae50cc5fc2c53d407de07155ecc16fb8a36d744a0","0xa3eb4665f18f71833fec43802730e56b3ee5a357ea30a888ad482725b169d6f1f6ade6e208ee081b2e2633079b82ba7d","0xab8beb2c8353fc9f571c18fdd02bdb977fc883313469e1277b0372fbbb33b80dcff354ca41de436d98d2ed710faa467e","0xaa9071cfa971e4a335a91ad634c98f2be51544cb21f040f2471d01bb97e1df2277ae1646e1ea8f55b7ba9f5c8c599b39","0x80b7dbfdcaf40f0678012acc634eba44ea51181475180d9deb2050dc4f2de395289edd0223018c81057ec79b04b04c49","0x89623d7f6cb17aa877af14de842c2d4ab7fd576d61ddd7518b5878620a01ded40b6010de0da3cdf31d837eecf30e9847","0xa773bb024ae74dd24761f266d4fb27d6fd366a8634febe8235376b1ae9065c2fe12c769f1d0407867dfbe9f5272c352f","0x8455a561c3aaa6ba64c881a5e13921c592b3a02e968f4fb24a2243c36202795d0366d9cc1a24e916f84d6e158b7aeac7","0x81d8bfc4b283cf702a40b87a2b96b275bdbf0def17e67d04842598610b67ea08c804d400c3e69fa09ea001eaf345b276","0xb8f8f82cb11fea1c99467013d7e167ff03deb0c65a677fab76ded58826d1ba29aa7cf9fcd7763615735ea3ad38e28719","0x89a6a04baf9cccc1db55179e1650b1a195dd91fb0aebc197a25143f0f393524d2589975e3fbfc2547126f0bced7fd6f2","0xb81b2162df045390f04df07cbd0962e6b6ca94275a63edded58001a2f28b2ae2af2c7a6cba4ecd753869684e77e7e799","0xa3757f722776e50de45c62d9c4a2ee0f5655a512344c4cbec542d8045332806568dd626a719ef21a4eb06792ca70f204","0x8c5590df96ec22179a4e8786de41beb44f987a1dcc508eb341eecbc0b39236fdfad47f108f852e87179ccf4e10091e59","0x87502f026ed4e10167419130b88c3737635c5b9074c364e1dd247cef5ef0fc064b4ae99b187e33301e438bbd2fe7d032","0xaf925a2165e980ced620ff12289129fe17670a90ae0f4db9d4b39bd887ccb1f5d2514ac9ecf910f6390a8fc66bd5be17","0x857fca899828cf5c65d26e3e8a6e658542782fc72762b3b9c73514919f83259e0f849a9d4838b40dc905fe43024d0d23","0x87ffebdbfb69a9e100
7ebac4ffcb4090ff13705967b73937063719aa97908986effcb7262fdadc1ae0f95c3690e3245d","0xa9ff6c347ac6f4c6ab993b748802e96982eaf489dc69032269568412fc9a79e7c2850dfc991b28211b3522ee4454344b","0xa65b3159df4ec48bebb67cb3663cd744027ad98d970d620e05bf6c48f230fa45bf17527fe726fdf705419bb7a1bb913e","0x84b97b1e6408b6791831997b03cd91f027e7660fd492a93d95daafe61f02427371c0e237c75706412f442991dfdff989","0xab761c26527439b209af0ae6afccd9340bbed5fbe098734c3145b76c5d2cd7115d9227b2eb523882b7317fbb09180498","0xa0479a8da06d7a69c0b0fee60df4e691c19c551f5e7da286dab430bfbcabf31726508e20d26ea48c53365a7f00a3ad34","0xa732dfc9baa0f4f40b5756d2e8d8937742999623477458e0bc81431a7b633eefc6f53b3b7939fe0a020018549c954054","0x901502436a1169ba51dc479a5abe7c8d84e0943b16bc3c6a627b49b92cd46263c0005bc324c67509edd693f28e612af1","0xb627aee83474e7f84d1bab9b7f6b605e33b26297ac6bbf52d110d38ba10749032bd551641e73a383a303882367af429b","0x95108866745760baef4a46ef56f82da6de7e81c58b10126ebd2ba2cd13d339f91303bf2fb4dd104a6956aa3b13739503","0x899ed2ade37236cec90056f3569bc50f984f2247792defafcceb49ad0ca5f6f8a2f06573705300e07f0de0c759289ff5","0xa9f5eee196d608efe4bcef9bf71c646d27feb615e21252cf839a44a49fd89da8d26a758419e0085a05b1d59600e2dc42","0xb36c6f68fed6e6c85f1f4a162485f24817f2843ec5cbee45a1ebfa367d44892e464949c6669f7972dc7167af08d55d25","0xaaaede243a9a1b6162afbc8f571a52671a5a4519b4062e3f26777664e245ba873ed13b0492c5dbf0258c788c397a0e9e","0x972b4fb39c31cbe127bf9a32a5cc10d621ebdd9411df5e5da3d457f03b2ab2cd1f6372d8284a4a9400f0b06ecdbfd38e","0x8f6ca1e110e959a4b1d9a5ce5f212893cec21db40d64d5ac4d524f352d72198f923416a850bf845bc5a22a79c0ea2619","0xa0f3c93b22134f66f04b2553a53b738644d1665ceb196b8494b315a4c28236fb492017e4a0de4224827c78e42f9908b7","0x807fb5ee74f6c8735b0b5ca07e28506214fe4047dbeb00045d7c24f7849e98706aea79771241224939cb749cf1366c7d","0x915eb1ff034224c0b645442cdb7d669303fdc00ca464f91aaf0b6fde0b220a3a74ff0cb043c26c9f3a5667b3fdaa9420","0x8fda6cef56ed33fefffa9e6ac8e6f76b1af379f89761945c63dd448801f7bb8ca970504a7105fac2f74f652ccff32327","
0x87380cffdcffb1d0820fa36b63cc081e72187f86d487315177d4d04da4533eb19a0e2ff6115ceab528887819c44a5164","0x8cd89e03411a18e7f16f968b89fb500c36d47d229f6487b99e62403a980058db5925ce249206743333538adfad168330","0x974451b1df33522ce7056de9f03e10c70bf302c44b0741a59df3d6877d53d61a7394dcee1dd46e013d7cb9d73419c092","0x98c35ddf645940260c490f384a49496a7352bb8e3f686feed815b1d38f59ded17b1ad6e84a209e773ed08f7b8ff1e4c2","0x963f386cf944bb9b2ddebb97171b64253ea0a2894ac40049bdd86cda392292315f3a3d490ca5d9628c890cfb669f0acb","0x8d507712152babd6d142ee682638da8495a6f3838136088df9424ef50d5ec28d815a198c9a4963610b22e49b4cdf95e9","0x83d4bc6b0be87c8a4f1e9c53f257719de0c73d85b490a41f7420e777311640937320557ff2f1d9bafd1daaa54f932356","0x82f5381c965b7a0718441131c4d13999f4cdce637698989a17ed97c8ea2e5bdb5d07719c5f7be8688edb081b23ede0f4","0xa6ebecab0b72a49dfd01d69fa37a7f74d34fb1d4fef0aa10e3d6fceb9eccd671225c230af89f6eb514250e41a5f91f52","0x846d185bdad6e11e604df7f753b7a08a28b643674221f0e750ebdb6b86ec584a29c869e131bca868972a507e61403f6a","0x85a98332292acb744bd1c0fd6fdcf1f889a78a2c9624d79413ffa194cc8dfa7821a4b60cde8081d4b5f71f51168dd67f","0x8f7d97c3b4597880d73200d074eb813d95432306e82dafc70b580b8e08cb8098b70f2d07b4b3ac6a4d77e92d57035031","0x8185439c8751e595825d7053518cbe121f191846a38d4dbcb558c3f9d7a3104f3153401adaaaf27843bbe2edb504bfe3","0xb3c00d8ece1518fca6b1215a139b0a0e26d9cba1b3a424f7ee59f30ce800a5db967279ed60958dd1f3ee69cf4dd1b204","0xa2e6cb6978e883f9719c3c0d44cfe8de0cc6f644b98f98858433bea8bbe7b612c8aca5952fccce4f195f9d54f9722dc2","0x99663087e3d5000abbec0fbda4e7342ec38846cc6a1505191fb3f1a337cb369455b7f8531a6eb8b0f7b2c4baf83cbe2b","0xab0836c6377a4dbc7ca6a4d6cf021d4cd60013877314dd05f351706b128d4af6337711ed3443cb6ca976f40d74070a9a","0x87abfd5126152fd3bac3c56230579b489436755ea89e0566aa349490b36a5d7b85028e9fb0710907042bcde6a6f5d7e3","0x974ba1033f75f60e0cf7c718a57ae1da3721cf9d0fb925714c46f027632bdd84cd9e6de4cf4d00bc55465b1c5ebb7384","0xa607b49d73689ac64f25cec71221d30d53e781e1100d19a2114a21da6507a60166166369d860bd3
14acb226596525670","0xa7c2b0b915d7beba94954f2aa7dd08ec075813661e2a3ecca5d28a0733e59583247fed9528eb28aba55b972cdbaf06eb","0xb8b3123e44128cc8efbe3270f2f94e50ca214a4294c71c3b851f8cbb70cb67fe9536cf07d04bf7fe380e5e3a29dd3c15","0xa59a07e343b62ad6445a0859a32b58c21a593f9ddbfe52049650f59628c93715aa1f4e1f45b109321756d0eeec8a5429","0x94f51f8a4ed18a6030d0aaa8899056744bd0e9dc9ac68f62b00355cddab11da5da16798db75f0bfbce0e5bdfe750c0b6","0x97460a97ca1e1fa5ce243b81425edc0ec19b7448e93f0b55bc9785eedeeafe194a3c8b33a61a5c72990edf375f122777","0x8fa859a089bc17d698a7ee381f37ce9beadf4e5b44fce5f6f29762bc04f96faff5d58c48c73631290325f05e9a1ecf49","0xabdf38f3b20fc95eff31de5aa9ef1031abfa48f1305ee57e4d507594570401503476d3bcc493838fc24d6967a3082c7f","0xb8914bfb82815abb86da35c64d39ab838581bc0bf08967192697d9663877825f2b9d6fbdcf9b410463482b3731361aef","0xa8187f9d22b193a5f578999954d6ec9aa9b32338ccadb8a3e1ce5bad5ea361d69016e1cdfac44e9d6c54e49dd88561b9","0xaac262cb7cba7fd62c14daa7b39677cabc1ef0947dd06dd89cac8570006a200f90d5f0353e84f5ff03179e3bebe14231","0xa630ef5ece9733b8c46c0a2df14a0f37647a85e69c63148e79ffdcc145707053f9f9d305c3f1cf3c7915cb46d33abd07","0xb102c237cb2e254588b6d53350dfda6901bd99493a3fbddb4121d45e0b475cf2663a40d7b9a75325eda83e4ba1e68cb3","0x86a930dd1ddcc16d1dfa00aa292cb6c2607d42c367e470aa920964b7c17ab6232a7108d1c2c11fc40fb7496547d0bbf8","0xa832fdc4500683e72a96cce61e62ac9ee812c37fe03527ad4cf893915ca1962cee80e72d4f82b20c8fc0b764376635a1","0x88ad985f448dabb04f8808efd90f273f11f5e6d0468b5489a1a6a3d77de342992a73eb842d419034968d733f101ff683","0x98a8538145f0d86f7fbf9a81c9140f6095c5bdd8960b1c6f3a1716428cd9cca1bf8322e6d0af24e6169abcf7df2b0ff6","0x9048c6eba5e062519011e177e955a200b2c00b3a0b8615bdecdebc217559d41058d3315f6d05617be531ef0f6aef0e51","0x833bf225ab6fc68cdcacf1ec1b50f9d05f5410e6cdcd8d56a3081dc2be8a8d07b81534d1ec93a25c2e270313dfb99e3b","0xa84bcd24c3da5e537e64a811b93c91bfc84d7729b9ead7f79078989a6eb76717d620c1fad17466a0519208651e92f5ff","0xb7cdd0a3fbd79aed93e1b5a44ca44a94e7af5ed911e4492f332e3a5ed14
6c7286bde01b52276a2fcc02780d2109874dd","0x8a19a09854e627cb95750d83c20c67442b66b35896a476358f993ba9ac114d32c59c1b3d0b8787ee3224cf3888b56c64","0xa9abd5afb8659ee52ada8fa5d57e7dd355f0a7350276f6160bec5fbf70d5f99234dd179eb221c913e22a49ec6d267846","0x8c13c4274c0d30d184e73eaf812200094bbbd57293780bdadbceb262e34dee5b453991e7f37c7333a654fc71c69d6445","0xa4320d73296ff8176ce0127ca1921c450e2a9c06eff936681ebaffb5a0b05b17fded24e548454de89aca2dcf6d7a9de4","0xb2b8b3e15c1f645f07783e5628aba614e60157889db41d8161d977606788842b67f83f361eae91815dc0abd84e09abd5","0xad26c3aa35ddfddc15719b8bb6c264aaec7065e88ac29ba820eb61f220fef451609a7bb037f3722d022e6c86e4f1dc88","0xb8615bf43e13ae5d7b8dd903ce37190800cd490f441c09b22aa29d7a29ed2c0417b7a08ead417868f1de2589deaadd80","0x8d3425e1482cd1e76750a76239d33c06b3554c3c3c87c15cb7ab58b1cee86a4c5c4178b44e23f36928365a1b484bde02","0x806893a62e38c941a7dd6f249c83af16596f69877cc737d8f73f6b8cd93cbc01177a7a276b2b8c6b0e5f2ad864db5994","0x86618f17fa4b0d65496b661bbb5ba3bc3a87129d30a4b7d4f515b904f4206ca5253a41f49fd52095861e5e065ec54f21","0x9551915da1304051e55717f4c31db761dcdcf3a1366c89a4af800a9e99aca93a357bf928307f098e62b44a02cb689a46","0x8f79c4ec0ec1146cb2a523b52fe33def90d7b5652a0cb9c2d1c8808a32293e00aec6969f5b1538e3a94cd1efa3937f86","0xa0c03e329a707300081780f1e310671315b4c6a4cedcb29697aedfabb07a9d5df83f27b20e9c44cf6b16e39d9ded5b98","0x86a7cfa7c8e7ce2c01dd0baec2139e97e8e090ad4e7b5f51518f83d564765003c65968f85481bbb97cb18f005ccc7d9f","0xa33811770c6dfda3f7f74e6ad0107a187fe622d61b444bbd84fd7ef6e03302e693b093df76f6ab39bb4e02afd84a575a","0x85480f5c10d4162a8e6702b5e04f801874d572a62a130be94b0c02b58c3c59bdcd48cd05f0a1c2839f88f06b6e3cd337","0x8e181011564b17f7d787fe0e7f3c87f6b62da9083c54c74fd6c357a1f464c123c1d3d8ade3cf72475000b464b14e2be3","0x8ee178937294b8c991337e0621ab37e9ffa4ca2bdb3284065c5e9c08aad6785d50cf156270ff9daf9a9127289710f55b","0x8bd1e8e2d37379d4b172f1aec96f2e41a6e1393158d7a3dbd9a95c8dd4f8e0b05336a42efc11a732e5f22b47fc5c271d","0x8f3da353cd487c13136a85677de8cedf306faae
0edec733cf4f0046f82fa4639db4745b0095ff33a9766aba50de0cbcf","0x8d187c1e97638df0e4792b78e8c23967dac43d98ea268ca4aabea4e0fa06cb93183fd92d4c9df74118d7cc27bf54415e","0xa4c992f08c2f8bac0b74b3702fb0c75c9838d2ce90b28812019553d47613c14d8ce514d15443159d700b218c5a312c49","0xa6fd1874034a34c3ea962a316c018d9493d2b3719bb0ec4edbc7c56b240802b2228ab49bee6f04c8a3e9f6f24a48c1c2","0xb2efed8e799f8a15999020900dc2c58ece5a3641c90811b86a5198e593d7318b9d53b167818ccdfbe7df2414c9c34011","0x995ff7de6181ddf95e3ead746089c6148da3508e4e7a2323c81785718b754d356789b902e7e78e2edc6b0cbd4ff22c78","0x944073d24750a9068cbd020b834afc72d2dde87efac04482b3287b40678ad07588519a4176b10f2172a2c463d063a5cd","0x99db4b1bb76475a6fd75289986ef40367960279524378cc917525fb6ba02a145a218c1e9caeb99332332ab486a125ac0","0x89fce4ecd420f8e477af4353b16faabb39e063f3f3c98fde2858b1f2d1ef6eed46f0975a7c08f233b97899bf60ccd60a","0x8c09a4f07a02b80654798bc63aada39fd638d3e3c4236ccd8a5ca280350c31e4a89e5f4c9aafb34116e71da18c1226b8","0x85325cfa7ded346cc51a2894257eab56e7488dbff504f10f99f4cd2b630d913003761a50f175ed167e8073f1b6b63fb0","0xb678b4fbec09a8cc794dcbca185f133578f29e354e99c05f6d07ac323be20aecb11f781d12898168e86f2e0f09aca15e","0xa249cfcbca4d9ba0a13b5f6aac72bf9b899adf582f9746bb2ad043742b28915607467eb794fca3704278f9136f7642be","0x9438e036c836a990c5e17af3d78367a75b23c37f807228362b4d13e3ddcb9e431348a7b552d09d11a2e9680704a4514f","0x925ab70450af28c21a488bfb5d38ac994f784cf249d7fd9ad251bb7fd897a23e23d2528308c03415074d43330dc37ef4","0xa290563904d5a8c0058fc8330120365bdd2ba1fdbaef7a14bc65d4961bb4217acfaed11ab82669e359531f8bf589b8db","0xa7e07a7801b871fc9b981a71e195a3b4ba6b6313bc132b04796a125157e78fe5c11a3a46cf731a255ac2d78a4ae78cd0","0xb26cd2501ee72718b0eebab6fb24d955a71f363f36e0f6dff0ab1d2d7836dab88474c0cef43a2cc32701fca7e82f7df3","0xa1dc3b6c968f3de00f11275092290afab65b2200afbcfa8ddc70e751fa19dbbc300445d6d479a81bda3880729007e496","0xa9bc213e28b630889476a095947d323b9ac6461dea726f2dc9084473ae8e196d66fb792a21905ad4ec52a6d757863e7d","0xb25d178df8c2df8051e
7c888e9fa677fde5922e602a95e966db9e4a3d6b23ce043d7dc48a5b375c6b7c78e966893e8c3","0xa1c8d88d72303692eaa7adf68ea41de4febec40cc14ae551bb4012afd786d7b6444a3196b5d9d5040655a3366d96b7cd","0xb22bd44f9235a47118a9bbe2ba5a2ba9ec62476061be2e8e57806c1a17a02f9a51403e849e2e589520b759abd0117683","0xb8add766050c0d69fe81d8d9ea73e1ed05f0135d093ff01debd7247e42dbb86ad950aceb3b50b9af6cdc14ab443b238f","0xaf2cf95f30ef478f018cf81d70d47d742120b09193d8bb77f0d41a5d2e1a80bfb467793d9e2471b4e0ad0cb2c3b42271","0x8af5ef2107ad284e246bb56e20fef2a255954f72de791cbdfd3be09f825298d8466064f3c98a50496c7277af32b5c0bc","0x85dc19558572844c2849e729395a0c125096476388bd1b14fa7f54a7c38008fc93e578da3aac6a52ff1504d6ca82db05","0xae8c9b43c49572e2e166d704caf5b4b621a3b47827bb2a3bcd71cdc599bba90396fd9a405261b13e831bb5d44c0827d7","0xa7ba7efede25f02e88f6f4cbf70643e76784a03d97e0fbd5d9437c2485283ad7ca3abb638a5f826cd9f6193e5dec0b6c","0x94a9d122f2f06ef709fd8016fd4b712d88052245a65a301f5f177ce22992f74ad05552b1f1af4e70d1eac62cef309752","0x82d999b3e7cf563833b8bc028ff63a6b26eb357dfdb3fd5f10e33a1f80a9b2cfa7814d871b32a7ebfbaa09e753e37c02","0xaec6edcde234df502a3268dd2c26f4a36a2e0db730afa83173f9c78fcb2b2f75510a02b80194327b792811caefda2725","0x94c0bfa66c9f91d462e9194144fdd12d96f9bbe745737e73bab8130607ee6ea9d740e2cfcbbd00a195746edb6369ee61","0xab7573dab8c9d46d339e3f491cb2826cabe8b49f85f1ede78d845fc3995537d1b4ab85140b7d0238d9c24daf0e5e2a7e","0x87e8b16832843251fe952dadfd01d41890ed4bb4b8fa0254550d92c8cced44368225eca83a6c3ad47a7f81ff8a80c984","0x9189d2d9a7c64791b19c0773ad4f0564ce6bea94aa275a917f78ad987f150fdb3e5e26e7fef9982ac184897ecc04683f","0xb3661bf19e2da41415396ae4dd051a9272e8a2580b06f1a1118f57b901fa237616a9f8075af1129af4eabfefedbe2f1c","0xaf43c86661fb15daf5d910a4e06837225e100fb5680bd3e4b10f79a2144c6ec48b1f8d6e6b98e067d36609a5d038889a","0x82ac0c7acaa83ddc86c5b4249aae12f28155989c7c6b91e5137a4ce05113c6cbc16f6c44948b0efd8665362d3162f16a","0x8f268d1195ab465beeeb112cd7ffd5d5548559a8bc01261106d3555533fc1971081b25558d884d552df0db1cddda89d8","0
x8ef7caa5521f3e037586ce8ac872a4182ee20c7921c0065ed9986c047e3dda08294da1165f385d008b40d500f07d895f","0x8c2f98f6880550573fad46075d3eba26634b5b025ce25a0b4d6e0193352c8a1f0661064027a70fe8190b522405f9f4e3","0xb7653f353564feb164f0f89ec7949da475b8dad4a4d396d252fc2a884f6932d027b7eb2dc4d280702c74569319ed701a","0xa026904f4066333befd9b87a8fad791d014096af60cdd668ef919c24dbe295ff31f7a790e1e721ba40cf5105abca67f4","0x988f982004ada07a22dd345f2412a228d7a96b9cae2c487de42e392afe1e35c2655f829ce07a14629148ce7079a1f142","0x9616add009067ed135295fb74d5b223b006b312bf14663e547a0d306694ff3a8a7bb9cfc466986707192a26c0bce599f","0xad4c425de9855f6968a17ee9ae5b15e0a5b596411388cf976df62ecc6c847a6e2ddb2cea792a5f6e9113c2445dba3e5c","0xb698ac9d86afa3dc69ff8375061f88e3b0cff92ff6dfe747cebaf142e813c011851e7a2830c10993b715e7fd594604a9","0xa386fa189847bb3b798efca917461e38ead61a08b101948def0f82cd258b945ed4d45b53774b400af500670149e601b7","0x905c95abda2c68a6559d8a39b6db081c68cef1e1b4be63498004e1b2f408409be9350b5b5d86a30fd443e2b3e445640a","0x9116dade969e7ce8954afcdd43e5cab64dc15f6c1b8da9d2d69de3f02ba79e6c4f6c7f54d6bf586d30256ae405cd1e41","0xa3084d173eacd08c9b5084a196719b57e47a0179826fda73466758235d7ecdb87cbcf097bd6b510517d163a85a7c7edd","0x85bb00415ad3c9be99ff9ba83672cc59fdd24356b661ab93713a3c8eab34e125d8867f628a3c3891b8dc056e69cd0e83","0x8d58541f9f39ed2ee4478acce5d58d124031338ec11b0d55551f00a5a9a6351faa903a5d7c132dc5e4bb026e9cbd18e4","0xa622adf72dc250e54f672e14e128c700166168dbe0474cecb340da175346e89917c400677b1bc1c11fcc4cc26591d9db","0xb3f865014754b688ca8372e8448114fff87bf3ca99856ab9168894d0c4679782c1ced703f5b74e851b370630f5e6ee86","0xa7e490b2c40c2446fcd91861c020da9742c326a81180e38110558bb5d9f2341f1c1885e79b364e6419023d1cbdc47380","0xb3748d472b1062e54572badbb8e87ac36534407f74932e7fc5b8392d008e8e89758f1671d1e4d30ab0fa40551b13bb5e","0x89898a5c5ec4313aabc607b0049fd1ebad0e0c074920cf503c9275b564d91916c2c446d3096491c950b7af3ac5e4b0ed","0x8eb8c83fef2c9dd30ea44e286e9599ec5c20aba983f702e5438afe2e5b921884327ad8d1566c7239
5587efac79ca7d56","0xb92479599e806516ce21fb0bd422a1d1d925335ebe2b4a0a7e044dd275f30985a72b97292477053ac5f00e081430da80","0xa34ae450a324fe8a3c25a4d653a654f9580ed56bbea213b8096987bbad0f5701d809a17076435e18017fea4d69f414bc","0x81381afe6433d62faf62ea488f39675e0091835892ecc238e02acf1662669c6d3962a71a3db652f6fe3bc5f42a0e5dc5","0xa430d475bf8580c59111103316fe1aa79c523ea12f1d47a976bbfae76894717c20220e31cf259f08e84a693da6688d70","0xb842814c359754ece614deb7d184d679d05d16f18a14b288a401cef5dad2cf0d5ee90bad487b80923fc5573779d4e4e8","0x971d9a2627ff2a6d0dcf2af3d895dfbafca28b1c09610c466e4e2bff2746f8369de7f40d65b70aed135fe1d72564aa88","0x8f4ce1c59e22b1ce7a0664caaa7e53735b154cfba8d2c5cc4159f2385843de82ab58ed901be876c6f7fce69cb4130950","0x86cc9dc321b6264297987000d344fa297ef45bcc2a4df04e458fe2d907ad304c0ea2318e32c3179af639a9a56f3263cf","0x8229e0876dfe8f665c3fb19b250bd89d40f039bbf1b331468b403655be7be2e104c2fd07b9983580c742d5462ca39a43","0x99299d73066e8eb128f698e56a9f8506dfe4bd014931e86b6b487d6195d2198c6c5bf15cccb40ccf1f8ddb57e9da44a2","0xa3a3be37ac554c574b393b2f33d0a32a116c1a7cfeaf88c54299a4da2267149a5ecca71f94e6c0ef6e2f472b802f5189","0xa91700d1a00387502cdba98c90f75fbc4066fefe7cc221c8f0e660994c936badd7d2695893fde2260c8c11d5bdcdd951","0x8e03cae725b7f9562c5c5ab6361644b976a68bada3d7ca508abca8dfc80a469975689af1fba1abcf21bc2a190dab397d","0xb01461ad23b2a8fa8a6d241e1675855d23bc977dbf4714add8c4b4b7469ccf2375cec20e80cedfe49361d1a30414ac5b","0xa2673bf9bc621e3892c3d7dd4f1a9497f369add8cbaa3472409f4f86bd21ac67cfac357604828adfee6ada1835365029","0xa042dff4bf0dfc33c178ba1b335e798e6308915128de91b12e5dbbab7c4ac8d60a01f6aea028c3a6d87b9b01e4e74c01","0x86339e8a75293e4b3ae66b5630d375736b6e6b6b05c5cda5e73fbf7b2f2bd34c18a1d6cefede08625ce3046e77905cb8","0xaf2ebe1b7d073d03e3d98bc61af83bf26f7a8c130fd607aa92b75db22d14d016481b8aa231e2c9757695f55b7224a27f","0xa00ee882c9685e978041fd74a2c465f06e2a42ffd3db659053519925be5b454d6f401e3c12c746e49d910e4c5c9c5e8c","0x978a781c0e4e264e0dad57e438f1097d447d891a1e2aa0d5928f79a9d5c3
faae6f258bc94fdc530b7b2fa6a9932bb193","0xaa4b7ce2e0c2c9e9655bf21e3e5651c8503bce27483017b0bf476be743ba06db10228b3a4c721219c0779747f11ca282","0xb003d1c459dacbcf1a715551311e45d7dbca83a185a65748ac74d1800bbeaba37765d9f5a1a221805c571910b34ebca8","0x95b6e531b38648049f0d19de09b881baa1f7ea3b2130816b006ad5703901a05da57467d1a3d9d2e7c73fb3f2e409363c","0xa6cf9c06593432d8eba23a4f131bb7f72b9bd51ab6b4b772a749fe03ed72b5ced835a349c6d9920dba2a39669cb7c684","0xaa3d59f6e2e96fbb66195bc58c8704e139fa76cd15e4d61035470bd6e305db9f98bcbf61ac1b95e95b69ba330454c1b3","0xb57f97959c208361de6d7e86dff2b873068adb0f158066e646f42ae90e650079798f165b5cd713141cd3a2a90a961d9a","0xa76ee8ed9052f6a7a8c69774bb2597be182942f08115baba03bf8faaeaee526feba86120039fe8ca7b9354c3b6e0a8e6","0x95689d78c867724823f564627d22d25010f278674c6d2d0cdb10329169a47580818995d1d727ce46c38a1e47943ebb89","0xab676d2256c6288a88e044b3d9ffd43eb9d5aaee00e8fc60ac921395fb835044c71a26ca948e557fed770f52d711e057","0x96351c72785c32e5d004b6f4a1259fb8153d631f0c93fed172f18e8ba438fbc5585c1618deeabd0d6d0b82173c2e6170","0x93dd8d3db576418e22536eba45ab7f56967c6c97c64260d6cddf38fb19c88f2ec5cd0e0156f50e70855eee8a2b879ffd","0xad6ff16f40f6de3d7a737f8e6cebd8416920c4ff89dbdcd75eabab414af9a6087f83ceb9aff7680aa86bff98bd09c8cc","0x84de53b11671abc9c38710e19540c5c403817562aeb22a88404cdaff792c1180f717dbdfe8f54940c062c4d032897429","0x872231b9efa1cdd447b312099a5c164c560440a9441d904e70f5abfc3b2a0d16be9a01aca5e0a2599a61e19407587e3d","0x88f44ac27094a2aa14e9dc40b099ee6d68f97385950f303969d889ee93d4635e34dff9239103bdf66a4b7cbba3e7eb7a","0xa59afebadf0260e832f6f44468443562f53fbaf7bcb5e46e1462d3f328ac437ce56edbca617659ac9883f9e13261fad7","0xb1990e42743a88de4deeacfd55fafeab3bc380cb95de43ed623d021a4f2353530bcab9594389c1844b1c5ea6634c4555","0x85051e841149a10e83f56764e042182208591396d0ce78c762c4a413e6836906df67f38c69793e158d64fef111407ba3","0x9778172bbd9b1f2ec6bbdd61829d7b39a7df494a818e31c654bf7f6a30139899c4822c1bf418dd4f923243067759ce63","0x9355005b4878c87804fc966e7d24f3e4b02bed35
b4a77369d01f25a3dcbff7621b08306b1ac85b76fe7b4a3eb5f839b1","0x8f9dc6a54fac052e236f8f0e1f571ac4b5308a43acbe4cc8183bce26262ddaf7994e41cf3034a4cbeca2c505a151e3b1","0x8cc59c17307111723fe313046a09e0e32ea0cce62c13814ab7c6408c142d6a0311d801be4af53fc9240523f12045f9ef","0x8e6057975ed40a1932e47dd3ac778f72ee2a868d8540271301b1aa6858de1a5450f596466494a3e0488be4fbeb41c840","0x812145efbd6559ae13325d56a15940ca4253b17e72a9728986b563bb5acc13ec86453796506ac1a8f12bd6f9e4a288c3","0x911da0a6d6489eb3dab2ec4a16e36127e8a291ae68a6c2c9de33e97f3a9b1f00da57a94e270a0de79ecc5ecb45d19e83","0xb72ea85973f4b2a7e6e71962b0502024e979a73c18a9111130e158541fa47bbaaf53940c8f846913a517dc69982ba9e1","0xa7a56ad1dbdc55f177a7ad1d0af78447dc2673291e34e8ab74b26e2e2e7d8c5fe5dc89e7ef60f04a9508847b5b3a8188","0xb52503f6e5411db5d1e70f5fb72ccd6463fa0f197b3e51ca79c7b5a8ab2e894f0030476ada72534fa4eb4e06c3880f90","0xb51c7957a3d18c4e38f6358f2237b3904618d58b1de5dec53387d25a63772e675a5b714ad35a38185409931157d4b529","0xb86b4266e719d29c043d7ec091547aa6f65bbf2d8d831d1515957c5c06513b72aa82113e9645ad38a7bc3f5383504fa6","0xb95b547357e6601667b0f5f61f261800a44c2879cf94e879def6a105b1ad2bbf1795c3b98a90d588388e81789bd02681","0xa58fd4c5ae4673fa350da6777e13313d5d37ed1dafeeb8f4f171549765b84c895875d9d3ae6a9741f3d51006ef81d962","0x9398dc348d078a604aadc154e6eef2c0be1a93bb93ba7fe8976edc2840a3a318941338cc4d5f743310e539d9b46613d2","0x902c9f0095014c4a2f0dccaaab543debba6f4cc82c345a10aaf4e72511725dbed7a34cd393a5f4e48a3e5142b7be84ed","0xa7c0447849bb44d04a0393a680f6cd390093484a79a147dd238f5d878030d1c26646d88211108e59fe08b58ad20c6fbd","0x80db045535d6e67a422519f5c89699e37098449d249698a7cc173a26ccd06f60238ae6cc7242eb780a340705c906790c","0x8e52b451a299f30124505de2e74d5341e1b5597bdd13301cc39b05536c96e4380e7f1b5c7ef076f5b3005a868657f17c","0x824499e89701036037571761e977654d2760b8ce21f184f2879fda55d3cda1e7a95306b8abacf1caa79d3cc075b9d27f","0x9049b956b77f8453d2070607610b79db795588c0cec12943a0f5fe76f358dea81e4f57a4692112afda0e2c05c142b26f","0x81911647d818a4b5f499
0bfd4bc13bf7be7b0059afcf1b6839333e8569cdb0172fd2945410d88879349f677abaed5eb3","0xad4048f19b8194ed45b6317d9492b71a89a66928353072659f5ce6c816d8f21e69b9d1817d793effe49ca1874daa1096","0x8d22f7b2ddb31458661abd34b65819a374a1f68c01fc6c9887edeba8b80c65bceadb8f57a3eb686374004b836261ef67","0x92637280c259bc6842884db3d6e32602a62252811ae9b019b3c1df664e8809ffe86db88cfdeb8af9f46435c9ee790267","0xa2f416379e52e3f5edc21641ea73dc76c99f7e29ea75b487e18bd233856f4c0183429f378d2bfc6cd736d29d6cadfa49","0x882cb6b76dbdc188615dcf1a8439eba05ffca637dd25197508156e03c930b17b9fed2938506fdd7b77567cb488f96222","0xb68b621bb198a763fb0634eddb93ed4b5156e59b96c88ca2246fd1aea3e6b77ed651e112ac41b30cd361fadc011d385e","0xa3cb22f6b675a29b2d1f827cacd30df14d463c93c3502ef965166f20d046af7f9ab7b2586a9c64f4eae4fad2d808a164","0x8302d9ce4403f48ca217079762ce42cee8bc30168686bb8d3a945fbd5acd53b39f028dce757b825eb63af2d5ae41169d","0xb2eef1fbd1a176f1f4cd10f2988c7329abe4eb16c7405099fb92baa724ab397bc98734ef7d4b24c0f53dd90f57520d04","0xa1bbef0bd684a3f0364a66bde9b29326bac7aa3dde4caed67f14fb84fed3de45c55e406702f1495a3e2864d4ee975030","0x976acdb0efb73e3a3b65633197692dedc2adaed674291ae3df76b827fc866d214e9cac9ca46baefc4405ff13f953d936","0xb9fbf71cc7b6690f601f0b1c74a19b7d14254183a2daaafec7dc3830cba5ae173d854bbfebeca985d1d908abe5ef0cda","0x90591d7b483598c94e38969c4dbb92710a1a894bcf147807f1bcbd8aa3ac210b9f2be65519aa829f8e1ccdc83ad9b8cf","0xa30568577c91866b9c40f0719d46b7b3b2e0b4a95e56196ac80898a2d89cc67880e1229933f2cd28ee3286f8d03414d7","0x97589a88c3850556b359ec5e891f0937f922a751ac7c95949d3bbc7058c172c387611c0f4cb06351ef02e5178b3dd9e4","0x98e7bbe27a1711f4545df742f17e3233fbcc63659d7419e1ca633f104cb02a32c84f2fac23ca2b84145c2672f68077ab","0xa7ddb91636e4506d8b7e92aa9f4720491bb71a72dadc47c7f4410e15f93e43d07d2b371951a0e6a18d1bd087aa96a5c4","0xa7c006692227a06db40bceac3d5b1daae60b5692dd9b54772bedb5fea0bcc91cbcdb530cac31900ffc70c5b3ffadc969","0x8d3ec6032778420dfa8be52066ba0e623467df33e4e1901dbadd586c5d750f4ccde499b5197e26b9ea43931214060f69","0x
8d9a8410518ea64f89df319bfd1fc97a0971cdb9ad9b11d1f8fe834042ea7f8dce4db56eeaf179ff8dda93b6db93e5ce","0xa3c533e9b3aa04df20b9ff635cb1154ce303e045278fcf3f10f609064a5445552a1f93989c52ce852fd0bbd6e2b6c22e","0x81934f3a7f8c1ae60ec6e4f212986bcc316118c760a74155d06ce0a8c00a9b9669ec4e143ca214e1b995e41271774fd9","0xab8e2d01a71192093ef8fafa7485e795567cc9db95a93fb7cc4cf63a391ef89af5e2bfad4b827fffe02b89271300407f","0x83064a1eaa937a84e392226f1a60b7cfad4efaa802f66de5df7498962f7b2649924f63cd9962d47906380b97b9fe80e1","0xb4f5e64a15c6672e4b55417ee5dc292dcf93d7ea99965a888b1cc4f5474a11e5b6520eacbcf066840b343f4ceeb6bf33","0xa63d278b842456ef15c278b37a6ea0f27c7b3ffffefca77c7a66d2ea06c33c4631eb242bbb064d730e70a8262a7b848a","0x83a41a83dbcdf0d22dc049de082296204e848c453c5ab1ba75aa4067984e053acf6f8b6909a2e1f0009ed051a828a73b","0x819485b036b7958508f15f3c19436da069cbe635b0318ebe8c014cf1ef9ab2df038c81161b7027475bcfa6fff8dd9faf","0xaa40e38172806e1e045e167f3d1677ef12d5dcdc89b43639a170f68054bd196c4fae34c675c1644d198907a03f76ba57","0x969bae484883a9ed1fbed53b26b3d4ee4b0e39a6c93ece5b3a49daa01444a1c25727dabe62518546f36b047b311b177c","0x80a9e73a65da99664988b238096a090d313a0ee8e4235bc102fa79bb337b51bb08c4507814eb5baec22103ec512eaab0","0x86604379aec5bddda6cbe3ef99c0ac3a3c285b0b1a15b50451c7242cd42ae6b6c8acb717dcca7917838432df93a28502","0xa23407ee02a495bed06aa7e15f94cfb05c83e6d6fba64456a9bbabfa76b2b68c5c47de00ba169e710681f6a29bb41a22","0x98cff5ecc73b366c6a01b34ac9066cb34f7eeaf4f38a5429bad2d07e84a237047e2a065c7e8a0a6581017dadb4695deb","0x8de9f68a938f441f3b7ab84bb1f473c5f9e5c9e139e42b7ccee1d254bd57d0e99c2ccda0f3198f1fc5737f6023dd204e","0xb0ce48d815c2768fb472a315cad86aa033d0e9ca506f146656e2941829e0acb735590b4fbc713c2d18d3676db0a954ac","0x82f485cdefd5642a6af58ac6817991c49fac9c10ace60f90b27f1788cc026c2fe8afc83cf499b3444118f9f0103598a8","0x82c24550ed512a0d53fc56f64cc36b553823ae8766d75d772dacf038c460f16f108f87a39ceef7c66389790f799dbab3","0x859ffcf1fe9166388316149b9acc35694c0ea534d43f09dae9b86f4aa00a23b27144dda6a352e74b9
516e8c8d6fc809c","0xb8f7f353eec45da77fb27742405e5ad08d95ec0f5b6842025be9def3d9892f85eb5dd0921b41e6eff373618dba215bca","0x8ccca4436f9017e426229290f5cd05eac3f16571a4713141a7461acfe8ae99cd5a95bf5b6df129148693c533966145da","0xa2c67ecc19c0178b2994846fea4c34c327a5d786ac4b09d1d13549d5be5996d8a89021d63d65cb814923388f47cc3a03","0xaa0ff87d676b418ec08f5cbf577ac7e744d1d0e9ebd14615b550eb86931eafd2a36d4732cc5d6fab1713fd7ab2f6f7c0","0x8aef4730bb65e44efd6bb9441c0ae897363a2f3054867590a2c2ecf4f0224e578c7a67f10b40f8453d9f492ac15a9b2d","0x86a187e13d8fba5addcfdd5b0410cedd352016c930f913addd769ee09faa6be5ca3e4b1bdb417a965c643a99bd92be42","0xa0a4e9632a7a094b14b29b78cd9c894218cdf6783e61671e0203865dc2a835350f465fbaf86168f28af7c478ca17bc89","0xa8c7b02d8deff2cd657d8447689a9c5e2cd74ef57c1314ac4d69084ac24a7471954d9ff43fe0907d875dcb65fd0d3ce5","0x97ded38760aa7be6b6960b5b50e83b618fe413cbf2bcc1da64c05140bcc32f5e0e709cd05bf8007949953fac5716bad9","0xb0d293835a24d64c2ae48ce26e550b71a8c94a0883103757fb6b07e30747f1a871707d23389ba2b2065fa6bafe220095","0x8f9e291bf849feaa575592e28e3c8d4b7283f733d41827262367ea1c40f298c7bcc16505255a906b62bf15d9f1ba85fb","0x998f4e2d12708b4fd85a61597ca2eddd750f73c9e0c9b3cf0825d8f8e01f1628fd19797dcaed3b16dc50331fc6b8b821","0xb30d1f8c115d0e63bf48f595dd10908416774c78b3bbb3194192995154d80ea042d2e94d858de5f8aa0261b093c401fd","0xb5d9c75bb41f964cbff3f00e96d9f1480c91df8913f139f0d385d27a19f57a820f838eb728e46823cbff00e21c660996","0xa6edec90b5d25350e2f5f0518777634f9e661ec9d30674cf5b156c4801746d62517751d90074830ac0f4b09911c262f1","0x82f98da1264b6b75b8fbeb6a4d96d6a05b25c24db0d57ba3a38efe3a82d0d4e331b9fc4237d6494ccfe4727206457519","0xb89511843453cf4ecd24669572d6371b1e529c8e284300c43e0d5bb6b3aaf35aeb634b3cb5c0a2868f0d5e959c1d0772","0xa82bf065676583e5c1d3b81987aaae5542f522ba39538263a944bb33ea5b514c649344a96c0205a3b197a3f930fcda6c","0xa37b47ea527b7e06c460776aa662d9a49ff4149d3993f1a974b0dd165f7171770d189b0e2ea54fd5fccb6a14b116e68a","0xa1017677f97dda818274d47556d09d0e4ccacb23a252f82a6cfe78c630ad4
6fb9806307445a59fb61262182de3a2b29c","0xb01e9fcac239ba270e6877b79273ddd768bf8a51d2ed8a051b1c11e18eff3de5920e2fcbfbd26f06d381eddd3b1f1e1b","0x82fcd53d803b1c8e4ed76adc339b7f3a5962d37042b9683aabac7513ac68775d4a566a9460183926a6a95dbe7d551a1f","0xa763e78995d55cd21cdb7ef75d9642d6e1c72453945e346ab6690c20a4e1eeec61bb848ef830ae4b56182535e3c71d8f","0xb769f4db602251d4b0a1186782799bdcef66de33c110999a5775c50b349666ffd83d4c89714c4e376f2efe021a5cfdb2","0xa59cbd1b785efcfa6e83fc3b1d8cf638820bc0c119726b5368f3fba9dce8e3414204fb1f1a88f6c1ff52e87961252f97","0x95c8c458fd01aa23ecf120481a9c6332ebec2e8bb70a308d0576926a858457021c277958cf79017ddd86a56cacc2d7db","0x82eb41390800287ae56e77f2e87709de5b871c8bdb67c10a80fc65f3acb9f7c29e8fa43047436e8933f27449ea61d94d","0xb3ec25e3545eb83aed2a1f3558d1a31c7edde4be145ecc13b33802654b77dc049b4f0065069dd9047b051e52ab11dcdd","0xb78a0c715738f56f0dc459ab99e252e3b579b208142836b3c416b704ca1de640ca082f29ebbcee648c8c127df06f6b1e","0xa4083149432eaaf9520188ebf4607d09cf664acd1f471d4fb654476e77a9eaae2251424ffda78d09b6cb880df35c1219","0x8c52857d68d6e9672df3db2df2dbf46b516a21a0e8a18eec09a6ae13c1ef8f369d03233320dd1c2c0bbe00abfc1ea18b","0x8c856089488803066bff3f8d8e09afb9baf20cecc33c8823c1c0836c3d45498c3de37e87c016b705207f60d2b00f8609","0x831a3df39be959047b2aead06b4dcd3012d7b29417f642b83c9e8ce8de24a3dbbd29c6fdf55e2db3f7ea04636c94e403","0xaed84d009f66544addabe404bf6d65af7779ce140dc561ff0c86a4078557b96b2053b7b8a43432ffb18cd814f143b9da","0x93282e4d72b0aa85212a77b336007d8ba071eea17492da19860f1ad16c1ea8867ccc27ef5c37c74b052465cc11ea4f52","0xa7b78b8c8d057194e8d68767f1488363f77c77bddd56c3da2bc70b6354c7aa76247c86d51f7371aa38a4aa7f7e3c0bb7","0xb1c77283d01dcd1bde649b5b044eac26befc98ff57cbee379fb5b8e420134a88f2fc7f0bf04d15e1fbd45d29e7590fe6","0xa4aa8de70330a73b2c6458f20a1067eed4b3474829b36970a8df125d53bbdda4f4a2c60063b7cccb0c80fc155527652f","0x948a6c79ba1b8ad7e0bed2fae2f0481c4e41b4d9bbdd9b58164e28e9065700e83f210c8d5351d0212e0b0b68b345b3a5","0x86a48c31dcbbf7b082c92d28e1f613a2378a91067
7d7db3a349dc089e4a1e24b12eee8e8206777a3a8c64748840b7387","0x976adb1af21e0fc34148917cf43d933d7bfd3fd12ed6c37039dcd5a4520e3c6cf5868539ba5bf082326430deb8a4458d","0xb93e1a4476f2c51864bb4037e7145f0635eb2827ab91732b98d49b6c07f6ac443111aa1f1da76d1888665cb897c3834e","0x8afd46fb23bf869999fa19784b18a432a1f252d09506b8dbb756af900518d3f5f244989b3d7c823d9029218c655d3dc6","0x83f1e59e3abeed18cdc632921672673f1cb6e330326e11c4e600e13e0d5bc11bdc970ae12952e15103a706fe720bf4d6","0x90ce4cc660714b0b673d48010641c09c00fc92a2c596208f65c46073d7f349dd8e6e077ba7dcef9403084971c3295b76","0x8b09b0f431a7c796561ecf1549b85048564de428dac0474522e9558b6065fede231886bc108539c104ce88ebd9b5d1b0","0x85d6e742e2fb16a7b0ba0df64bc2c0dbff9549be691f46a6669bca05e89c884af16822b85faefefb604ec48c8705a309","0xa87989ee231e468a712c66513746fcf03c14f103aadca0eac28e9732487deb56d7532e407953ab87a4bf8961588ef7b0","0xb00da10efe1c29ee03c9d37d5918e391ae30e48304e294696b81b434f65cf8c8b95b9d1758c64c25e534d045ba28696f","0x91c0e1fb49afe46c7056400baa06dbb5f6e479db78ee37e2d76c1f4e88994357e257b83b78624c4ef6091a6c0eb8254d","0x883fb797c498297ccbf9411a3e727c3614af4eccde41619b773dc7f3259950835ee79453debf178e11dec4d3ada687a0","0xa14703347e44eb5059070b2759297fcfcfc60e6893c0373eea069388eba3950aa06f1c57cd2c30984a2d6f9e9c92c79e","0xafebc7585b304ceba9a769634adff35940e89cd32682c78002822aab25eec3edc29342b7f5a42a56a1fec67821172ad5","0xaea3ff3822d09dba1425084ca95fd359718d856f6c133c5fabe2b2eed8303b6e0ba0d8698b48b93136a673baac174fd9","0xaf2456a09aa777d9e67aa6c7c49a1845ea5cdda2e39f4c935c34a5f8280d69d4eec570446998cbbe31ede69a91e90b06","0x82cada19fed16b891ef3442bafd49e1f07c00c2f57b2492dd4ee36af2bd6fd877d6cb41188a4d6ce9ec8d48e8133d697","0x82a21034c832287f616619a37c122cee265cc34ae75e881fcaea4ea7f689f3c2bc8150bbf7dbcfd123522bfb7f7b1d68","0x86877217105f5d0ec3eeff0289fc2a70d505c9fdf7862e8159553ef60908fb1a27bdaf899381356a4ef4649072a9796c","0x82b196e49c6e861089a427c0b4671d464e9d15555ffb90954cd0d630d7ae02eb3d98ceb529d00719c2526cd96481355a","0xa29b41d0d43d26ce76d43
58e0db2b77df11f56e389f3b084d8af70a636218bd3ac86b36a9fe46ec9058c26a490f887f7","0xa4311c4c20c4d7dd943765099c50f2fd423e203ccfe98ff00087d205467a7873762510cac5fdce7a308913ed07991ed7","0xb1f040fc5cc51550cb2c25cf1fd418ecdd961635a11f365515f0cb4ffb31da71f48128c233e9cc7c0cf3978d757ec84e","0xa9ebae46f86d3bd543c5f207ed0d1aed94b8375dc991161d7a271f01592912072e083e2daf30c146430894e37325a1b9","0x826418c8e17ad902b5fe88736323a47e0ca7a44bce4cbe27846ec8fe81de1e8942455dda6d30e192cdcc73e11df31256","0x85199db563427c5edcbac21f3d39fec2357be91fb571982ddcdc4646b446ad5ced84410de008cb47b3477ee0d532daf8","0xb7eed9cd400b2ca12bf1d9ae008214b8561fb09c8ad9ff959e626ffde00fee5ff2f5b6612e231f2a1a9b1646fcc575e3","0x8b40bf12501dcbac78f5a314941326bfcddf7907c83d8d887d0bb149207f85d80cd4dfbd7935439ea7b14ea39a3fded7","0x83e3041af302485399ba6cd5120e17af61043977083887e8d26b15feec4a6b11171ac5c06e6ad0971d4b58a81ff12af3","0x8f5b9a0eecc589dbf8c35a65d5e996a659277ef6ea509739c0cb7b3e2da9895e8c8012de662e5b23c5fa85d4a8f48904","0x835d71ed5e919d89d8e6455f234f3ff215462c4e3720c371ac8c75e83b19dfe3ae15a81547e4dc1138e5f5997f413cc9","0x8b7d2e4614716b1db18e9370176ea483e6abe8acdcc3dcdf5fb1f4d22ca55d652feebdccc171c6de38398d9f7bfdec7a","0x93eace72036fe57d019676a02acf3d224cf376f166658c1bf705db4f24295881d477d6fdd7916efcfceff8c7a063deda","0xb1ac460b3d516879a84bc886c54f020a9d799e7c49af3e4d7de5bf0d2793c852254c5d8fe5616147e6659512e5ccb012","0xacd0947a35cb167a48bcd9667620464b54ac0e78f9316b4aa92dcaab5422d7a732087e52e1c827faa847c6b2fe6e7766","0x94ac33d21c3d12ff762d32557860e911cd94d666609ddcc42161b9c16f28d24a526e8b10bb03137257a92cec25ae637d","0x832e02058b6b994eadd8702921486241f9a19e68ed1406dad545e000a491ae510f525ccf9d10a4bba91c68f2c53a0f58","0x9471035d14f78ff8f463b9901dd476b587bb07225c351161915c2e9c6114c3c78a501379ab6fb4eb03194c457cbd22bf","0xab64593e034c6241d357fcbc32d8ea5593445a5e7c24cac81ad12bd2ef01843d477a36dc1ba21dbe63b440750d72096a","0x9850f3b30045e927ad3ec4123a32ed2eb4c911f572b6abb79121873f91016f0d80268de8b12e2093a4904f6e6cab7642","0x9
87212c36b4722fe2e54fa30c52b1e54474439f9f35ca6ad33c5130cd305b8b54b532dd80ffd2c274105f20ce6d79f6e","0x8b4d0c6abcb239b5ed47bef63bc17efe558a27462c8208fa652b056e9eae9665787cd1aee34fbb55beb045c8bfdb882b","0xa9f3483c6fee2fe41312d89dd4355d5b2193ac413258993805c5cbbf0a59221f879386d3e7a28e73014f10e65dd503d9","0xa2225da3119b9b7c83d514b9f3aeb9a6d9e32d9cbf9309cbb971fd53c4b2c001d10d880a8ad8a7c281b21d85ceca0b7c","0xa050be52e54e676c151f7a54453bbb707232f849beab4f3bf504b4d620f59ed214409d7c2bd3000f3ff13184ccda1c35","0xadbccf681e15b3edb6455a68d292b0a1d0f5a4cb135613f5e6db9943f02181341d5755875db6ee474e19ace1c0634a28","0x8b6eff675632a6fad0111ec72aacc61c7387380eb87933fd1d098856387d418bd38e77d897e65d6fe35951d0627c550b","0xaabe2328ddf90989b15e409b91ef055cb02757d34987849ae6d60bef2c902bf8251ed21ab30acf39e500d1d511e90845","0x92ba4eb1f796bc3d8b03515f65c045b66e2734c2da3fc507fdd9d6b5d1e19ab3893726816a32141db7a31099ca817d96","0x8a98b3cf353138a1810beb60e946183803ef1d39ac4ea92f5a1e03060d35a4774a6e52b14ead54f6794d5f4022b8685c","0x909f8a5c13ec4a59b649ed3bee9f5d13b21d7f3e2636fd2bb3413c0646573fdf9243d63083356f12f5147545339fcd55","0x9359d914d1267633141328ed0790d81c695fea3ddd2d406c0df3d81d0c64931cf316fe4d92f4353c99ff63e2aefc4e34","0xb88302031681b54415fe8fbfa161c032ea345c6af63d2fb8ad97615103fd4d4281c5a9cae5b0794c4657b97571a81d3b","0x992c80192a519038082446b1fb947323005b275e25f2c14c33cc7269e0ec038581cc43705894f94bad62ae33a8b7f965","0xa78253e3e3eece124bef84a0a8807ce76573509f6861d0b6f70d0aa35a30a123a9da5e01e84969708c40b0669eb70aa6","0x8d5724de45270ca91c94792e8584e676547d7ac1ac816a6bb9982ee854eb5df071d20545cdfd3771cd40f90e5ba04c8e","0x825a6f586726c68d45f00ad0f5a4436523317939a47713f78fd4fe81cd74236fdac1b04ecd97c2d0267d6f4981d7beb1"],"g2_monomial":["0x93e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8","0xb5bfd7dd8cdeb128843bc287230af38926187075cbfbefa81009a2ce615ac53d2914e58
70cb452d2afaaab24f3499f72185cbfee53492714734429b7b38608e23926c911cceceac9a36851477ba4c60b087041de621000edc98edada20c1def2","0xb5337ba0ce5d37224290916e268e2060e5c14f3f9fc9e1ec3af5a958e7a0303122500ce18f1a4640bf66525bd10e763501fe986d86649d8d45143c08c3209db3411802c226e9fe9a55716ac4a0c14f9dcef9e70b2bb309553880dc5025eab3cc","0xb3c1dcdc1f62046c786f0b82242ef283e7ed8f5626f72542aa2c7a40f14d9094dd1ebdbd7457ffdcdac45fd7da7e16c51200b06d791e5e43e257e45efdf0bd5b06cd2333beca2a3a84354eb48662d83aef5ecf4e67658c851c10b13d8d87c874","0x954d91c7688983382609fca9e211e461f488a5971fd4e40d7e2892037268eacdfd495cfa0a7ed6eb0eb11ac3ae6f651716757e7526abe1e06c64649d80996fd3105c20c4c94bc2b22d97045356fe9d791f21ea6428ac48db6f9e68e30d875280","0x88a6b6bb26c51cf9812260795523973bb90ce80f6820b6c9048ab366f0fb96e48437a7f7cb62aedf64b11eb4dfefebb0147608793133d32003cb1f2dc47b13b5ff45f1bb1b2408ea45770a08dbfaec60961acb8119c47b139a13b8641e2c9487","0x85cd7be9728bd925d12f47fb04b32d9fad7cab88788b559f053e69ca18e463113ecc8bbb6dbfb024835f901b3a957d3108d6770fb26d4c8be0a9a619f6e3a4bf15cbfd48e61593490885f6cee30e4300c5f9cf5e1c08e60a2d5b023ee94fcad0","0x80477dba360f04399821a48ca388c0fa81102dd15687fea792ee8c1114e00d1bc4839ad37ac58900a118d863723acfbe08126ea883be87f50e4eabe3b5e72f5d9e041db8d9b186409fd4df4a7dde38c0e0a3b1ae29b098e5697e7f110b6b27e4","0xb7a6aec08715a9f8672a2b8c367e407be37e59514ac19dd4f0942a68007bba3923df22da48702c63c0d6b3efd3c2d04e0fe042d8b5a54d562f9f33afc4865dcbcc16e99029e25925580e87920c399e710d438ac1ce3a6dc9b0d76c064a01f6f7","0xac1b001edcea02c8258aeffbf9203114c1c874ad88dae1184fadd7d94cd09053649efd0ca413400e6e9b5fa4eac33261000af88b6bd0d2abf877a4f0355d2fb4d6007adb181695201c5432e50b850b51b3969f893bddf82126c5a71b042b7686","0x90043fda4de53fb364fab2c04be5296c215599105ecff0c12e4917c549257125775c29f2507124d15f56e30447f367db0596c33237242c02d83dfd058735f1e3c1ff99069af55773b6d51d32a68bf75763f59ec4ee7267932ae426522b8aaab6","0xa8660ce853e9dc08271bf882e29cd53397d63b739584dda5263da4c7cc1878d0cf6f3e403557885f557e184700575fee016ee
8542dec22c97befe1d10f414d22e84560741cdb3e74c30dda9b42eeaaf53e27822de2ee06e24e912bf764a9a533","0x8fe3921a96d0d065e8aa8fce9aa42c8e1461ca0470688c137be89396dd05103606dab6cdd2a4591efd6addf72026c12e065da7be276dee27a7e30afa2bd81c18f1516e7f068f324d0bad9570b95f6bd02c727cd2343e26db0887c3e4e26dceda","0x8ae1ad97dcb9c192c9a3933541b40447d1dc4eebf380151440bbaae1e120cc5cdf1bcea55180b128d8e180e3af623815191d063cc0d7a47d55fb7687b9d87040bf7bc1a7546b07c61db5ccf1841372d7c2fe4a5431ffff829f3c2eb590b0b710","0x8c2fa96870a88150f7876c931e2d3cc2adeaaaf5c73ef5fa1cf9dfa0991ae4819f9321af7e916e5057d87338e630a2f21242c29d76963cf26035b548d2a63d8ad7bd6efefa01c1df502cbdfdfe0334fb21ceb9f686887440f713bf17a89b8081","0xb9aa98e2f02bb616e22ee5dd74c7d1049321ac9214d093a738159850a1dbcc7138cb8d26ce09d8296368fd5b291d74fa17ac7cc1b80840fdd4ee35e111501e3fa8485b508baecda7c1ab7bd703872b7d64a2a40b3210b6a70e8a6ffe0e5127e3","0x9292db67f8771cdc86854a3f614a73805bf3012b48f1541e704ea4015d2b6b9c9aaed36419769c87c49f9e3165f03edb159c23b3a49c4390951f78e1d9b0ad997129b17cdb57ea1a6638794c0cca7d239f229e589c5ae4f9fe6979f7f8cba1d7","0x91cd9e86550f230d128664f7312591fee6a84c34f5fc7aed557bcf986a409a6de722c4330453a305f06911d2728626e611acfdf81284f77f60a3a1595053a9479964fd713117e27c0222cc679674b03bc8001501aaf9b506196c56de29429b46","0xa9516b73f605cc31b89c68b7675dc451e6364595243d235339437f556cf22d745d4250c1376182273be2d99e02c10eee047410a43eff634d051aeb784e76cb3605d8e079b9eb6ad1957dfdf77e1cd32ce4a573c9dfcc207ca65af6eb187f6c3d","0xa9667271f7d191935cc8ad59ef3ec50229945faea85bfdfb0d582090f524436b348aaa0183b16a6231c00332fdac2826125b8c857a2ed9ec66821cfe02b3a2279be2412441bc2e369b255eb98614e4be8490799c4df22f18d47d24ec70bba5f7","0xa4371144d2aa44d70d3cb9789096d3aa411149a6f800cb46f506461ee8363c8724667974252f28aea61b6030c05930ac039c1ee64bb4bd56532a685cae182bf2ab935eee34718cffcb46cae214c77aaca11dbb1320faf23c47247db1da04d8dc","0x89a7eb441892260b7e81168c386899cd84ffc4a2c5cad2eae0d1ab9e8b5524662e6f660fe3f8bfe4c92f60b060811bc605b14c5631d16709266886d7885a5eb5930
097127ec6fb2ebbaf2df65909cf48f253b3d5e22ae48d3e9a2fd2b01f447e","0x9648c42ca97665b5eccb49580d8532df05eb5a68db07f391a2340769b55119eaf4c52fe4f650c09250fa78a76c3a1e271799b8333cc2628e3d4b4a6a3e03da1f771ecf6516dd63236574a7864ff07e319a6f11f153406280d63af9e2b5713283","0x9663bf6dd446ea7a90658ee458578d4196dc0b175ef7fcfa75f44d41670850774c2e46c5a6be132a2c072a3c0180a24f0305d1acac49d2d79878e5cda80c57feda3d01a6af12e78b5874e2a4b3717f11c97503b41a4474e2e95b179113726199","0xb212aeb4814e0915b432711b317923ed2b09e076aaf558c3ae8ef83f9e15a83f9ea3f47805b2750ab9e8106cb4dc6ad003522c84b03dc02829978a097899c773f6fb31f7fe6b8f2d836d96580f216fec20158f1590c3e0d7850622e15194db05","0x925f005059bf07e9ceccbe66c711b048e236ade775720d0fe479aebe6e23e8af281225ad18e62458dc1b03b42ad4ca290d4aa176260604a7aad0d9791337006fbdebe23746f8060d42876f45e4c83c3643931392fde1cd13ff8bddf8111ef974","0x9553edb22b4330c568e156a59ef03b26f5c326424f830fe3e8c0b602f08c124730ffc40bc745bec1a22417adb22a1a960243a10565c2be3066bfdb841d1cd14c624cd06e0008f4beb83f972ce6182a303bee3fcbcabc6cfe48ec5ae4b7941bfc","0x935f5a404f0a78bdcce709899eda0631169b366a669e9b58eacbbd86d7b5016d044b8dfc59ce7ed8de743ae16c2343b50e2f925e88ba6319e33c3fc76b314043abad7813677b4615c8a97eb83cc79de4fedf6ccbcfa4d4cbf759a5a84e4d9742","0xa5b014ab936eb4be113204490e8b61cd38d71da0dec7215125bcd131bf3ab22d0a32ce645bca93e7b3637cf0c2db3d6601a0ddd330dc46f9fae82abe864ffc12d656c88eb50c20782e5bb6f75d18760666f43943abb644b881639083e122f557","0x935b7298ae52862fa22bf03bfc1795b34c70b181679ae27de08a9f5b4b884f824ef1b276b7600efa0d2f1d79e4a470d51692fd565c5cf8343dd80e5d3336968fc21c09ba9348590f6206d4424eb229e767547daefa98bc3aa9f421158dee3f2a","0x9830f92446e708a8f6b091cc3c38b653505414f8b6507504010a96ffda3bcf763d5331eb749301e2a1437f00e2415efb01b799ad4c03f4b02de077569626255ac1165f96ea408915d4cf7955047620da573e5c439671d1fa5c833fb11de7afe6","0x840dcc44f673fff3e387af2bb41e89640f2a70bcd2b92544876daa92143f67c7512faf5f90a04b7191de01f3e2b1bde00622a20dc62ca23bbbfaa6ad220613deff43908382642d4d6a86999f662efd64b
1df448b68c847cfa87630a3ffd2ec76","0x92950c895ed54f7f876b2fda17ecc9c41b7accfbdd42c210cc5b475e0737a7279f558148531b5c916e310604a1de25a80940c94fe5389ae5d6a5e9c371be67bceea1877f5401725a6595bcf77ece60905151b6dfcb68b75ed2e708c73632f4fd","0x8010246bf8e94c25fd029b346b5fbadb404ef6f44a58fd9dd75acf62433d8cc6db66974f139a76e0c26dddc1f329a88214dbb63276516cf325c7869e855d07e0852d622c332ac55609ba1ec9258c45746a2aeb1af0800141ee011da80af175d4","0xb0f1bad257ebd187bdc3f37b23f33c6a5d6a8e1f2de586080d6ada19087b0e2bf23b79c1b6da1ee82271323f5bdf3e1b018586b54a5b92ab6a1a16bb3315190a3584a05e6c37d5ca1e05d702b9869e27f513472bcdd00f4d0502a107773097da","0x9636d24f1ede773ce919f309448dd7ce023f424afd6b4b69cb98c2a988d849a283646dc3e469879daa1b1edae91ae41f009887518e7eb5578f88469321117303cd3ac2d7aee4d9cb5f82ab9ae3458e796dfe7c24284b05815acfcaa270ff22e2","0xb373feb5d7012fd60578d7d00834c5c81df2a23d42794fed91aa9535a4771fde0341c4da882261785e0caca40bf83405143085e7f17e55b64f6c5c809680c20b050409bf3702c574769127c854d27388b144b05624a0e24a1cbcc4d08467005b","0xb15680648949ce69f82526e9b67d9b55ce5c537dc6ab7f3089091a9a19a6b90df7656794f6edc87fb387d21573ffc847062623685931c2790a508cbc8c6b231dd2c34f4d37d4706237b1407673605a604bcf6a50cc0b1a2db20485e22b02c17e","0x8817e46672d40c8f748081567b038a3165f87994788ec77ee8daea8587f5540df3422f9e120e94339be67f186f50952504cb44f61e30a5241f1827e501b2de53c4c64473bcc79ab887dd277f282fbfe47997a930dd140ac08b03efac88d81075","0xa6e4ef6c1d1098f95aae119905f87eb49b909d17f9c41bcfe51127aa25fee20782ea884a7fdf7d5e9c245b5a5b32230b07e0dbf7c6743bf52ee20e2acc0b269422bd6cf3c07115df4aa85b11b2c16630a07c974492d9cdd0ec325a3fabd95044","0x8634aa7c3d00e7f17150009698ce440d8e1b0f13042b624a722ace68ead870c3d2212fbee549a2c190e384d7d6ac37ce14ab962c299ea1218ef1b1489c98906c91323b94c587f1d205a6edd5e9d05b42d591c26494a6f6a029a2aadb5f8b6f67","0x821a58092900bdb73decf48e13e7a5012a3f88b06288a97b855ef51306406e7d867d613d9ec738ebacfa6db344b677d21509d93f3b55c2ebf3a2f2a6356f875150554c6fff52e62e3e46f7859be971bf7dd9d5b3e1d799749c8a97c2e04325d
f","0x8dba356577a3a388f782e90edb1a7f3619759f4de314ad5d95c7cc6e197211446819c4955f99c5fc67f79450d2934e3c09adefc91b724887e005c5190362245eec48ce117d0a94d6fa6db12eda4ba8dde608fbbd0051f54dcf3bb057adfb2493","0xa32a690dc95c23ed9fb46443d9b7d4c2e27053a7fcc216d2b0020a8cf279729c46114d2cda5772fd60a97016a07d6c5a0a7eb085a18307d34194596f5b541cdf01b2ceb31d62d6b55515acfd2b9eec92b27d082fbc4dc59fc63b551eccdb8468","0xa040f7f4be67eaf0a1d658a3175d65df21a7dbde99bfa893469b9b43b9d150fc2e333148b1cb88cfd0447d88fa1a501d126987e9fdccb2852ecf1ba907c2ca3d6f97b055e354a9789854a64ecc8c2e928382cf09dda9abde42bbdf92280cdd96","0x864baff97fa60164f91f334e0c9be00a152a416556b462f96d7c43b59fe1ebaff42f0471d0bf264976f8aa6431176eb905bd875024cf4f76c13a70bede51dc3e47e10b9d5652d30d2663b3af3f08d5d11b9709a0321aba371d2ef13174dcfcaf","0x95a46f32c994133ecc22db49bad2c36a281d6b574c83cfee6680b8c8100466ca034b815cfaedfbf54f4e75188e661df901abd089524e1e0eb0bf48d48caa9dd97482d2e8c1253e7e8ac250a32fd066d5b5cb08a8641bdd64ecfa48289dca83a3","0xa2cce2be4d12144138cb91066e0cd0542c80b478bf467867ebef9ddaf3bd64e918294043500bf5a9f45ee089a8d6ace917108d9ce9e4f41e7e860cbce19ac52e791db3b6dde1c4b0367377b581f999f340e1d6814d724edc94cb07f9c4730774","0xb145f203eee1ac0a1a1731113ffa7a8b0b694ef2312dabc4d431660f5e0645ef5838e3e624cfe1228cfa248d48b5760501f93e6ab13d3159fc241427116c4b90359599a4cb0a86d0bb9190aa7fabff482c812db966fd2ce0a1b48cb8ac8b3bca","0xadabe5d215c608696e03861cbd5f7401869c756b3a5aadc55f41745ad9478145d44393fec8bb6dfc4ad9236dc62b9ada0f7ca57fe2bae1b71565dbf9536d33a68b8e2090b233422313cc96afc7f1f7e0907dc7787806671541d6de8ce47c4cd0","0xae7845fa6b06db53201c1080e01e629781817f421f28956589c6df3091ec33754f8a4bd4647a6bb1c141ac22731e3c1014865d13f3ed538dcb0f7b7576435133d9d03be655f8fbb4c9f7d83e06d1210aedd45128c2b0c9bab45a9ddde1c862a5","0x9159eaa826a24adfa7adf6e8d2832120ebb6eccbeb3d0459ffdc338548813a2d239d22b26451fda98cc0c204d8e1ac69150b5498e0be3045300e789bcb4e210d5cd431da4bdd915a21f407ea296c20c96608ded0b70d07188e96e6c1a7b9b86b","0xa9fc6281e2d54b46458ef564
ffaed6944bff71e389d0acc11fa35d3fcd8e10c1066e0dde5b9b6516f691bb478e81c6b20865281104dcb640e29dc116daae2e884f1fe6730d639dbe0e19a532be4fb337bf52ae8408446deb393d224eee7cfa50","0x84291a42f991bfb36358eedead3699d9176a38f6f63757742fdbb7f631f2c70178b1aedef4912fed7b6cf27e88ddc7eb0e2a6aa4b999f3eb4b662b93f386c8d78e9ac9929e21f4c5e63b12991fcde93aa64a735b75b535e730ff8dd2abb16e04","0xa1b7fcacae181495d91765dfddf26581e8e39421579c9cbd0dd27a40ea4c54af3444a36bf85a11dda2114246eaddbdd619397424bb1eb41b5a15004b902a590ede5742cd850cf312555be24d2df8becf48f5afba5a8cd087cb7be0a521728386","0x92feaaf540dbd84719a4889a87cdd125b7e995a6782911931fef26da9afcfbe6f86aaf5328fe1f77631491ce6239c5470f44c7791506c6ef1626803a5794e76d2be0af92f7052c29ac6264b7b9b51f267ad820afc6f881460521428496c6a5f1","0xa525c925bfae1b89320a5054acc1fa11820f73d0cf28d273092b305467b2831fab53b6daf75fb926f332782d50e2522a19edcd85be5eb72f1497193c952d8cd0bcc5d43b39363b206eae4cb1e61668bde28a3fb2fc1e0d3d113f6dfadb799717","0x98752bb6f5a44213f40eda6aa4ff124057c1b13b6529ab42fe575b9afa66e59b9c0ed563fb20dff62130c436c3e905ee17dd8433ba02c445b1d67182ab6504a90bbe12c26a754bbf734665c622f76c62fe2e11dd43ce04fd2b91a8463679058b","0xa9aa9a84729f7c44219ff9e00e651e50ddea3735ef2a73fdf8ed8cd271961d8ed7af5cd724b713a89a097a3fe65a3c0202f69458a8b4c157c62a85668b12fc0d3957774bc9b35f86c184dd03bfefd5c325da717d74192cc9751c2073fe9d170e","0xb221c1fd335a4362eff504cd95145f122bf93ea02ae162a3fb39c75583fc13a932d26050e164da97cff3e91f9a7f6ff80302c19dd1916f24acf6b93b62f36e9665a8785413b0c7d930c7f1668549910f849bca319b00e59dd01e5dec8d2edacc","0xa71e2b1e0b16d754b848f05eda90f67bedab37709550171551050c94efba0bfc282f72aeaaa1f0330041461f5e6aa4d11537237e955e1609a469d38ed17f5c2a35a1752f546db89bfeff9eab78ec944266f1cb94c1db3334ab48df716ce408ef","0xb990ae72768779ba0b2e66df4dd29b3dbd00f901c23b2b4a53419226ef9232acedeb498b0d0687c463e3f1eead58b20b09efcefa566fbfdfe1c6e48d32367936142d0a734143e5e63cdf86be7457723535b787a9cfcfa32fe1d61ad5a2617220","0x8d27e7fbff77d5b9b9bbc864d5231fecf817238a6433db668d5a62
a2c1ee1e5694fdd90c3293c06cc0cb15f7cbeab44d0d42be632cb9ff41fc3f6628b4b62897797d7b56126d65b694dcf3e298e3561ac8813fbd7296593ced33850426df42db","0xa92039a08b5502d5b211a7744099c9f93fa8c90cedcb1d05e92f01886219dd464eb5fb0337496ad96ed09c987da4e5f019035c5b01cc09b2a18b8a8dd419bc5895388a07e26958f6bd26751929c25f89b8eb4a299d822e2d26fec9ef350e0d3c","0x92dcc5a1c8c3e1b28b1524e3dd6dbecd63017c9201da9dbe077f1b82adc08c50169f56fc7b5a3b28ec6b89254de3e2fd12838a761053437883c3e01ba616670cea843754548ef84bcc397de2369adcca2ab54cd73c55dc68d87aec3fc2fe4f10"]} \ No newline at end of file diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 7274bbf029..565b8d7890 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -11,12 +11,23 @@ //! To add a new built-in testnet, add it to the `define_hardcoded_nets` invocation in the `eth2_config` //! crate. +use bytes::Bytes; use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use pretty_reqwest_error::PrettyReqwestError; +use reqwest::{Client, Error}; +use sensitive_url::SensitiveUrl; +use sha2::{Digest, Sha256}; +use slog::{info, warn, Logger}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; -use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId}; +use std::str::FromStr; +use std::time::Duration; +use types::{BeaconState, ChainSpec, Config, Epoch, EthSpec, EthSpecId, Hash256}; +use url::Url; + +pub use eth2_config::GenesisStateSource; pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; @@ -32,6 +43,55 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; +/// Contains the bytes from the trusted setup json. +/// The mainnet trusted setup is also reused in testnets. 
+/// +/// This is done to ensure that testnets also inherit the high security and +/// randomness of the mainnet kzg trusted setup ceremony. +/// +/// Note: The trusted setup for both mainnet and minimal presets are the same. +pub const TRUSTED_SETUP_BYTES: &[u8] = + include_bytes!("../built_in_network_configs/trusted_setup.json"); + +/// Returns `Some(TrustedSetup)` if the deneb fork epoch is set and `None` otherwise. +/// +/// Returns an error if the trusted setup parsing failed. +fn get_trusted_setup_from_config(config: &Config) -> Option<Vec<u8>> { + config + .deneb_fork_epoch + .filter(|epoch| epoch.value != Epoch::max_value()) + .map(|_| TRUSTED_SETUP_BYTES.to_vec()) +} + +/// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the +/// binary whilst also supporting loading them from a file at runtime. +#[derive(Clone, PartialEq, Debug)] +pub enum GenesisStateBytes { + Slice(&'static [u8]), + Vec(Vec<u8>), +} + +impl AsRef<[u8]> for GenesisStateBytes { + fn as_ref(&self) -> &[u8] { + match self { + GenesisStateBytes::Slice(slice) => slice, + GenesisStateBytes::Vec(vec) => vec.as_ref(), + } + } +} + +impl From<&'static [u8]> for GenesisStateBytes { + fn from(slice: &'static [u8]) -> Self { + GenesisStateBytes::Slice(slice) + } +} + +impl From<Vec<u8>> for GenesisStateBytes { + fn from(vec: Vec<u8>) -> Self { + GenesisStateBytes::Vec(vec) + } +} + /// Specifies an Eth2 network. /// /// See the crate-level documentation for more details. @@ -41,8 +101,10 @@ pub struct Eth2NetworkConfig { /// value to be the block number where the first deposit occurs. 
pub deposit_contract_deploy_block: u64, pub boot_enr: Option<Vec<Enr<CombinedKey>>>, - pub genesis_state_bytes: Option<Vec<u8>>, + pub genesis_state_source: GenesisStateSource, + pub genesis_state_bytes: Option<GenesisStateBytes>, pub config: Config, + pub kzg_trusted_setup: Option<Vec<u8>>, } impl Eth2NetworkConfig { @@ -58,6 +120,9 @@ impl Eth2NetworkConfig { /// Instantiates `Self` from a `HardcodedNet`. fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> { + let config: Config = serde_yaml::from_reader(net.config) + .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?; + let kzg_trusted_setup = get_trusted_setup_from_config(&config); Ok(Self { deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block) .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, @@ -65,10 +130,12 @@ impl Eth2NetworkConfig { serde_yaml::from_reader(net.boot_enr) .map_err(|e| format!("Unable to parse boot enr: {:?}", e))?, ), - genesis_state_bytes: Some(net.genesis_state_bytes.to_vec()) - .filter(|bytes| !bytes.is_empty()), - config: serde_yaml::from_reader(net.config) - .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, + genesis_state_source: net.genesis_state_source, + genesis_state_bytes: Some(net.genesis_state_bytes) + .filter(|bytes| !bytes.is_empty()) + .map(Into::into), + config, + kzg_trusted_setup, }) } @@ -81,8 +148,29 @@ impl Eth2NetworkConfig { } /// Returns `true` if this configuration contains a `BeaconState`. - pub fn beacon_state_is_known(&self) -> bool { - self.genesis_state_bytes.is_some() + pub fn genesis_state_is_known(&self) -> bool { + self.genesis_state_source != GenesisStateSource::Unknown + } + + /// The `genesis_validators_root` of the genesis state. + pub fn genesis_validators_root<E: EthSpec>(&self) -> Result<Option<Hash256>, String> { + if let GenesisStateSource::Url { + genesis_validators_root, + .. 
+ } = self.genesis_state_source + { + Hash256::from_str(genesis_validators_root) + .map(Option::Some) + .map_err(|e| { + format!( + "Unable to parse genesis state genesis_validators_root: {:?}", + e + ) + }) + } else { + self.get_genesis_state_from_bytes::<E>() + .map(|state| Some(state.genesis_validators_root())) + } } /// Construct a consolidated `ChainSpec` from the YAML config. @@ -96,15 +184,68 @@ impl Eth2NetworkConfig { } /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid. - pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> { + /// + /// If the genesis state is configured to be downloaded from a URL, then the + /// `genesis_state_url` will override the built-in list of download URLs. + pub async fn genesis_state<E: EthSpec>( + &self, + genesis_state_url: Option<&str>, + timeout: Duration, + log: &Logger, + ) -> Result<Option<BeaconState<E>>, String> { let spec = self.chain_spec::<E>()?; - let genesis_state_bytes = self - .genesis_state_bytes - .as_ref() - .ok_or("Genesis state is unknown")?; + match &self.genesis_state_source { + GenesisStateSource::Unknown => Ok(None), + GenesisStateSource::IncludedBytes => { + let state = self.get_genesis_state_from_bytes()?; + Ok(Some(state)) + } + GenesisStateSource::Url { + urls: built_in_urls, + checksum, + genesis_validators_root, + } => { + let checksum = Hash256::from_str(checksum).map_err(|e| { + format!("Unable to parse genesis state bytes checksum: {:?}", e) + })?; + let bytes = if let Some(specified_url) = genesis_state_url { + download_genesis_state(&[specified_url], timeout, checksum, log).await + } else { + download_genesis_state(built_in_urls, timeout, checksum, log).await + }?; + let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { + format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e) + })?; - BeaconState::from_ssz_bytes(genesis_state_bytes, &spec) - .map_err(|e| format!("Genesis state SSZ bytes are 
invalid: {:?}", e)) + let genesis_validators_root = + Hash256::from_str(genesis_validators_root).map_err(|e| { + format!( + "Unable to parse genesis state genesis_validators_root: {:?}", + e + ) + })?; + if state.genesis_validators_root() != genesis_validators_root { + return Err(format!( + "Downloaded genesis validators root {:?} does not match expected {:?}", + state.genesis_validators_root(), + genesis_validators_root + )); + } + + Ok(Some(state)) + } + } + } + + fn get_genesis_state_from_bytes<E: EthSpec>(&self) -> Result<BeaconState<E>, String> { + let spec = self.chain_spec::<E>()?; + self.genesis_state_bytes + .as_ref() + .map(|bytes| { + BeaconState::from_ssz_bytes(bytes.as_ref(), &spec) + .map_err(|e| format!("Built-in genesis state SSZ bytes are invalid: {:?}", e)) + }) + .ok_or("Genesis state bytes missing from Eth2NetworkConfig")? } /// Write the files to the directory. @@ -162,7 +303,7 @@ impl Eth2NetworkConfig { File::create(&file) .map_err(|e| format!("Unable to create {:?}: {:?}", file, e)) .and_then(|mut file| { - file.write_all(genesis_state_bytes) + file.write_all(genesis_state_bytes.as_ref()) .map_err(|e| format!("Unable to write {:?}: {:?}", file, e)) })?; } @@ -198,7 +339,7 @@ impl Eth2NetworkConfig { // The genesis state is a special case because it uses SSZ, not YAML. 
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE); - let genesis_state_bytes = if genesis_file_path.exists() { + let (genesis_state_bytes, genesis_state_source) = if genesis_file_path.exists() { let mut bytes = vec![]; File::open(&genesis_file_path) .map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e)) @@ -207,20 +348,115 @@ impl Eth2NetworkConfig { .map_err(|e| format!("Unable to read {:?}: {:?}", file, e)) })?; - Some(bytes).filter(|bytes| !bytes.is_empty()) + let state = Some(bytes).filter(|bytes| !bytes.is_empty()); + let genesis_state_source = if state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + (state, genesis_state_source) } else { - None + (None, GenesisStateSource::Unknown) }; + let kzg_trusted_setup = get_trusted_setup_from_config(&config); + Ok(Self { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes, + genesis_state_source, + genesis_state_bytes: genesis_state_bytes.map(Into::into), config, + kzg_trusted_setup, }) } } +/// Try to download a genesis state from each of the `urls` in the order they +/// are defined. Return `Ok` if any url returns a response that matches the +/// given `checksum`. +async fn download_genesis_state( + urls: &[&str], + timeout: Duration, + checksum: Hash256, + log: &Logger, +) -> Result<Vec<u8>, String> { + if urls.is_empty() { + return Err( + "The genesis state is not present in the binary and there are no known download URLs. \ + Please use --checkpoint-sync-url or --genesis-state-url." + .to_string(), + ); + } + + let mut errors = vec![]; + for url in urls { + // URLs are always expected to be the base URL of a server that supports + // the beacon-API. 
+ let url = parse_state_download_url(url)?; + let redacted_url = SensitiveUrl::new(url.clone()) + .map(|url| url.to_string()) + .unwrap_or_else(|_| "<REDACTED>".to_string()); + + info!( + log, + "Downloading genesis state"; + "server" => &redacted_url, + "timeout" => ?timeout, + "info" => "this may take some time on testnets with large validator counts" + ); + + let client = Client::new(); + let response = get_state_bytes(timeout, url, client).await; + + match response { + Ok(bytes) => { + // Check the server response against our local checksum. + if Sha256::digest(bytes.as_ref())[..] == checksum[..] { + return Ok(bytes.into()); + } else { + warn!( + log, + "Genesis state download failed"; + "server" => &redacted_url, + "timeout" => ?timeout, + ); + errors.push(format!( + "Response from {} did not match local checksum", + redacted_url + )) + } + } + Err(e) => errors.push(PrettyReqwestError::from(e).to_string()), + } + } + Err(format!( + "Unable to download a genesis state from {} source(s): {}", + errors.len(), + errors.join(",") + )) +} + +async fn get_state_bytes(timeout: Duration, url: Url, client: Client) -> Result<Bytes, Error> { + client + .get(url) + .header("Accept", "application/octet-stream") + .timeout(timeout) + .send() + .await? + .error_for_status()? + .bytes() + .await +} + +/// Parses the `url` and joins the necessary state download path. +fn parse_state_download_url(url: &str) -> Result<Url, String> { + Url::parse(url) + .map_err(|e| format!("Invalid genesis state URL: {:?}", e))? 
+ .join("eth/v2/debug/beacon/states/genesis") + .map_err(|e| format!("Failed to append genesis state path to URL: {:?}", e)) +} + #[cfg(test)] mod tests { use super::*; @@ -257,10 +493,13 @@ mod tests { assert_eq!(spec, config.chain_spec::<GnosisEthSpec>().unwrap()); } - #[test] - fn mainnet_genesis_state() { + #[tokio::test] + async fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); - config.beacon_state::<E>().expect("beacon state can decode"); + config + .genesis_state::<E>(None, Duration::from_secs(1), &logging::test_logger()) + .await + .expect("beacon state can decode"); } #[test] @@ -274,10 +513,10 @@ mod tests { fn hard_coded_nets_work() { for net in HARDCODED_NETS { let config = Eth2NetworkConfig::from_hardcoded_net(net) - .unwrap_or_else(|_| panic!("{:?}", net.name)); + .unwrap_or_else(|e| panic!("{:?}: {:?}", net.name, e)); // Ensure we can parse the YAML config to a chain spec. - if net.name == types::GNOSIS { + if config.config.preset_base == types::GNOSIS { config.chain_spec::<GnosisEthSpec>().unwrap(); } else { config.chain_spec::<MainnetEthSpec>().unwrap(); @@ -285,10 +524,25 @@ mod tests { assert_eq!( config.genesis_state_bytes.is_some(), - net.genesis_is_known, + net.genesis_state_source == GenesisStateSource::IncludedBytes, "{:?}", net.name ); + + if let GenesisStateSource::Url { + urls, + checksum, + genesis_validators_root, + } = net.genesis_state_source + { + Hash256::from_str(checksum).expect("the checksum must be a valid 32-byte value"); + Hash256::from_str(genesis_validators_root) + .expect("the GVR must be a valid 32-byte value"); + for url in urls { + parse_state_download_url(url).expect("url must be valid"); + } + } + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } @@ -324,11 +578,22 @@ mod tests { let base_dir = temp_dir.path().join("my_testnet"); let deposit_contract_deploy_block = 42; - let testnet: Eth2NetworkConfig = Eth2NetworkConfig { + let 
genesis_state_source = if genesis_state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + + let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes), + genesis_state_source, + genesis_state_bytes: genesis_state + .as_ref() + .map(Encode::as_ssz_bytes) + .map(Into::into), config, + kzg_trusted_setup: None, }; testnet diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index 8e6f0c0e5c..f471757065 100644 --- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -2,13 +2,13 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -eth2_wallet = { path = "../../crypto/eth2_wallet" } -lockfile = { path = "../lockfile" } +eth2_wallet = { workspace = true } +lockfile = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/common/filesystem/Cargo.toml b/common/filesystem/Cargo.toml index 66cbedbb83..fd026bd517 100644 --- a/common/filesystem/Cargo.toml +++ b/common/filesystem/Cargo.toml @@ -2,7 +2,7 @@ name = "filesystem" version = "0.1.0" authors = ["Mark Mackey <mark@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml index 06e33555d9..6d90534401 100644 --- a/common/lighthouse_metrics/Cargo.toml +++ b/common/lighthouse_metrics/Cargo.toml @@ -2,10 +2,10 @@ name = "lighthouse_metrics" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and 
their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = "1.4.0" +lazy_static = { workspace = true } prometheus = "0.13.0" diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 96ac266476..3c4f9fe50c 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -2,7 +2,7 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -11,4 +11,4 @@ git-version = "0.3.4" target_info = "0.1.0" [dev-dependencies] -regex = "1.5.5" +regex = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index e874432fbc..c31917e040 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.3.0-", - fallback = "Lighthouse/v4.3.0" + prefix = "Lighthouse/v4.5.0-", + fallback = "Lighthouse/v4.5.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/common/lockfile/Cargo.toml b/common/lockfile/Cargo.toml index b9616e8715..d0cca13d45 100644 --- a/common/lockfile/Cargo.toml +++ b/common/lockfile/Cargo.toml @@ -2,10 +2,10 @@ name = "lockfile" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] -fs2 = "0.4.3" +fs2 = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index b6179d9e78..9c5321591b 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -2,21 +2,21 @@ name = "logging" version = "0.2.0" authors = ["blacktemplar <blacktemplar@a1.net>"] -edition = "2021" +edition = { workspace = true } [features] test_logger = [] # Print log output to stderr when running tests instead of dropping it [dependencies] -slog = "2.5.2" -slog-term = "2.6.0" -tokio = { version = "1.26.0", features = ["sync"] } -lighthouse_metrics = { path = "../lighthouse_metrics" } -lazy_static = "1.4.0" -sloggers = { version = "2.1.1", features = ["json"] } -slog-async = "2.7.0" +slog = { workspace = true } +slog-term = { workspace = true } +tokio = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +sloggers = { workspace = true } +slog-async = { workspace = true } take_mut = "0.2.2" -parking_lot = "0.12.1" -serde = "1.0.153" -serde_json = "1.0.94" -chrono = "0.4.23" +parking_lot = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml index 405b7be5d8..c1bd15f9f8 100644 --- a/common/lru_cache/Cargo.toml +++ b/common/lru_cache/Cargo.toml @@ -2,7 +2,10 @@ name = "lru_cache" version = "0.1.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } 
[dependencies] -fnv = "1.0.7" +fnv = { workspace = true } + +[dev-dependencies] +mock_instant = "0.3" diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 966741ca4d..0b2fd83568 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -1,7 +1,13 @@ //! This implements a time-based LRU cache for fast checking of duplicates use fnv::FnvHashSet; +#[cfg(test)] +use mock_instant::Instant; use std::collections::VecDeque; -use std::time::{Duration, Instant}; + +#[cfg(not(test))] +use std::time::Instant; + +use std::time::Duration; struct Element<Key> { /// The key being inserted. @@ -222,16 +228,16 @@ mod test { cache.insert("a"); cache.insert("b"); - std::thread::sleep(Duration::from_millis(20)); + mock_instant::MockClock::advance(Duration::from_millis(20)); cache.insert("a"); // a is newer now - std::thread::sleep(Duration::from_millis(85)); + mock_instant::MockClock::advance(Duration::from_millis(85)); assert!(cache.contains(&"a"),); // b was inserted first but was not as recent it should have been removed assert!(!cache.contains(&"b")); - std::thread::sleep(Duration::from_millis(16)); + mock_instant::MockClock::advance(Duration::from_millis(16)); assert!(!cache.contains(&"a")); } } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index c88ec0bd5a..4a5f39b661 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -2,13 +2,13 @@ name = "malloc_utils" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -lighthouse_metrics = { path = "../lighthouse_metrics" } -lazy_static = "1.4.0" +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } libc = "0.2.79" -parking_lot = "0.12.0" +parking_lot = { workspace = true } jemalloc-ctl = { version = "0.5.0", optional = true } # Jemalloc's background_threads feature requires Linux (pthreads). 
diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index bfb5e72042..3731229c39 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -2,22 +2,21 @@ name = "monitoring_api" version = "0.1.0" authors = ["pawan <pawandhananjay@gmail.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { version = "0.11.0", features = ["json","stream"] } -task_executor = { path = "../task_executor" } -tokio = "1.14.0" -eth2 = {path = "../eth2"} -serde_json = "1.0.58" -serde = "1.0.116" -serde_derive = "1.0.116" -lighthouse_version = { path = "../lighthouse_version"} -lighthouse_metrics = { path = "../lighthouse_metrics" } -slog = "2.5.2" -store = { path = "../../beacon_node/store" } -lazy_static = "1.4.0" -regex = "1.5.5" -sensitive_url = { path = "../sensitive_url" } +reqwest = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +eth2 = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +lighthouse_version = { workspace = true } +lighthouse_metrics = { workspace = true } +slog = { workspace = true } +store = { workspace = true } +lazy_static = { workspace = true } +regex = { workspace = true } +sensitive_url = { workspace = true } diff --git a/common/monitoring_api/src/types.rs b/common/monitoring_api/src/types.rs index 9765e34613..cf33ccb9c0 100644 --- a/common/monitoring_api/src/types.rs +++ b/common/monitoring_api/src/types.rs @@ -1,7 +1,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; pub const VERSION: u64 = 1; pub const CLIENT_NAME: &str = "lighthouse"; diff --git a/common/oneshot_broadcast/Cargo.toml b/common/oneshot_broadcast/Cargo.toml index baefe10661..12c9b40bc8 100644 --- 
a/common/oneshot_broadcast/Cargo.toml +++ b/common/oneshot_broadcast/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "oneshot_broadcast" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -parking_lot = "0.12.0" +parking_lot = { workspace = true } diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml index ca9f4812b0..dc79832cd3 100644 --- a/common/pretty_reqwest_error/Cargo.toml +++ b/common/pretty_reqwest_error/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "pretty_reqwest_error" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { version = "0.11.0", features = ["json","stream"] } -sensitive_url = { path = "../sensitive_url" } +reqwest = { workspace = true } +sensitive_url = { workspace = true } diff --git a/common/pretty_reqwest_error/src/lib.rs b/common/pretty_reqwest_error/src/lib.rs index 4c605f38ae..0aaee5965e 100644 --- a/common/pretty_reqwest_error/src/lib.rs +++ b/common/pretty_reqwest_error/src/lib.rs @@ -55,6 +55,12 @@ impl fmt::Debug for PrettyReqwestError { } } +impl fmt::Display for PrettyReqwestError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + impl From<reqwest::Error> for PrettyReqwestError { fn from(inner: reqwest::Error) -> Self { Self(inner) diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index 6de591efcf..d218c8d93a 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -2,10 +2,10 @@ name = "sensitive_url" version = "0.1.0" authors = ["Mac L <mjladson@pm.me>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -url = "2.2.1" 
-serde = "1.0.116" +url = { workspace = true } +serde = { workspace = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index d9fdd73126..31e53779a8 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -2,10 +2,10 @@ name = "slot_clock" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -types = { path = "../../consensus/types" } -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../lighthouse_metrics" } -parking_lot = "0.12.0" +types = { workspace = true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +parking_lot = { workspace = true } diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 1c8813ca2f..6bf7464500 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -7,8 +7,8 @@ mod system_time_slot_clock; use std::time::Duration; -pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; +pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; use types::consts::merge::INTERVALS_PER_SLOT; @@ -137,4 +137,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone { slot_clock.set_current_time(freeze_at); slot_clock } + + /// Returns the delay between the start of the slot and when a request for block components + /// missed over gossip in the current slot should be made via RPC. + /// + /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be + /// changed in the future. 
+ fn single_lookup_delay(&self) -> Duration { + self.unagg_attestation_production_delay() / 2 + } } diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index 0956710b82..5f0de80d90 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -1,13 +1,12 @@ [package] name = "system_health" version = "0.1.0" -edition = "2021" +edition = { workspace = true } [dependencies] -lighthouse_network = { path = "../../beacon_node/lighthouse_network" } -types = { path = "../../consensus/types" } -sysinfo = "0.26.5" -serde = "1.0.116" -serde_derive = "1.0.116" -serde_json = "1.0.58" -parking_lot = "0.12.0" +lighthouse_network = { workspace = true } +types = { workspace = true } +sysinfo = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +parking_lot = { workspace = true } diff --git a/common/target_check/Cargo.toml b/common/target_check/Cargo.toml index 2cf2cacc64..206a636be4 100644 --- a/common/target_check/Cargo.toml +++ b/common/target_check/Cargo.toml @@ -2,7 +2,7 @@ name = "target_check" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] static_assertions = "1.1.0" diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 08bb565870..38f4eca369 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -2,13 +2,13 @@ name = "task_executor" version = "0.1.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } -slog = "2.5.2" -futures = "0.3.7" -exit-future = "0.2.0" -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../lighthouse_metrics" } -sloggers = { version = "2.1.1", features = ["json"] } +tokio = { workspace = true } +slog = { workspace = true } +futures = { workspace = true } +exit-future = { workspace = 
true } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +sloggers = { workspace = true } diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 8794eeea21..79308797a4 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -2,12 +2,12 @@ name = "test_random_derive" version = "0.2.0" authors = ["thojest <thojest@gmail.com>"] -edition = "2021" +edition = { workspace = true } description = "Procedural derive macros for implementation of TestRandom trait" [lib] proc-macro = true [dependencies] -syn = "1.0.42" -quote = "1.0.7" +syn = { workspace = true } +quote = { workspace = true } diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 6c72ecb449..648c20121a 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -20,9 +20,8 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream { let name = &derived_input.ident; let (impl_generics, ty_generics, where_clause) = &derived_input.generics.split_for_impl(); - let struct_data = match &derived_input.data { - syn::Data::Struct(s) => s, - _ => panic!("test_random_derive only supports structs."), + let syn::Data::Struct(struct_data) = &derived_input.data else { + panic!("test_random_derive only supports structs."); }; // Build quotes for fields that should be generated and those that should be built from diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 2dd041ff07..3d70cad272 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "unused_port" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lru_cache = { path = "../lru_cache" } -lazy_static = "1.4.0" -parking_lot = "0.12.0" +lru_cache = { workspace = true } 
+lazy_static = { workspace = true } +parking_lot = { workspace = true } diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 8accddfcb9..ae8742fe07 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -2,7 +2,7 @@ name = "validator_dir" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [features] insecure_keys = [] @@ -10,17 +10,17 @@ insecure_keys = [] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bls = { path = "../../crypto/bls" } -eth2_keystore = { path = "../../crypto/eth2_keystore" } -filesystem = { path = "../filesystem" } -types = { path = "../../consensus/types" } -rand = "0.8.5" -deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.5.0" -hex = "0.4.2" -derivative = "2.1.1" -lockfile = { path = "../lockfile" } -directory = { path = "../directory" } +bls = { workspace = true } +eth2_keystore = { workspace = true } +filesystem = { workspace = true } +types = { workspace = true } +rand = { workspace = true } +deposit_contract = { workspace = true } +tree_hash = { workspace = true } +hex = { workspace = true } +derivative = { workspace = true } +lockfile = { workspace = true } +directory = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index e66aeddfb8..85c1901bad 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -2,20 +2,20 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = "0.3.2" -eth2 = { path = "../eth2" } -types = { path = "../../consensus/types" } -beacon_chain = { path = 
"../../beacon_node/beacon_chain" } -state_processing = { path = "../../consensus/state_processing" } -safe_arith = { path = "../../consensus/safe_arith" } -serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.14.0", features = ["sync"] } +warp = { workspace = true } +eth2 = { workspace = true } +types = { workspace = true } +beacon_chain = { workspace = true } +state_processing = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +tokio = { workspace = true } headers = "0.3.2" -lighthouse_metrics = { path = "../lighthouse_metrics" } -lazy_static = "1.4.0" +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } serde_array_query = "0.1.0" diff --git a/common/warp_utils/src/cors.rs b/common/warp_utils/src/cors.rs index 314ea9c8f7..55043dfd7d 100644 --- a/common/warp_utils/src/cors.rs +++ b/common/warp_utils/src/cors.rs @@ -10,10 +10,14 @@ pub fn set_builder_origins( default_origin: (IpAddr, u16), ) -> Result<Builder, String> { if let Some(allow_origin) = allow_origin { - let origins = allow_origin - .split(',') - .map(|s| verify_cors_origin_str(s).map(|_| s)) - .collect::<Result<Vec<_>, _>>()?; + let mut origins = vec![]; + for origin in allow_origin.split(',') { + verify_cors_origin_str(origin)?; + if origin == "*" { + return Ok(builder.allow_any_origin()); + } + origins.push(origin) + } Ok(builder.allow_origins(origins)) } else { let origin = match default_origin.0 { diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 1b9d89db91..d93b74ca95 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -87,7 +87,7 @@ pub fn scrape_process_health_metrics() { // This will silently fail if we are unable to observe the health. This is desired behaviour // since we don't support `Health` for all platforms. 
if let Ok(health) = ProcessHealth::observe() { - set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64); + set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64); diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 0f43c8890f..05edc34856 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -2,20 +2,20 @@ name = "cached_tree_hash" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] -ethereum-types = "0.14.1" -ssz_types = "0.5.3" -ethereum_hashing = "1.0.0-beta.2" -ethereum_ssz_derive = "0.5.0" -ethereum_ssz = "0.5.0" -tree_hash = "0.5.0" -smallvec = "1.6.1" +ethereum-types = { workspace = true } +ssz_types = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz_derive = { workspace = true } +ethereum_ssz = { workspace = true } +tree_hash = { workspace = true } +smallvec = { workspace = true } [dev-dependencies] -quickcheck = "0.9.2" -quickcheck_macros = "0.9.1" +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 3864d52d47..7a06d7352b 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -2,19 +2,19 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { path = "../types" } -state_processing = { path = "../state_processing" } -proto_array = { path = 
"../proto_array" } -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } +types = { workspace = true } +state_processing = { workspace = true } +proto_array = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +slog = { workspace = true } [dev-dependencies] -beacon_chain = { path = "../../beacon_node/beacon_chain" } -store = { path = "../../beacon_node/store" } -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +beacon_chain = { workspace = true } +store = { workspace = true } +tokio = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 4f563f8639..bdd74c1a2a 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -197,7 +197,8 @@ impl<T> From<proto_array::Error> for Error<T> { /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode)] +#[ssz(enum_behaviour = "tag")] pub enum PayloadVerificationStatus { /// An EL has declared the execution payload to be valid. Verified, @@ -355,7 +356,7 @@ where spec: &ChainSpec, ) -> Result<Self, Error<T::Error>> { // Sanity check: the anchor must lie on an epoch boundary. 
- if anchor_block.slot() % E::slots_per_epoch() != 0 { + if anchor_state.slot() % E::slots_per_epoch() != 0 { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), @@ -391,6 +392,7 @@ where let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); let proto_array = ProtoArrayForkChoice::new::<E>( + current_slot, finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), @@ -679,7 +681,7 @@ where .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?; // Blocks cannot be in the future. If they are, their consideration must be delayed until - // the are in the past. + // they are in the past. // // Note: presently, we do not delay consideration. We just drop the block. if block.slot() > current_slot { @@ -721,7 +723,8 @@ where // Add proposer score boost if the block is timely. let is_before_attesting_interval = block_delay < Duration::from_secs(spec.seconds_per_slot / INTERVALS_PER_SLOT); - if current_slot == block.slot() && is_before_attesting_interval { + let is_first_block = self.fc_store.proposer_boost_root().is_zero(); + if current_slot == block.slot() && is_before_attesting_interval && is_first_block { self.fc_store.set_proposer_boost_root(block_root); } @@ -749,7 +752,7 @@ where .unrealized_justified_checkpoint .zip(parent_block.unrealized_finalized_checkpoint) .filter(|(parent_justified, parent_finalized)| { - parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch + parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 == block_epoch }); let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( @@ -761,7 +764,8 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - BeaconBlockRef::Capella(_) + BeaconBlockRef::Deneb(_) + | BeaconBlockRef::Capella(_) | BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => 
match progressive_balances_mode { ProgressiveBalancesMode::Disabled => { diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index d28210aa1b..9d39eb3e38 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1,9 +1,5 @@ #![cfg(not(debug_assertions))] -use std::fmt; -use std::sync::Mutex; -use std::time::Duration; - use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; @@ -14,6 +10,9 @@ use beacon_chain::{ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; +use std::fmt; +use std::sync::Mutex; +use std::time::Duration; use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, @@ -195,12 +194,13 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot).await; + let (block_contents, state_) = self.harness.make_block(state, slot).await; state = state_; - if !predicate(block.message(), &state) { + if !predicate(block_contents.0.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { + let block = block_contents.0.clone(); + if let Ok(block_hash) = self.harness.process_block_result(block_contents).await { self.harness.attest_block( &state, block.state_root(), @@ -324,8 +324,8 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; - func(&mut signed_block, &mut state); + let (mut block_tuple, mut state) = self.harness.make_block(state, slot).await; + func(&mut block_tuple.0, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain @@ -333,8 +333,8 @@ impl ForkChoiceTest { 
.fork_choice_write_lock() .on_block( current_slot, - signed_block.message(), - signed_block.canonical_root(), + block_tuple.0.message(), + block_tuple.0.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -367,8 +367,8 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; - mutation_func(&mut signed_block, &mut state); + let (mut block_tuple, mut state) = self.harness.make_block(state, slot).await; + mutation_func(&mut block_tuple.0, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness @@ -377,8 +377,8 @@ impl ForkChoiceTest { .fork_choice_write_lock() .on_block( current_slot, - signed_block.message(), - signed_block.canonical_root(), + block_tuple.0.message(), + block_tuple.0.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -1353,6 +1353,14 @@ async fn progressive_balances_cache_attester_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() + // Note: This test may fail if the shuffling used changes, right now it re-runs with + // deterministic shuffling. A shuffling change may cause the slashed proposer to propose + // again in the next epoch, which results in a block processing failure + // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip + // the slot in this scenario rather than panicking. The same applies to + // `progressive_balances_cache_proposer_slashing`. 
+ .apply_blocks(1) + .await .add_previous_epoch_attester_slashing() .await // expect fork choice to import blocks successfully after a previous epoch attester is @@ -1376,6 +1384,14 @@ async fn progressive_balances_cache_proposer_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() + // Note: This test may fail if the shuffling used changes, right now it re-runs with + // deterministic shuffling. A shuffling change may cause the slashed proposer to propose + // again in the next epoch, which results in a block processing failure + // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip + // the slot in this scenario rather than panicking. The same applies to + // `progressive_balances_cache_attester_slashing`. + .apply_blocks(1) + .await .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) .await // expect fork choice to import blocks successfully after a previous epoch proposer is diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 73dfec40f9..03bec9d380 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -2,11 +2,11 @@ name = "int_to_bytes" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -bytes = "1.0.1" +bytes = { workspace = true } [dev-dependencies] yaml-rust = "0.4.4" -hex = "0.4.2" +hex = { workspace = true } diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2b883f8646..3bee25eaac 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -2,17 +2,17 @@ name = "merkle_proof" version = "0.2.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] -ethereum-types = "0.14.1" -ethereum_hashing = "1.0.0-beta.2" -lazy_static = "1.4.0" -safe_arith = { path = "../safe_arith" } 
+ethereum-types = { workspace = true } +ethereum_hashing = { workspace = true } +lazy_static = { workspace = true } +safe_arith = { workspace = true } [dev-dependencies] -quickcheck = "0.9.2" -quickcheck_macros = "0.9.1" +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 81a535e34a..99f98cf545 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -2,18 +2,17 @@ name = "proto_array" version = "0.2.0" authors = ["Paul Hauner <paul@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [[bin]] name = "proto_array" path = "src/bin.rs" [dependencies] -types = { path = "../types" } -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -serde = "1.0.116" -serde_derive = "1.0.116" -serde_yaml = "0.8.13" -safe_arith = { path = "../safe_arith" } -superstruct = "0.5.0" \ No newline at end of file +types = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } +safe_arith = { workspace = true } +superstruct = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 157f072ad3..ebb639819d 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,7 +5,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, @@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = 
AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>( + self.finalized_block_slot, self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 7b6afb94f5..4726715a16 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,6 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -1035,13 +1035,11 @@ impl ProtoArray { .epoch .start_slot(E::slots_per_epoch()); - let mut node = if let Some(node) = self + let Some(mut node) = self .indices .get(&root) .and_then(|index| self.nodes.get(*index)) - { - node - } else { + else { // An unknown root is not a finalized descendant. This line can only // be reached if the user supplies a root that is not known to fork // choice. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index fe831b3c35..6fc677073e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,7 +7,7 @@ use crate::{ ssz_container::SszContainer, JustifiedBalances, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::{ @@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] pub fn new<E: EthSpec>( + current_slot: Slot, finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -380,7 +381,7 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::<E>(block, finalized_block_slot) + .on_block::<E>(block, current_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -983,6 +984,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( + genesis_slot, genesis_slot, state_root, genesis_checkpoint, @@ -1108,6 +1110,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( + genesis_slot, genesis_slot, junk_state_root, genesis_checkpoint, diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index d212f98842..6f2e4b811c 100644 --- a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -2,7 +2,7 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/consensus/safe_arith/src/iter.rs b/consensus/safe_arith/src/iter.rs index 1fc3d3a1a7..d5ee51b588 100644 --- a/consensus/safe_arith/src/iter.rs +++ b/consensus/safe_arith/src/iter.rs @@ 
-28,10 +28,10 @@ mod test { #[test] fn unsigned_sum_small() { - let v = vec![400u64, 401, 402, 403, 404, 405, 406]; + let arr = [400u64, 401, 402, 403, 404, 405, 406]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } @@ -61,10 +61,10 @@ mod test { #[test] fn signed_sum_almost_overflow() { - let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; + let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index f19cd1d29d..7279fd28fa 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -2,44 +2,42 @@ name = "state_processing" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dev-dependencies] -env_logger = "0.9.0" -beacon_chain = { path = "../../beacon_node/beacon_chain" } -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +env_logger = { workspace = true } +beacon_chain = { workspace = true } +tokio = { workspace = true } [dependencies] -bls = { path = "../../crypto/bls" } +bls = { workspace = true } integer-sqrt = "0.1.5" -itertools = "0.10.0" -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -ssz_types = "0.5.3" -merkle_proof = { path = "../merkle_proof" } -safe_arith = { path = "../safe_arith" } -tree_hash = "0.5.0" -types = { path = "../types", default-features = false } -rayon = "1.4.1" -ethereum_hashing = "1.0.0-beta.2" -int_to_bytes = { path = "../int_to_bytes" } -smallvec = "1.6.1" -arbitrary = { version = "1.0", features = ["derive"], optional = true } -lighthouse_metrics = { path = "../../common/lighthouse_metrics", optional = true } 
-lazy_static = { version = "1.4.0", optional = true } -derivative = "2.1.1" +itertools = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +ssz_types = { workspace = true } +merkle_proof = { workspace = true } +safe_arith = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +rayon = { workspace = true } +ethereum_hashing = { workspace = true } +int_to_bytes = { workspace = true } +smallvec = { workspace = true } +arbitrary = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +derivative = { workspace = true } [features] -default = ["legacy-arith", "metrics"] +default = ["legacy-arith"] fake_crypto = ["bls/fake_crypto"] legacy-arith = ["types/legacy-arith"] -metrics = ["lighthouse_metrics", "lazy_static"] arbitrary-fuzz = [ - "arbitrary", "types/arbitrary-fuzz", - "bls/arbitrary", "merkle_proof/arbitrary", "ethereum_ssz/arbitrary", "ssz_types/arbitrary", "tree_hash/arbitrary", ] +portable = ["bls/supranational-portable"] \ No newline at end of file diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ed5e642941..f502d7f692 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -3,6 +3,8 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, VerifyBlockRoot, }; +use itertools::Itertools; +use std::iter::Peekable; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -25,7 +27,7 @@ pub struct BlockReplayer< 'a, Spec: EthSpec, Error = BlockReplayError, - StateRootIter = StateRootIterDefault<Error>, + StateRootIter: Iterator<Item = Result<(Hash256, Slot), Error>> = StateRootIterDefault<Error>, > { state: BeaconState<Spec>, spec: &'a ChainSpec, @@ -36,7 +38,7 @@ pub struct BlockReplayer< 
post_block_hook: Option<PostBlockHook<'a, Spec, Error>>, pre_slot_hook: Option<PreSlotHook<'a, Spec, Error>>, post_slot_hook: Option<PostSlotHook<'a, Spec, Error>>, - state_root_iter: Option<StateRootIter>, + pub(crate) state_root_iter: Option<Peekable<StateRootIter>>, state_root_miss: bool, _phantom: PhantomData<Error>, } @@ -138,7 +140,7 @@ where /// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both /// endpoints). pub fn state_root_iter(mut self, iter: StateRootIter) -> Self { - self.state_root_iter = Some(iter); + self.state_root_iter = Some(iter.peekable()); self } @@ -192,7 +194,7 @@ where // If a state root iterator is configured, use it to find the root. if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter - .take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) + .peeking_take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) .find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot)) .transpose()?; diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index 499d8fa8f8..e4e30230af 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -44,8 +44,21 @@ pub fn get_attestation_participation_flag_indices<T: EthSpec>( if is_matching_source && inclusion_delay <= T::slots_per_epoch().integer_sqrt() { participation_flag_indices.push(TIMELY_SOURCE_FLAG_INDEX); } - if is_matching_target && inclusion_delay <= T::slots_per_epoch() { - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + match state { + &BeaconState::Base(_) + | &BeaconState::Altair(_) + | &BeaconState::Merge(_) + | &BeaconState::Capella(_) => { + if is_matching_target && inclusion_delay <= T::slots_per_epoch() { + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + } + } + 
&BeaconState::Deneb(_) => { + if is_matching_target { + // [Modified in Deneb:EIP7045] + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + } + } } if is_matching_head && inclusion_delay == spec.min_attestation_inclusion_delay { participation_flag_indices.push(TIMELY_HEAD_FLAG_INDEX); diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index d54da43a04..d8b1c1a107 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -53,11 +53,12 @@ pub fn slash_validator<T: EthSpec>( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - whistleblower_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)?, }; // Ensure the whistleblower index is in the validator registry. 
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index ccf8cefb69..8e49a0d498 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,14 +1,14 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use ssz_derive::{Decode, Encode}; use std::collections::{hash_map::Entry, HashMap}; -use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; -#[derive(Debug)] +#[derive(Debug, PartialEq, Clone, Encode, Decode)] pub struct ConsensusContext<T: EthSpec> { /// Slot to act as an identifier/safeguard slot: Slot, @@ -17,9 +17,10 @@ pub struct ConsensusContext<T: EthSpec> { /// Block root of the block at `slot`. current_block_root: Option<Hash256>, /// Cache of indexed attestations constructed during block processing. 
+ /// We can skip serializing / deserializing this as the cache will just be rebuilt + #[ssz(skip_serializing, skip_deserializing)] indexed_attestations: HashMap<(AttestationData, BitList<T::MaxValidatorsPerCommittee>), IndexedAttestation<T>>, - _phantom: PhantomData<T>, } #[derive(Debug, PartialEq, Clone)] @@ -42,7 +43,6 @@ impl<T: EthSpec> ConsensusContext<T> { proposer_index: None, current_block_root: None, indexed_attestations: HashMap::new(), - _phantom: PhantomData, } } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index ebbc8f9f31..284a7019f3 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,9 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -91,6 +93,23 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>( } } + // Upgrade to deneb if configured from genesis + if spec + .deneb_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_deneb(&mut state, spec)?; + + // Remove intermediate Capella fork from `state.fork`. + state.fork_mut().previous_version = spec.deneb_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#testing + if let Some(ExecutionPayloadHeader::Deneb(header)) = execution_payload_header { + *state.latest_execution_payload_header_deneb_mut()? 
= header; + } + } + // Now that we have our validators, initialize the caches (including the committees) state.build_caches(spec)?; diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index 360b007678..d8a51135e8 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,5 +1,3 @@ -#![cfg(feature = "metrics")] - use lazy_static::lazy_static; pub use lighthouse_metrics::*; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b8b76a499d..b9a147a5ad 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -26,6 +26,7 @@ pub use verify_exit::verify_exit; pub mod altair; pub mod block_signature_verifier; +pub mod deneb; pub mod errors; mod is_valid_indexed_attestation; pub mod process_operations; @@ -166,11 +167,11 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>( // `process_randao` as the former depends on the `randao_mix` computed with the reveal of the // previous block. 
if is_execution_enabled(state, block.body()) { - let payload = block.body().execution_payload()?; + let body = block.body(); if state_processing_strategy == StateProcessingStrategy::Accurate { - process_withdrawals::<T, Payload>(state, payload, spec)?; + process_withdrawals::<T, Payload>(state, body.execution_payload()?, spec)?; } - process_execution_payload::<T, Payload>(state, payload, spec)?; + process_execution_payload::<T, Payload>(state, body, spec)?; } process_randao(state, block, verify_randao, ctxt, spec)?; @@ -355,9 +356,10 @@ pub fn get_new_eth1_data<T: EthSpec>( pub fn partially_verify_execution_payload<T: EthSpec, Payload: AbstractExecPayload<T>>( state: &BeaconState<T>, block_slot: Slot, - payload: Payload::Ref<'_>, + body: BeaconBlockBodyRef<T, Payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + let payload = body.execution_payload()?; if is_merge_transition_complete(state) { block_verify!( payload.parent_hash() == state.latest_execution_payload_header()?.block_hash(), @@ -384,6 +386,17 @@ pub fn partially_verify_execution_payload<T: EthSpec, Payload: AbstractExecPaylo } ); + if let Ok(blob_commitments) = body.blob_kzg_commitments() { + // Verify commitments are under the limit. 
+ block_verify!( + blob_commitments.len() <= T::max_blobs_per_block(), + BlockProcessingError::ExecutionInvalidBlobsLen { + max: T::max_blobs_per_block(), + actual: blob_commitments.len(), + } + ); + } + Ok(()) } @@ -396,11 +409,11 @@ pub fn partially_verify_execution_payload<T: EthSpec, Payload: AbstractExecPaylo /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload pub fn process_execution_payload<T: EthSpec, Payload: AbstractExecPayload<T>>( state: &mut BeaconState<T>, - payload: Payload::Ref<'_>, + body: BeaconBlockBodyRef<T, Payload>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - partially_verify_execution_payload::<T, Payload>(state, state.slot(), payload, spec)?; - + partially_verify_execution_payload::<T, Payload>(state, state.slot(), body, spec)?; + let payload = body.execution_payload()?; match state.latest_execution_payload_header_mut()? { ExecutionPayloadHeaderRefMut::Merge(header_mut) => { match payload.to_execution_payload_header() { @@ -414,6 +427,12 @@ pub fn process_execution_payload<T: EthSpec, Payload: AbstractExecPayload<T>>( _ => return Err(BlockProcessingError::IncorrectStateType), } } + ExecutionPayloadHeaderRefMut::Deneb(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Deneb(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } } Ok(()) @@ -422,15 +441,19 @@ pub fn process_execution_payload<T: EthSpec, Payload: AbstractExecPayload<T>>( /// These functions will definitely be called before the merge. Their entire purpose is to check if /// the merge has happened or if we're on the transition block. Thus we don't want to propagate /// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to -/// repeaetedly write code to treat these errors as false. +/// repeatedly write code to treat these errors as false. 
/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete<T: EthSpec>(state: &BeaconState<T>) -> bool { - // We must check defaultness against the payload header with 0x0 roots, as that's what's meant - // by `ExecutionPayloadHeader()` in the spec. - state - .latest_execution_payload_header() - .map(|header| !header.is_default_with_zero_roots()) - .unwrap_or(false) + match state { + // We must check defaultness against the payload header with 0x0 roots, as that's what's meant + // by `ExecutionPayloadHeader()` in the spec. + BeaconState::Merge(_) => state + .latest_execution_payload_header() + .map(|header| !header.is_default_with_zero_roots()) + .unwrap_or(false), + BeaconState::Deneb(_) | BeaconState::Capella(_) => true, + BeaconState::Base(_) | BeaconState::Altair(_) => false, + } } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block pub fn is_merge_transition_block<T: EthSpec, Payload: AbstractExecPayload<T>>( @@ -526,7 +549,7 @@ pub fn process_withdrawals<T: EthSpec, Payload: AbstractExecPayload<T>>( ) -> Result<(), BlockProcessingError> { match state { BeaconState::Merge(_) => Ok(()), - BeaconState::Capella(_) => { + BeaconState::Capella(_) | BeaconState::Deneb(_) => { let expected_withdrawals = get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; diff --git a/consensus/state_processing/src/per_block_processing/deneb.rs b/consensus/state_processing/src/per_block_processing/deneb.rs new file mode 100644 index 0000000000..8f7cb0514f --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/deneb.rs @@ -0,0 +1,9 @@ +use ethereum_hashing::hash_fixed; +use types::consts::deneb::VERSIONED_HASH_VERSION_KZG; +use types::{KzgCommitment, VersionedHash}; + +pub fn 
kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash { + let mut hashed_commitment = hash_fixed(&kzg_commitment.0); + hashed_commitment[0] = VERSIONED_HASH_VERSION_KZG; + VersionedHash::from(hashed_commitment) +} diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 0aba1d83fa..de1c132951 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -78,6 +78,10 @@ pub enum BlockProcessingError { expected: u64, found: u64, }, + ExecutionInvalidBlobsLen { + max: usize, + actual: usize, + }, ExecutionInvalid, ConsensusContext(ContextError), WithdrawalsRootMismatch { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 1dbcb7fb8f..cb24a7ba7e 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -95,7 +95,7 @@ pub mod base { } } -pub mod altair { +pub mod altair_deneb { use super::*; use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; @@ -267,8 +267,9 @@ pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>( } BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) - | BeaconBlockBodyRef::Capella(_) => { - altair::process_attestations( + | BeaconBlockBodyRef::Capella(_) + | BeaconBlockBodyRef::Deneb(_) => { + altair_deneb::process_attestations( state, block_body.attestations(), verify_signatures, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index c05d3f057d..fcd324e9eb 100644 --- 
a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -387,12 +387,23 @@ where let exit = &signed_exit.message; let proposer_index = exit.validator_index as usize; - let domain = spec.get_domain( - exit.epoch, - Domain::VoluntaryExit, - &state.fork(), - state.genesis_validators_root(), - ); + let domain = match state { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => spec.get_domain( + exit.epoch, + Domain::VoluntaryExit, + &state.fork(), + state.genesis_validators_root(), + ), + // EIP-7044 + BeaconState::Deneb(_) => spec.compute_domain( + Domain::VoluntaryExit, + spec.capella_fork_version, + state.genesis_validators_root(), + ), + }; let message = exit.signing_root(domain); diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 16fa2462f5..75eb438967 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,11 +1,11 @@ -#![cfg(all(test, not(feature = "fake_crypto")))] +#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))] use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -34,7 +34,7 @@ async fn get_harness<E: EthSpec>( // Set the state and block to be in the last slot of the `epoch_offset`th epoch. 
let last_slot_of_epoch = (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch()); - let harness = BeaconChainHarness::builder(E::default()) + let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default()) .default_spec() .keypairs(KEYPAIRS[0..num_validators].to_vec()) .fresh_ephemeral_store() @@ -63,7 +63,7 @@ async fn valid_block_ok() { let state = harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness + let ((block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; @@ -89,7 +89,7 @@ async fn invalid_block_header_state_slot() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; + let ((signed_block, _), mut state) = harness.make_block_return_pre_state(state, slot).await; let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); @@ -120,7 +120,7 @@ async fn invalid_parent_block_root() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; let (mut block, signature) = signed_block.deconstruct(); @@ -155,7 +155,7 @@ async fn invalid_block_signature() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_return_pre_state(state, slot + Slot::new(1)) .await; let (block, _) = signed_block.deconstruct(); @@ -188,7 +188,7 @@ async fn invalid_randao_reveal_signature() { let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness + let ((signed_block, _), mut state) = harness .make_block_with_modifier(state, slot + 1, |block| { *block.body_mut().randao_reveal_mut() = 
Signature::empty(); }) @@ -1035,3 +1035,51 @@ async fn fork_spanning_exit() { ) .expect_err("phase0 exit does not verify against bellatrix state"); } + +/// Check that the block replayer does not consume state roots unnecessarily. +#[tokio::test] +async fn block_replayer_peeking_state_roots() { + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; + + let target_state = harness.get_current_state(); + let target_block_root = harness.head_block_root(); + let target_block = harness + .chain + .get_blinded_block(&target_block_root) + .unwrap() + .unwrap(); + + let parent_block_root = target_block.parent_root(); + let parent_block = harness + .chain + .get_blinded_block(&parent_block_root) + .unwrap() + .unwrap(); + let parent_state = harness + .chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + // Omit the state root for `target_state` but provide a dummy state root at the *next* slot. + // If the block replayer is peeking at the state roots rather than consuming them, then the + // dummy state should still be there after block replay completes. 
+ let dummy_state_root = Hash256::repeat_byte(0xff); + let dummy_slot = target_state.slot() + 1; + let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))]; + let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) + .state_root_iter(state_root_iter.into_iter()) + .no_signature_verification() + .apply_blocks(vec![target_block], None) + .unwrap(); + + assert_eq!( + block_replayer + .state_root_iter + .unwrap() + .next() + .unwrap() + .unwrap(), + (dummy_state_root, dummy_slot) + ); +} diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 303a6e3913..b7aa4643e4 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -32,13 +32,22 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>( attestation: data.slot, } ); - verify!( - state.slot() <= data.slot.safe_add(T::slots_per_epoch())?, - Invalid::IncludedTooLate { - state: state.slot(), - attestation: data.slot, + match state { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => { + verify!( + state.slot() <= data.slot.safe_add(T::slots_per_epoch())?, + Invalid::IncludedTooLate { + state: state.slot(), + attestation: data.slot, + } + ); } - ); + // [Modified in Deneb:EIP7045] + BeaconState::Deneb(_) => {} + } verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) } diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 6350685f82..d5d06037cd 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -40,7 +40,7 @@ pub fn process_epoch<T: EthSpec>( match state { BeaconState::Base(_) => 
base::process_epoch(state, spec), BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), - BeaconState::Capella(_) => capella::process_epoch(state, spec), + BeaconState::Capella(_) | BeaconState::Deneb(_) => capella::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 6eb2f97766..89bc4ab5a3 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -21,7 +21,6 @@ pub enum EpochProcessingSummary<T: EthSpec> { impl<T: EthSpec> EpochProcessingSummary<T> { /// Updates some Prometheus metrics with some values in `self`. - #[cfg(feature = "metrics")] pub fn observe_metrics(&self) -> Result<(), ParticipationCacheError> { metrics::set_gauge( &metrics::PARTICIPATION_PREV_EPOCH_HEAD_ATTESTING_GWEI_TOTAL, diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 4fd2d68586..833be41387 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -50,9 +50,9 @@ pub fn process_registry_updates<T: EthSpec>( .collect_vec(); // Dequeue validators for activation up to churn limit - let churn_limit = state.get_churn_limit(spec)? as usize; + let activation_churn_limit = state.get_activation_churn_limit(spec)? 
as usize; let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; - for index in activation_queue.into_iter().take(churn_limit) { + for index in activation_queue.into_iter().take(activation_churn_limit) { state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; } diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index ead06edbf5..e89a78c4d8 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,6 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use crate::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -21,7 +23,7 @@ impl From<ArithError> for Error { /// /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash. -/// Providing the `state_root` makes this function several orders of magniude faster. +/// Providing the `state_root` makes this function several orders of magnitude faster. 
pub fn per_slot_processing<T: EthSpec>( state: &mut BeaconState<T>, state_root: Option<Hash256>, @@ -59,6 +61,10 @@ pub fn per_slot_processing<T: EthSpec>( if spec.capella_fork_epoch == Some(state.current_epoch()) { upgrade_to_capella(state, spec)?; } + // Deneb + if spec.deneb_fork_epoch == Some(state.current_epoch()) { + upgrade_to_deneb(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index a57d5923f8..1509ee0e50 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,7 +1,9 @@ pub mod altair; pub mod capella; +pub mod deneb; pub mod merge; pub use altair::upgrade_to_altair; pub use capella::upgrade_to_capella; +pub use deneb::upgrade_to_deneb; pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/deneb.rs b/consensus/state_processing/src/upgrade/deneb.rs new file mode 100644 index 0000000000..c253a8c162 --- /dev/null +++ b/consensus/state_processing/src/upgrade/deneb.rs @@ -0,0 +1,76 @@ +use std::mem; +use types::{BeaconState, BeaconStateDeneb, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; + +/// Transform a `Capella` state into an `Deneb` state. +pub fn upgrade_to_deneb<E: EthSpec>( + pre_state: &mut BeaconState<E>, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_capella_mut()?; + + let previous_fork_version = pre.fork.current_version; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. 
+ let post = BeaconState::Deneb(BeaconStateDeneb { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: previous_fork_version, + current_version: spec.deneb_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_deneb(), + // Capella + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), + // Caches + total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), + committee_caches: mem::take(&mut 
pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 303e5cfba1..ea9b603c5b 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -2,18 +2,18 @@ name = "swap_or_not_shuffle" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [[bench]] name = "benches" harness = false [dev-dependencies] -criterion = "0.3.3" +criterion = { workspace = true } [dependencies] -ethereum_hashing = "1.0.0-beta.2" -ethereum-types = "0.14.1" +ethereum_hashing = { workspace = true } +ethereum-types = { workspace = true } [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index f030f2e97a..db15f53537 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -2,71 +2,70 @@ name = "types" version = "0.2.1" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"] -edition = "2021" +edition = { workspace = true } [[bench]] name = "benches" harness = false [dependencies] -merkle_proof = { path = "../../consensus/merkle_proof" } -bls = { path = "../../crypto/bls", features = ["arbitrary"] } -compare_fields = { path = "../../common/compare_fields" } -compare_fields_derive = { path = "../../common/compare_fields_derive" } +merkle_proof = { workspace = true } +bls = { workspace = true, features = ["arbitrary"] } +kzg = { workspace = true } +compare_fields = { workspace = true } +compare_fields_derive = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = { version = "0.14.1", features = ["arbitrary"] } -ethereum_hashing = "1.0.0-beta.2" -hex = "0.4.2" 
-int_to_bytes = { path = "../int_to_bytes" } -log = "0.4.11" -rayon = "1.4.1" -rand = "0.8.5" -safe_arith = { path = "../safe_arith" } -serde = {version = "1.0.116" , features = ["rc"] } -serde_derive = "1.0.116" -slog = "2.5.2" -ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] } -ethereum_ssz_derive = "0.5.0" -ssz_types = { version = "0.5.3", features = ["arbitrary"] } -swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } +ethereum-types = { workspace = true, features = ["arbitrary"] } +ethereum_hashing = { workspace = true } +hex = { workspace = true } +int_to_bytes = { workspace = true } +log = { workspace = true } +rayon = { workspace = true } +rand = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true, features = ["rc"] } +slog = { workspace = true } +ethereum_ssz = { workspace = true, features = ["arbitrary"] } +ethereum_ssz_derive = { workspace = true } +ssz_types = { workspace = true, features = ["arbitrary"] } +swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = { version = "0.5.0", features = ["arbitrary"] } -tree_hash_derive = "0.5.0" +tree_hash = { workspace = true, features = ["arbitrary"] } +tree_hash_derive = { workspace = true } rand_xorshift = "0.3.0" -cached_tree_hash = { path = "../cached_tree_hash" } -serde_yaml = "0.8.13" -tempfile = "3.1.0" -derivative = "2.1.1" -rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } +cached_tree_hash = { workspace = true } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +derivative = { workspace = true } +rusqlite = { workspace = true } # The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by # `AbstractExecPayload` -arbitrary = { version = "1.0", features = ["derive"] } -ethereum_serde_utils = "0.5.0" -regex = "1.5.5" -lazy_static = "1.4.0" -parking_lot = 
"0.12.0" -itertools = "0.10.0" -superstruct = "0.6.0" +arbitrary = { workspace = true, features = ["derive"] } +ethereum_serde_utils = { workspace = true } +regex = { workspace = true } +lazy_static = { workspace = true } +parking_lot = { workspace = true } +itertools = { workspace = true } +superstruct = { workspace = true } metastruct = "0.1.0" -serde_json = "1.0.74" -smallvec = "1.8.0" -serde_with = "1.13.0" -maplit = "1.0.2" -strum = { version = "0.24.0", features = ["derive"] } +serde_json = { workspace = true } +smallvec = { workspace = true } +maplit = { workspace = true } +strum = { workspace = true } [dev-dependencies] -criterion = "0.3.3" -beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -state_processing = { path = "../state_processing" } -tokio = "1.14.0" -paste = "1.0.14" +criterion = { workspace = true } +beacon_chain = { workspace = true } +state_processing = { workspace = true } +tokio = { workspace = true } +paste = { workspace = true } [features] default = ["sqlite", "legacy-arith"] # Allow saturating arithmetic on slots and epochs. Enabled by default, but deprecated. legacy-arith = [] -sqlite = ["rusqlite"] +sqlite = [] # The `arbitrary-fuzz` feature is a no-op provided for backwards compatibility. # For simplicity `Arbitrary` is now derived regardless of the feature's presence. arbitrary-fuzz = [] +portable = ["bls/supranational-portable"] \ No newline at end of file diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml new file mode 100644 index 0000000000..b78a950275 --- /dev/null +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -0,0 +1,12 @@ +# Gnosis preset - Deneb +# NOTE: The below are PLACEHOLDER values from Mainnet. 
+# Gnosis preset for the Deneb fork TBD: https://github.com/gnosischain/configs/tree/main/presets/gnosis + +# Misc +# --------------------------------------------------------------- +# `uint64(4096)` +FIELD_ELEMENTS_PER_BLOB: 4096 +# `uint64(2**12)` (= 4096) +MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/consensus/types/presets/mainnet/deneb.yaml b/consensus/types/presets/mainnet/deneb.yaml new file mode 100644 index 0000000000..23889fd18e --- /dev/null +++ b/consensus/types/presets/mainnet/deneb.yaml @@ -0,0 +1,10 @@ +# Mainnet preset - Deneb + +# Misc +# --------------------------------------------------------------- +# `uint64(4096)` +FIELD_ELEMENTS_PER_BLOB: 4096 +# `uint64(2**12)` (= 4096) +MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml new file mode 100644 index 0000000000..3da2f80a74 --- /dev/null +++ b/consensus/types/presets/minimal/deneb.yaml @@ -0,0 +1,10 @@ +# Minimal preset - Deneb + +# Misc +# --------------------------------------------------------------- +# [customized] +FIELD_ELEMENTS_PER_BLOB: 4096 +# [customized] +MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 20d66cd447..ac31e78cb7 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 5c333e0d45..ac4a583cbb 100644 --- a/consensus/types/src/attestation.rs +++ 
b/consensus/types/src/attestation.rs @@ -1,6 +1,6 @@ use derivative::Derivative; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 286502b449..7578981f51 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{Checkpoint, Hash256, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 93a4c147b6..22b03dda61 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(arbitrary::Arbitrary, Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index c563495074..c2bbea637e 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 1b40fe76d4..7215c779e7 100644 --- a/consensus/types/src/beacon_block.rs +++ 
b/consensus/types/src/beacon_block.rs @@ -1,12 +1,12 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyDeneb, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -72,6 +72,8 @@ pub struct BeaconBlock<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload pub body: BeaconBlockBodyMerge<T, Payload>, #[superstruct(only(Capella), partial_getter(rename = "body_capella"))] pub body: BeaconBlockBodyCapella<T, Payload>, + #[superstruct(only(Deneb), partial_getter(rename = "body_deneb"))] + pub body: BeaconBlockBodyDeneb<T, Payload>, } pub type BlindedBeaconBlock<E> = BeaconBlock<E, BlindedPayload<E>>; @@ -124,8 +126,9 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlock<T, Payload> { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> { - BeaconBlockCapella::from_ssz_bytes(bytes) - .map(BeaconBlock::Capella) + BeaconBlockDeneb::from_ssz_bytes(bytes) + .map(BeaconBlock::Deneb) + .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) .or_else(|_| BeaconBlockMerge::from_ssz_bytes(bytes).map(BeaconBlock::Merge)) .or_else(|_| BeaconBlockAltair::from_ssz_bytes(bytes).map(BeaconBlock::Altair)) .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) @@ -198,12 +201,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockRef<'a, T, Payl /// dictated by `self.slot()`. pub fn fork_name(&self, spec: &ChainSpec) -> Result<ForkName, InconsistentFork> { let fork_at_slot = spec.fork_name_at_slot::<T>(self.slot()); - let object_fork = match self { - BeaconBlockRef::Base { .. } => ForkName::Base, - BeaconBlockRef::Altair { .. } => ForkName::Altair, - BeaconBlockRef::Merge { .. } => ForkName::Merge, - BeaconBlockRef::Capella { .. } => ForkName::Capella, - }; + let object_fork = self.fork_name_unchecked(); if fork_at_slot == object_fork { Ok(object_fork) @@ -215,6 +213,19 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockRef<'a, T, Payl } } + /// Returns the name of the fork pertaining to `self`. + /// + /// Does not check that the fork is consistent with the slot. + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + BeaconBlockRef::Base { .. } => ForkName::Base, + BeaconBlockRef::Altair { .. } => ForkName::Altair, + BeaconBlockRef::Merge { .. } => ForkName::Merge, + BeaconBlockRef::Capella { .. } => ForkName::Capella, + BeaconBlockRef::Deneb { .. } => ForkName::Deneb, + } + } + /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. 
pub fn body(&self) -> BeaconBlockBodyRef<'a, T, Payload> { map_beacon_block_ref_into_beacon_block_body_ref!(&'a _, *self, |block, cons| cons( @@ -556,6 +567,36 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockCape } } +impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockDeneb<T, Payload> { + /// Returns an empty Deneb block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockDeneb { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyDeneb { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Deneb::default(), + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. 
impl<E: EthSpec> From<BeaconBlockBase<E, BlindedPayload<E>>> for BeaconBlockBase<E, FullPayload<E>> @@ -635,6 +676,7 @@ impl_from!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: impl_from!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyMerge<_, _>| body.into()); impl_from!(BeaconBlockCapella, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyCapella<_, _>| body.into()); +impl_from!(BeaconBlockDeneb, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyDeneb<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! impl_clone_as_blinded { @@ -666,6 +708,7 @@ impl_clone_as_blinded!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload< impl_clone_as_blinded!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>); impl_clone_as_blinded!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>); impl_clone_as_blinded!(BeaconBlockCapella, <E, FullPayload<E>>, <E, BlindedPayload<E>>); +impl_clone_as_blinded!(BeaconBlockDeneb, <E, FullPayload<E>>, <E, BlindedPayload<E>>); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. 
@@ -781,6 +824,25 @@ mod tests { }); } + #[test] + fn roundtrip_4844_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Deneb.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockDeneb { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyDeneb::random_for_test(rng), + }; + let block = BeaconBlock::Deneb(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; @@ -796,9 +858,12 @@ mod tests { let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); let capella_epoch = altair_fork_epoch + 1; let capella_slot = capella_epoch.start_slot(E::slots_per_epoch()); + let deneb_epoch = capella_epoch + 1; + let deneb_slot = deneb_epoch.start_slot(E::slots_per_epoch()); spec.altair_fork_epoch = Some(altair_epoch); spec.capella_fork_epoch = Some(capella_epoch); + spec.deneb_fork_epoch = Some(deneb_epoch); // BeaconBlockBase { @@ -865,5 +930,27 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) .expect_err("bad capella block cannot be decoded"); } + + // BeaconBlockDeneb + { + let good_block = BeaconBlock::Deneb(BeaconBlockDeneb { + slot: deneb_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a Deneb block with an epoch lower than the fork epoch. 
+ let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = capella_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good deneb block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad deneb block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index dce1be742f..2f7c6891e4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::marker::PhantomData; @@ -9,11 +9,16 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +pub type KzgCommitments<T> = + VariableList<KzgCommitment, <T as EthSpec>::MaxBlobCommitmentsPerBlock>; +pub type KzgCommitmentOpts<T> = + FixedVector<Option<KzgCommitment>, <T as EthSpec>::MaxBlobsPerBlock>; + /// The body of a `BeaconChain` block, containing operations. /// /// This *superstruct* abstracts over the hard-fork. 
#[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -51,7 +56,7 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay pub attestations: VariableList<Attestation<T>, T::MaxAttestations>, pub deposits: VariableList<Deposit, T::MaxDeposits>, pub voluntary_exits: VariableList<SignedVoluntaryExit, T::MaxVoluntaryExits>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub sync_aggregate: SyncAggregate<T>, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded @@ -62,9 +67,14 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] #[serde(flatten)] pub execution_payload: Payload::Capella, - #[superstruct(only(Capella))] + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + #[serde(flatten)] + pub execution_payload: Payload::Deneb, + #[superstruct(only(Capella, Deneb))] pub bls_to_execution_changes: VariableList<SignedBlsToExecutionChange, T::MaxBlsToExecutionChanges>, + #[superstruct(only(Deneb))] + pub blob_kzg_commitments: KzgCommitments<T>, #[superstruct(only(Base, Altair))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -85,6 +95,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), Self::Merge(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), } } } @@ -97,6 +108,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, 
BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, BeaconBlockBodyRef::Merge { .. } => ForkName::Merge, BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, + BeaconBlockBodyRef::Deneb { .. } => ForkName::Deneb, } } } @@ -321,6 +333,50 @@ impl<E: EthSpec> From<BeaconBlockBodyCapella<E, FullPayload<E>>> } } +impl<E: EthSpec> From<BeaconBlockBodyDeneb<E, FullPayload<E>>> + for ( + BeaconBlockBodyDeneb<E, BlindedPayload<E>>, + Option<ExecutionPayloadDeneb<E>>, + ) +{ + fn from(body: BeaconBlockBodyDeneb<E, FullPayload<E>>) -> Self { + let BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + } = body; + + ( + BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadDeneb { + execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + Some(execution_payload), + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. 
impl<E: EthSpec> BeaconBlockBodyBase<E, FullPayload<E>> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase<E, BlindedPayload<E>> { @@ -402,6 +458,42 @@ impl<E: EthSpec> BeaconBlockBodyCapella<E, FullPayload<E>> { } } +impl<E: EthSpec> BeaconBlockBodyDeneb<E, FullPayload<E>> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyDeneb<E, BlindedPayload<E>> { + let BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + } = self; + + BeaconBlockBodyDeneb { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadDeneb { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + blob_kzg_commitments: blob_kzg_commitments.clone(), + } + } +} + impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>> for ( BeaconBlockBody<E, BlindedPayload<E>>, @@ -416,6 +508,14 @@ impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>> } } +/// Util method helpful for logging. 
+pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec<String> = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} + #[cfg(test)] mod tests { mod base { diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index f2ef0a3dcc..689f1a28b0 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6a205e307a..18b9866f35 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -9,7 +9,7 @@ use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; @@ -183,7 +183,7 @@ impl From<BeaconStateHash> for Hash256 { /// The state of the `BeaconChain` at some slot. 
#[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Derivative, @@ -263,9 +263,9 @@ where pub current_epoch_attestations: VariableList<PendingAttestation<T>, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub previous_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>, // Finality @@ -280,13 +280,13 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub inactivity_scores: VariableList<u64, T::ValidatorRegistryLimit>, // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub current_sync_committee: Arc<SyncCommittee<T>>, - #[superstruct(only(Altair, Merge, Capella))] + #[superstruct(only(Altair, Merge, Capella, Deneb))] pub next_sync_committee: Arc<SyncCommittee<T>>, // Execution @@ -300,16 +300,21 @@ where partial_getter(rename = "latest_execution_payload_header_capella") )] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella<T>, + #[superstruct( + only(Deneb), + partial_getter(rename = "latest_execution_payload_header_deneb") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb<T>, // Capella - #[superstruct(only(Capella), partial_getter(copy))] + #[superstruct(only(Capella, Deneb), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_index: u64, - #[superstruct(only(Capella), partial_getter(copy))] + #[superstruct(only(Capella, Deneb), partial_getter(copy))] 
#[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub historical_summaries: VariableList<HistoricalSummary, T::HistoricalRootsLimit>, // Caching (not in the spec) @@ -424,12 +429,7 @@ impl<T: EthSpec> BeaconState<T> { /// dictated by `self.slot()`. pub fn fork_name(&self, spec: &ChainSpec) -> Result<ForkName, InconsistentFork> { let fork_at_slot = spec.fork_name_at_epoch(self.current_epoch()); - let object_fork = match self { - BeaconState::Base { .. } => ForkName::Base, - BeaconState::Altair { .. } => ForkName::Altair, - BeaconState::Merge { .. } => ForkName::Merge, - BeaconState::Capella { .. } => ForkName::Capella, - }; + let object_fork = self.fork_name_unchecked(); if fork_at_slot == object_fork { Ok(object_fork) @@ -441,6 +441,19 @@ impl<T: EthSpec> BeaconState<T> { } } + /// Returns the name of the fork pertaining to `self`. + /// + /// Does not check if `self` is consistent with the fork dictated by `self.slot()`. + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + BeaconState::Base { .. } => ForkName::Base, + BeaconState::Altair { .. } => ForkName::Altair, + BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, + BeaconState::Deneb { .. } => ForkName::Deneb, + } + } + /// Specialised deserialisation method that uses the `ChainSpec` as context. 
#[allow(clippy::arithmetic_side_effects)] pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> { @@ -728,6 +741,9 @@ impl<T: EthSpec> BeaconState<T> { BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRef::Capella( &state.latest_execution_payload_header, )), + BeaconState::Deneb(state) => Ok(ExecutionPayloadHeaderRef::Deneb( + &state.latest_execution_payload_header, + )), } } @@ -742,6 +758,9 @@ impl<T: EthSpec> BeaconState<T> { BeaconState::Capella(state) => Ok(ExecutionPayloadHeaderRefMut::Capella( &mut state.latest_execution_payload_header, )), + BeaconState::Deneb(state) => Ok(ExecutionPayloadHeaderRefMut::Deneb( + &mut state.latest_execution_payload_header, + )), } } @@ -1188,6 +1207,11 @@ impl<T: EthSpec> BeaconState<T> { &mut state.balances, &mut state.progressive_balances_cache, ), + BeaconState::Deneb(state) => ( + &mut state.validators, + &mut state.balances, + &mut state.progressive_balances_cache, + ), } } @@ -1298,6 +1322,24 @@ impl<T: EthSpec> BeaconState<T> { )) } + /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). + /// + /// Uses the epoch cache, and will error if it isn't initialized. + /// + /// Spec v1.4.0 + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result<u64, Error> { + Ok(match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) => self.get_churn_limit(spec)?, + BeaconState::Deneb(_) => std::cmp::min( + spec.max_per_epoch_activation_churn_limit, + self.get_churn_limit(spec)?, + ), + }) + } + /// Returns the `slot`, `index`, `committee_position` and `committee_len` for which a validator must produce an /// attestation. 
/// @@ -1385,6 +1427,7 @@ impl<T: EthSpec> BeaconState<T> { BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { @@ -1392,6 +1435,7 @@ impl<T: EthSpec> BeaconState<T> { BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Deneb(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1703,6 +1747,7 @@ impl<T: EthSpec> BeaconState<T> { BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), + BeaconState::Deneb(inner) => BeaconState::Deneb(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); @@ -1880,6 +1925,7 @@ impl<T: EthSpec> CompareFields for BeaconState<T> { (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), + (BeaconState::Deneb(x), BeaconState::Deneb(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 64bf686f34..8d29bc2217 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -4,7 +4,7 @@ use super::BeaconState; use crate::*; use 
core::num::NonZeroUsize; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{four_byte_option_impl, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::ops::Range; diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index b657d62ae6..cb96fba691 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -1,6 +1,6 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// Map from exit epoch to the number of validators with that exit epoch. diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index 9f5c223d57..6c0682480b 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -2,7 +2,7 @@ use crate::beacon_state::balance::Balance; use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec}; use arbitrary::Arbitrary; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString, EnumVariantNames}; /// This cache keeps track of the accumulated target attestation balance for the current & previous @@ -179,6 +179,9 @@ impl ProgressiveBalancesMode { pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool { match state { BeaconState::Base(_) => false, - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true, + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => true, } } diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs 
b/consensus/types/src/beacon_state/pubkey_cache.rs index 590ea30f99..c56c9077e1 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs new file mode 100644 index 0000000000..8637e538d0 --- /dev/null +++ b/consensus/types/src/blob_sidecar.rs @@ -0,0 +1,241 @@ +use crate::test_utils::TestRandom; +use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use derivative::Derivative; +use kzg::{ + Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, + FIELD_ELEMENTS_PER_BLOB, +}; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use std::fmt::Debug; +use std::hash::Hash; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +/// Container of the data that identifies an individual blob. 
+#[derive( + Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, +)] +pub struct BlobIdentifier { + pub block_root: Hash256, + pub index: u64, +} + +impl BlobIdentifier { + pub fn get_all_blob_ids<E: EthSpec>(block_root: Hash256) -> Vec<BlobIdentifier> { + let mut blob_ids = Vec::with_capacity(E::max_blobs_per_block()); + for i in 0..E::max_blobs_per_block() { + blob_ids.push(BlobIdentifier { + block_root, + index: i as u64, + }); + } + blob_ids + } +} + +impl PartialOrd for BlobIdentifier { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for BlobIdentifier { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, +)] +#[serde(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec")] +#[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] +pub struct BlobSidecar<T: EthSpec> { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub block_parent_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub proposer_index: u64, + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub blob: Blob<T>, + pub kzg_commitment: KzgCommitment, + pub kzg_proof: KzgProof, +} + +impl<E: EthSpec> From<Arc<BlobSidecar<E>>> for BlindedBlobSidecar { + fn from(blob_sidecar: Arc<BlobSidecar<E>>) -> Self { + BlindedBlobSidecar { + block_root: blob_sidecar.block_root, + index: blob_sidecar.index, + slot: blob_sidecar.slot, + block_parent_root: blob_sidecar.block_parent_root, + proposer_index: blob_sidecar.proposer_index, + blob_root: blob_sidecar.blob.tree_hash_root(), + kzg_commitment: blob_sidecar.kzg_commitment, + kzg_proof: blob_sidecar.kzg_proof, + } + } +} + +impl<E: EthSpec> From<BlobSidecar<E>> for BlindedBlobSidecar { + fn 
from(blob_sidecar: BlobSidecar<E>) -> Self { + BlindedBlobSidecar { + block_root: blob_sidecar.block_root, + index: blob_sidecar.index, + slot: blob_sidecar.slot, + block_parent_root: blob_sidecar.block_parent_root, + proposer_index: blob_sidecar.proposer_index, + blob_root: blob_sidecar.blob.tree_hash_root(), + kzg_commitment: blob_sidecar.kzg_commitment, + kzg_proof: blob_sidecar.kzg_proof, + } + } +} + +impl<T: EthSpec> PartialOrd for BlobSidecar<T> { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl<T: EthSpec> Ord for BlobSidecar<T> { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + +impl<T: EthSpec> SignedRoot for BlobSidecar<T> {} + +impl<T: EthSpec> BlobSidecar<T> { + pub fn id(&self) -> BlobIdentifier { + BlobIdentifier { + block_root: self.block_root, + index: self.index, + } + } + + pub fn empty() -> Self { + Self { + block_root: Hash256::zero(), + index: 0, + slot: Slot::new(0), + block_parent_root: Hash256::zero(), + proposer_index: 0, + blob: Blob::<T>::default(), + kzg_commitment: KzgCommitment::empty_for_testing(), + kzg_proof: KzgProof::empty(), + } + } + + pub fn random_valid<R: Rng>(rng: &mut R, kzg: &Kzg) -> Result<Self, String> { + let mut blob_bytes = vec![0u8; BYTES_PER_BLOB]; + rng.fill_bytes(&mut blob_bytes); + // Ensure that the blob is canonical by ensuring that + // each field element contained in the blob is < BLS_MODULUS + for i in 0..FIELD_ELEMENTS_PER_BLOB { + let Some(byte) = blob_bytes.get_mut( + i.checked_mul(BYTES_PER_FIELD_ELEMENT) + .ok_or("overflow".to_string())?, + ) else { + return Err(format!("blob byte index out of bounds: {:?}", i)); + }; + *byte = 0; + } + + let blob = Blob::<T>::new(blob_bytes) + .map_err(|e| format!("error constructing random blob: {:?}", e))?; + let kzg_blob = KzgBlob::from_bytes(&blob).unwrap(); + + let commitment = kzg + .blob_to_kzg_commitment(&kzg_blob) + .map_err(|e| format!("error computing 
kzg commitment: {:?}", e))?; + + let proof = kzg + .compute_blob_kzg_proof(&kzg_blob, commitment) + .map_err(|e| format!("error computing kzg proof: {:?}", e))?; + + Ok(Self { + blob, + kzg_commitment: commitment, + kzg_proof: proof, + ..Self::empty() + }) + } + + #[allow(clippy::arithmetic_side_effects)] + pub fn max_size() -> usize { + // Fixed part + Self::empty().as_ssz_bytes().len() + } +} + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, +)] +#[derivative(PartialEq, Eq, Hash)] +pub struct BlindedBlobSidecar { + pub block_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + pub block_parent_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] + pub proposer_index: u64, + pub blob_root: Hash256, + pub kzg_commitment: KzgCommitment, + pub kzg_proof: KzgProof, +} + +impl BlindedBlobSidecar { + pub fn empty() -> Self { + Self { + block_root: Hash256::zero(), + index: 0, + slot: Slot::new(0), + block_parent_root: Hash256::zero(), + proposer_index: 0, + blob_root: Hash256::zero(), + kzg_commitment: KzgCommitment::empty_for_testing(), + kzg_proof: KzgProof::empty(), + } + } +} + +impl SignedRoot for BlindedBlobSidecar {} + +pub type SidecarList<T, Sidecar> = VariableList<Arc<Sidecar>, <T as EthSpec>::MaxBlobsPerBlock>; +pub type BlobSidecarList<T> = SidecarList<T, BlobSidecar<T>>; +pub type BlindedBlobSidecarList<T> = SidecarList<T, BlindedBlobSidecar>; + +pub type FixedBlobSidecarList<T> = + FixedVector<Option<Arc<BlobSidecar<T>>>, <T as EthSpec>::MaxBlobsPerBlock>; + +pub type BlobsList<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobCommitmentsPerBlock>; +pub type BlobRootsList<T> = VariableList<Hash256, <T as EthSpec>::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index 3ed9ee9255..baa65f5172 100644 --- 
a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 8723c2afed..910ef97c71 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,76 +1,106 @@ +use crate::beacon_block_body::KzgCommitments; use crate::{ - AbstractExecPayload, ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, ForkName, - ForkVersionDeserialize, SignedRoot, Uint256, + BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, + ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; -use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use serde_with::{serde_as, DeserializeAs, SerializeAs}; -use std::marker::PhantomData; +use serde::{Deserialize, Deserializer, Serialize}; +use ssz_derive::Encode; +use superstruct::superstruct; use tree_hash_derive::TreeHash; -#[serde_as] +#[derive(PartialEq, Debug, Default, Serialize, Deserialize, TreeHash, Clone, Encode)] +#[serde(bound = "E: EthSpec")] +pub struct BlindedBlobsBundle<E: EthSpec> { + pub commitments: KzgCommitments<E>, + pub proofs: KzgProofs<E>, + pub blob_roots: BlobRootsList<E>, +} + +#[superstruct( + variants(Merge, Capella, Deneb), + variant_attributes( + derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), + serde(bound = "E: EthSpec", deny_unknown_fields) + ), + map_ref_into(ExecutionPayloadHeaderRef), + 
map_ref_mut_into(ExecutionPayloadHeaderRefMut) +)] #[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] -pub struct BuilderBid<E: EthSpec, Payload: AbstractExecPayload<E>> { - #[serde_as(as = "BlindedPayloadAsHeader<E>")] - pub header: Payload, +#[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] +#[tree_hash(enum_behaviour = "transparent")] +pub struct BuilderBid<E: EthSpec> { + #[superstruct(only(Merge), partial_getter(rename = "header_merge"))] + pub header: ExecutionPayloadHeaderMerge<E>, + #[superstruct(only(Capella), partial_getter(rename = "header_capella"))] + pub header: ExecutionPayloadHeaderCapella<E>, + #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] + pub header: ExecutionPayloadHeaderDeneb<E>, + #[superstruct(only(Deneb))] + pub blinded_blobs_bundle: BlindedBlobsBundle<E>, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, - #[serde(skip)] - #[tree_hash(skip_hashing)] - _phantom_data: PhantomData<E>, } -impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedRoot for BuilderBid<E, Payload> {} - -/// Validator registration, for use in interacting with servers implementing the builder API. 
-#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -#[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] -pub struct SignedBuilderBid<E: EthSpec, Payload: AbstractExecPayload<E>> { - pub message: BuilderBid<E, Payload>, - pub signature: Signature, +impl<E: EthSpec> BuilderBid<E> { + pub fn header(&self) -> ExecutionPayloadHeaderRef<'_, E> { + self.to_ref().header() + } } -impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize - for BuilderBid<T, Payload> -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result<Self, D::Error> { - let convert_err = |_| { - serde::de::Error::custom( - "BuilderBid failed to deserialize: unable to convert payload header to payload", - ) - }; +impl<'a, E: EthSpec> BuilderBidRef<'a, E> { + pub fn header(&self) -> ExecutionPayloadHeaderRef<'a, E> { + map_builder_bid_ref_into_execution_payload_header_ref!(&'a _, self, |bid, cons| cons( + &bid.header + )) + } +} - #[derive(Deserialize)] - struct Helper { - header: serde_json::Value, - #[serde(with = "serde_utils::quoted_u256")] - value: Uint256, - pubkey: PublicKeyBytes, - } - let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; - let payload_header = - ExecutionPayloadHeader::deserialize_by_fork::<'de, D>(helper.header, fork_name)?; - - Ok(Self { - header: Payload::try_from(payload_header).map_err(convert_err)?, - value: helper.value, - pubkey: helper.pubkey, - _phantom_data: Default::default(), +impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> { + pub fn header_mut(self) -> ExecutionPayloadHeaderRefMut<'a, E> { + map_builder_bid_ref_mut_into_execution_payload_header_ref_mut!(&'a _, self, |bid, cons| { + cons(&mut bid.header) }) } } -impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize - for SignedBuilderBid<T, Payload> -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( +impl<E: EthSpec> SignedRoot for BuilderBid<E> {} + 
+/// Validator registration, for use in interacting with servers implementing the builder API. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec")] +pub struct SignedBuilderBid<E: EthSpec> { + pub message: BuilderBid<E>, + pub signature: Signature, +} + +impl<T: EthSpec> ForkVersionDeserialize for BuilderBid<T> { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: serde_json::value::Value, + fork_name: ForkName, + ) -> Result<Self, D::Error> { + let convert_err = + |e| serde::de::Error::custom(format!("BuilderBid failed to deserialize: {:?}", e)); + + Ok(match fork_name { + ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Base | ForkName::Altair => { + return Err(serde::de::Error::custom(format!( + "BuilderBid failed to deserialize: unsupported fork '{}'", + fork_name + ))); + } + }) + } +} + +impl<T: EthSpec> ForkVersionDeserialize for SignedBuilderBid<T> { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result<Self, D::Error> { @@ -88,34 +118,10 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize } } -struct BlindedPayloadAsHeader<E>(PhantomData<E>); - -impl<E: EthSpec, Payload: ExecPayload<E>> SerializeAs<Payload> for BlindedPayloadAsHeader<E> { - fn serialize_as<S>(source: &Payload, serializer: S) -> Result<S::Ok, S::Error> - where - S: Serializer, - { - source.to_execution_payload_header().serialize(serializer) - } -} - -impl<'de, E: EthSpec, Payload: AbstractExecPayload<E>> DeserializeAs<'de, Payload> - for BlindedPayloadAsHeader<E> -{ - fn deserialize_as<D>(deserializer: D) -> Result<Payload, D::Error> - where - D: Deserializer<'de>, - { - let payload_header = 
ExecutionPayloadHeader::deserialize(deserializer)?; - Payload::try_from(payload_header) - .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) - } -} - -impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBuilderBid<E, Payload> { +impl<E: EthSpec> SignedBuilderBid<E> { pub fn verify_signature(&self, spec: &ChainSpec) -> bool { self.message - .pubkey + .pubkey() .decompress() .map(|pubkey| { let domain = spec.get_builder_domain(); diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index a13d3116d8..ed00297858 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,8 +1,8 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use int_to_bytes::int_to_bytes4; +use serde::Deserialize; use serde::{Deserializer, Serialize, Serializer}; -use serde_derive::Deserialize; use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; @@ -15,6 +15,7 @@ pub enum Domain { BlsToExecutionChange, BeaconProposer, BeaconAttester, + BlobSidecar, Randao, Deposit, VoluntaryExit, @@ -50,6 +51,7 @@ pub struct ChainSpec { pub max_committees_per_slot: usize, pub target_committee_size: usize, pub min_per_epoch_churn_limit: u64, + pub max_per_epoch_activation_churn_limit: u64, pub churn_limit_quotient: u64, pub shuffle_round_count: u8, pub min_genesis_active_validator_count: u64, @@ -100,6 +102,7 @@ pub struct ChainSpec { */ pub(crate) domain_beacon_proposer: u32, pub(crate) domain_beacon_attester: u32, + pub(crate) domain_blob_sidecar: u32, pub(crate) domain_randao: u32, pub(crate) domain_deposit: u32, pub(crate) domain_voluntary_exit: u32, @@ -160,6 +163,12 @@ pub struct ChainSpec { pub capella_fork_epoch: Option<Epoch>, pub max_validators_per_withdrawals_sweep: u64, + /* + * Deneb hard fork params + */ + pub deneb_fork_version: [u8; 4], + pub deneb_fork_epoch: Option<Epoch>, + /* * Networking */ @@ -254,13 +263,16 @@ impl 
ChainSpec { /// Returns the name of the fork which is active at `epoch`. pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.capella_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, - _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.deneb_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, }, } @@ -273,6 +285,7 @@ impl ChainSpec { ForkName::Altair => self.altair_fork_version, ForkName::Merge => self.bellatrix_fork_version, ForkName::Capella => self.capella_fork_version, + ForkName::Deneb => self.deneb_fork_version, } } @@ -283,6 +296,7 @@ impl ChainSpec { ForkName::Altair => self.altair_fork_epoch, ForkName::Merge => self.bellatrix_fork_epoch, ForkName::Capella => self.capella_fork_epoch, + ForkName::Deneb => self.deneb_fork_epoch, } } @@ -293,6 +307,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix, + BeaconState::Deneb(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -306,6 +321,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, BeaconState::Capella(_) => self.proportional_slashing_multiplier_bellatrix, + BeaconState::Deneb(_) => self.proportional_slashing_multiplier_bellatrix, 
} } @@ -319,6 +335,7 @@ impl ChainSpec { BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, BeaconState::Capella(_) => self.min_slashing_penalty_quotient_bellatrix, + BeaconState::Deneb(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -357,6 +374,7 @@ impl ChainSpec { match domain { Domain::BeaconProposer => self.domain_beacon_proposer, Domain::BeaconAttester => self.domain_beacon_attester, + Domain::BlobSidecar => self.domain_blob_sidecar, Domain::Randao => self.domain_randao, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, @@ -493,6 +511,7 @@ impl ChainSpec { max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, + max_per_epoch_activation_churn_limit: 8, churn_limit_quotient: 65_536, shuffle_round_count: 90, min_genesis_active_validator_count: 16_384, @@ -560,6 +579,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blob_sidecar: 11, // 0x0B000000 /* * Fork choice @@ -621,6 +641,12 @@ impl ChainSpec { capella_fork_epoch: Some(Epoch::new(194048)), max_validators_per_withdrawals_sweep: 16384, + /* + * Deneb hard fork params + */ + deneb_fork_version: [0x04, 0x00, 0x00, 0x00], + deneb_fork_epoch: None, + /* * Network specific */ @@ -662,6 +688,8 @@ impl ChainSpec { config_name: None, max_committees_per_slot: 4, target_committee_size: 4, + min_per_epoch_churn_limit: 2, + max_per_epoch_activation_churn_limit: 4, churn_limit_quotient: 32, shuffle_round_count: 10, min_genesis_active_validator_count: 64, @@ -693,6 +721,9 @@ impl ChainSpec { capella_fork_version: [0x03, 0x00, 0x00, 0x01], capella_fork_epoch: None, max_validators_per_withdrawals_sweep: 16, + // Deneb + deneb_fork_version: [0x04, 0x00, 0x00, 0x01], + deneb_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -723,6 +754,7 @@ 
impl ChainSpec { max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, + max_per_epoch_activation_churn_limit: 8, churn_limit_quotient: 4_096, shuffle_round_count: 90, min_genesis_active_validator_count: 4_096, @@ -790,6 +822,7 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_blob_sidecar: 11, /* * Fork choice @@ -853,6 +886,12 @@ impl ChainSpec { capella_fork_epoch: Some(Epoch::new(648704)), max_validators_per_withdrawals_sweep: 8192, + /* + * Deneb hard fork params + */ + deneb_fork_version: [0x04, 0x00, 0x00, 0x64], + deneb_fork_epoch: None, + /* * Network specific */ @@ -950,6 +989,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub capella_fork_epoch: Option<MaybeQuoted<Epoch>>, + #[serde(default = "default_deneb_fork_version")] + #[serde(with = "serde_utils::bytes_4_hex")] + deneb_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub deneb_fork_epoch: Option<MaybeQuoted<Epoch>>, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -972,6 +1019,9 @@ pub struct Config { ejection_balance: u64, #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, + #[serde(default = "default_max_per_epoch_activation_churn_limit")] + #[serde(with = "serde_utils::quoted_u64")] + max_per_epoch_activation_churn_limit: u64, #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, @@ -1023,6 +1073,11 @@ fn default_capella_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_deneb_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1051,6 +1106,10 @@ fn default_subnets_per_node() -> u8 { 2u8 } +const fn default_max_per_epoch_activation_churn_limit() -> u64 { + 8 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1163,6 +1222,10 @@ impl Config { capella_fork_epoch: spec .capella_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + deneb_fork_version: spec.deneb_fork_version, + deneb_fork_epoch: spec + .deneb_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -1176,6 +1239,7 @@ impl Config { ejection_balance: spec.ejection_balance, churn_limit_quotient: spec.churn_limit_quotient, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit: spec.max_per_epoch_activation_churn_limit, proposer_score_boost: spec.proposer_score_boost.map(|value| MaybeQuoted { value }), @@ -1221,6 +1285,8 @@ impl Config { bellatrix_fork_version, capella_fork_epoch, capella_fork_version, + deneb_fork_epoch, + deneb_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1231,6 +1297,7 @@ impl Config { inactivity_score_recovery_rate, ejection_balance, min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost, deposit_chain_id, @@ -1263,6 +1330,8 @@ impl Config { bellatrix_fork_version, capella_fork_epoch: capella_fork_epoch.map(|q| q.value), capella_fork_version, + deneb_fork_epoch: deneb_fork_epoch.map(|q| q.value), + deneb_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1273,6 +1342,7 @@ impl Config { inactivity_score_recovery_rate, ejection_balance, min_per_epoch_churn_limit, + max_per_epoch_activation_churn_limit, churn_limit_quotient, proposer_score_boost: 
proposer_score_boost.map(|q| q.value), deposit_chain_id, @@ -1346,6 +1416,7 @@ mod tests { test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); + test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); @@ -1370,6 +1441,8 @@ mod tests { spec.domain_bls_to_execution_change, &spec, ); + + test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { @@ -1525,6 +1598,7 @@ mod yaml_tests { INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 MIN_PER_EPOCH_CHURN_LIMIT: 4 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 CHURN_LIMIT_QUOTIENT: 65536 PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index e84798f6f7..044fc57f22 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 01f86d3480..bd2efd3d9e 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,9 +1,9 @@ use crate::{ consts::altair, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, - EthSpec, ForkName, + DenebPreset, EthSpec, ForkName, }; use maplit::hashmap; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use 
superstruct::superstruct; @@ -12,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. #[superstruct( - variants(Bellatrix, Capella), + variants(Capella, Deneb), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -27,9 +27,11 @@ pub struct ConfigAndPreset { pub altair_preset: AltairPreset, #[serde(flatten)] pub bellatrix_preset: BellatrixPreset, - #[superstruct(only(Capella))] #[serde(flatten)] pub capella_preset: CapellaPreset, + #[superstruct(only(Deneb))] + #[serde(flatten)] + pub deneb_preset: DenebPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] pub extra_fields: HashMap<String, Value>, @@ -41,14 +43,24 @@ impl ConfigAndPreset { let base_preset = BasePreset::from_chain_spec::<T>(spec); let altair_preset = AltairPreset::from_chain_spec::<T>(spec); let bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec); + let capella_preset = CapellaPreset::from_chain_spec::<T>(spec); let extra_fields = get_extra_fields(spec); - if spec.capella_fork_epoch.is_some() + if spec.deneb_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Capella) + || fork_name == Some(ForkName::Deneb) { - let capella_preset = CapellaPreset::from_chain_spec::<T>(spec); - + let deneb_preset = DenebPreset::from_chain_spec::<T>(spec); + ConfigAndPreset::Deneb(ConfigAndPresetDeneb { + config, + base_preset, + altair_preset, + bellatrix_preset, + capella_preset, + deneb_preset, + extra_fields, + }) + } else { ConfigAndPreset::Capella(ConfigAndPresetCapella { config, base_preset, @@ -57,14 +69,6 @@ impl ConfigAndPreset { capella_preset, extra_fields, }) - } else { - ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { - config, - base_preset, - altair_preset, - bellatrix_preset, - extra_fields, - }) } } } @@ -78,6 +82,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, 
Value> { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), + "domain_blob_sidecar".to_uppercase() => u32_hex(spec.domain_blob_sidecar), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), @@ -132,8 +137,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetCapella = + let from: ConfigAndPresetDeneb = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Capella(from), yamlconfig); + assert_eq!(ConfigAndPreset::Deneb(from), yamlconfig); } } diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index a9377bc3e0..f93c75ee8d 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -22,3 +22,11 @@ pub mod altair { pub mod merge { pub const INTERVALS_PER_SLOT: u64 = 3; } +pub mod deneb { + use crate::Epoch; + + pub const VERSIONED_HASH_VERSION_KZG: u8 = 1; + pub const BLOB_SIDECAR_SUBNET_COUNT: u64 = 6; + pub const MAX_BLOBS_PER_BLOCK: u64 = BLOB_SIDECAR_SUBNET_COUNT; + pub const MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: Epoch = Epoch::new(4096); +} diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 7e757f89b1..aba98c92b7 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git 
a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index bbc3bd9fb8..c818c7d808 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::typenum::U33; use test_random_derive::TestRandom; diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d75643f659..e074ffdfaa 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::{PublicKeyBytes, SignatureBytes}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 1096cfaa28..e5c666df82 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::PublicKeyBytes; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 12e81d0028..d4dcdb2eda 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,7 +1,7 @@ use crate::*; use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use test_utils::TestRandom; diff --git 
a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 409383c904..3ae7c39cfe 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index d8f476b99b..e2c4e511ef 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -1,7 +1,7 @@ use super::Hash256; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 64bfb8da0b..70982e8d56 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -1,10 +1,10 @@ use crate::*; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U16, - U16777216, U2, U2048, U256, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, + bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, + U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -102,6 +102,13 @@ pub trait EthSpec: */ type MaxBlsToExecutionChanges: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Deneb + */ + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; + type 
MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; + type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -120,6 +127,11 @@ pub trait EthSpec: /// Must be set to `SyncCommitteeSize / SyncCommitteeSubnetCount`. type SyncSubcommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The total length of a blob in bytes. + /// + /// Must be set to `BytesPerFieldElement * FieldElementsPerBlob`. + type BytesPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + fn default_spec() -> ChainSpec; fn spec_name() -> EthSpecId; @@ -239,6 +251,26 @@ pub trait EthSpec: fn max_withdrawals_per_payload() -> usize { Self::MaxWithdrawalsPerPayload::to_usize() } + + /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. + fn max_blobs_per_block() -> usize { + Self::MaxBlobsPerBlock::to_usize() + } + + /// Returns the `MAX_BLOB_COMMITMENTS_PER_BLOCK` constant for this specification. + fn max_blob_commitments_per_block() -> usize { + Self::MaxBlobCommitmentsPerBlock::to_usize() + } + + /// Returns the `FIELD_ELEMENTS_PER_BLOB` constant for this specification. + fn field_elements_per_blob() -> usize { + Self::FieldElementsPerBlob::to_usize() + } + + /// Returns the `BYTES_PER_BLOB` constant for this specification. + fn bytes_per_blob() -> usize { + Self::BytesPerBlob::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
@@ -278,6 +310,11 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; + type MaxBlobsPerBlock = U6; + type MaxBlobCommitmentsPerBlock = U4096; + type BytesPerFieldElement = U32; + type FieldElementsPerBlob = U4096; + type BytesPerBlob = U131072; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -308,6 +345,9 @@ impl EthSpec for MinimalEthSpec { type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch type MaxWithdrawalsPerPayload = U4; + type FieldElementsPerBlob = U4096; + type BytesPerBlob = U131072; + type MaxBlobCommitmentsPerBlock = U16; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -328,7 +368,9 @@ impl EthSpec for MinimalEthSpec { GasLimitDenominator, MinGasLimit, MaxExtraDataBytes, - MaxBlsToExecutionChanges + MaxBlsToExecutionChanges, + MaxBlobsPerBlock, + BytesPerFieldElement }); fn default_spec() -> ChainSpec { @@ -374,6 +416,11 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U8; + type MaxBlobsPerBlock = U6; + type MaxBlobCommitmentsPerBlock = U4096; + type FieldElementsPerBlob = U4096; + type BytesPerFieldElement = U32; + type BytesPerBlob = U131072; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 363a35a86a..b2401f0c0f 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; use derivative::Derivative; use 
rand::RngCore; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; @@ -20,7 +20,7 @@ use std::fmt; )] #[derivative(Debug = "transparent")] #[serde(transparent)] -pub struct ExecutionBlockHash(Hash256); +pub struct ExecutionBlockHash(pub Hash256); impl ExecutionBlockHash { pub fn zero() -> Self { diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index b19988ff7d..945222a925 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -24,9 +24,12 @@ use metastruct::metastruct; /// /// Credit to Reth for the type definition. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[metastruct(mappings(map_execution_block_header_fields_except_withdrawals(exclude( - withdrawals_root -))))] +#[metastruct(mappings(map_execution_block_header_fields_base(exclude( + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root +)),))] pub struct ExecutionBlockHeader { pub parent_hash: Hash256, pub ommers_hash: Hash256, @@ -45,6 +48,9 @@ pub struct ExecutionBlockHeader { pub nonce: Hash64, pub base_fee_per_gas: Uint256, pub withdrawals_root: Option<Hash256>, + pub blob_gas_used: Option<u64>, + pub excess_blob_gas: Option<u64>, + pub parent_beacon_block_root: Option<Hash256>, } impl ExecutionBlockHeader { @@ -53,6 +59,9 @@ impl ExecutionBlockHeader { rlp_empty_list_root: Hash256, rlp_transactions_root: Hash256, rlp_withdrawals_root: Option<Hash256>, + rlp_blob_gas_used: Option<u64>, + rlp_excess_blob_gas: Option<u64>, + rlp_parent_beacon_block_root: Option<Hash256>, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. 
@@ -74,6 +83,9 @@ impl ExecutionBlockHeader { nonce: Hash64::zero(), base_fee_per_gas: payload.base_fee_per_gas(), withdrawals_root: rlp_withdrawals_root, + blob_gas_used: rlp_blob_gas_used, + excess_blob_gas: rlp_excess_blob_gas, + parent_beacon_block_root: rlp_parent_beacon_block_root, } } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 690138da67..1dc5951b25 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -15,7 +15,7 @@ pub type Transactions<T> = VariableList< pub type Withdrawals<T> = VariableList<Withdrawal, <T as EthSpec>::MaxWithdrawalsPerPayload>; #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Default, @@ -81,8 +81,14 @@ pub struct ExecutionPayload<T: EthSpec> { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions<T>, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] pub withdrawals: Withdrawals<T>, + #[superstruct(only(Deneb), partial_getter(copy))] + #[serde(with = "serde_utils::quoted_u64")] + pub blob_gas_used: u64, + #[superstruct(only(Deneb), partial_getter(copy))] + #[serde(with = "serde_utils::quoted_u64")] + pub excess_blob_gas: u64, } impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { @@ -103,6 +109,7 @@ impl<T: EthSpec> ExecutionPayload<T> { ))), ForkName::Merge => ExecutionPayloadMerge::from_ssz_bytes(bytes).map(Self::Merge), ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), + ForkName::Deneb => ExecutionPayloadDeneb::from_ssz_bytes(bytes).map(Self::Deneb), } } @@ -129,6 +136,19 @@ 
impl<T: EthSpec> ExecutionPayload<T> { // Max size of variable length `withdrawals` field + (T::max_withdrawals_per_payload() * <Withdrawal as Encode>::ssz_fixed_len()) } + + #[allow(clippy::arithmetic_side_effects)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_deneb_size() -> usize { + // Fixed part + ExecutionPayloadDeneb::<T>::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * <u8 as Encode>::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (T::max_withdrawals_per_payload() * <Withdrawal as Encode>::ssz_fixed_len()) + } } impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayload<T> { @@ -143,6 +163,7 @@ impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayload<T> { Ok(match fork_name { ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", @@ -158,6 +179,7 @@ impl<T: EthSpec> ExecutionPayload<T> { match self { ExecutionPayload::Merge(_) => ForkName::Merge, ExecutionPayload::Capella(_) => ForkName::Capella, + ExecutionPayload::Deneb(_) => ForkName::Deneb, } } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 1fb29db9d3..e0859c0a1e 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, *}; use derivative::Derivative; -use 
serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; use BeaconStateError; #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Default, @@ -28,7 +28,10 @@ use BeaconStateError; serde(bound = "T: EthSpec", deny_unknown_fields), arbitrary(bound = "T: EthSpec") ), - ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), + ref_attributes( + derive(PartialEq, TreeHash, Debug), + tree_hash(enum_behaviour = "transparent") + ), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") )] @@ -74,9 +77,17 @@ pub struct ExecutionPayloadHeader<T: EthSpec> { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella))] + #[superstruct(only(Capella, Deneb))] #[superstruct(getter(copy))] pub withdrawals_root: Hash256, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::quoted_u64")] + #[superstruct(getter(copy))] + pub blob_gas_used: u64, + #[superstruct(only(Deneb))] + #[serde(with = "serde_utils::quoted_u64")] + #[superstruct(getter(copy))] + pub excess_blob_gas: u64, } impl<T: EthSpec> ExecutionPayloadHeader<T> { @@ -93,6 +104,7 @@ impl<T: EthSpec> ExecutionPayloadHeader<T> { ForkName::Capella => { ExecutionPayloadHeaderCapella::from_ssz_bytes(bytes).map(Self::Capella) } + ForkName::Deneb => ExecutionPayloadHeaderDeneb::from_ssz_bytes(bytes).map(Self::Deneb), } } } @@ -128,6 +140,30 @@ impl<T: EthSpec> ExecutionPayloadHeaderMerge<T> { } } +impl<T: EthSpec> ExecutionPayloadHeaderCapella<T> { + pub fn upgrade_to_deneb(&self) -> ExecutionPayloadHeaderDeneb<T> { + ExecutionPayloadHeaderDeneb { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, 
+ state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + blob_gas_used: 0, + excess_blob_gas: 0, + } + } +} + impl<'a, T: EthSpec> From<&'a ExecutionPayloadMerge<T>> for ExecutionPayloadHeaderMerge<T> { fn from(payload: &'a ExecutionPayloadMerge<T>) -> Self { Self { @@ -170,6 +206,30 @@ impl<'a, T: EthSpec> From<&'a ExecutionPayloadCapella<T>> for ExecutionPayloadHe } } +impl<'a, T: EthSpec> From<&'a ExecutionPayloadDeneb<T>> for ExecutionPayloadHeaderDeneb<T> { + fn from(payload: &'a ExecutionPayloadDeneb<T>) -> Self { + Self { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. 
impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderMerge<T> { @@ -184,6 +244,12 @@ impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderCapella<T> { } } +impl<'a, T: EthSpec> From<&'a Self> for ExecutionPayloadHeaderDeneb<T> { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + impl<'a, T: EthSpec> From<ExecutionPayloadRef<'a, T>> for ExecutionPayloadHeader<T> { fn from(payload: ExecutionPayloadRef<'a, T>) -> Self { map_execution_payload_ref_into_execution_payload_header!( @@ -214,6 +280,15 @@ impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for ExecutionPayloadHeaderCa } } } +impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for ExecutionPayloadHeaderDeneb<T> { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader<T>) -> Result<Self, Self::Error> { + match header { + ExecutionPayloadHeader::Deneb(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayloadHeader<T> { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( @@ -230,6 +305,7 @@ impl<T: EthSpec> ForkVersionDeserialize for ExecutionPayloadHeader<T> { Ok(match fork_name { ForkName::Merge => Self::Merge(serde_json::from_value(value).map_err(convert_err)?), ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 4650881f72..b23113f436 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Epoch; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use 
ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 90d1fbc686..23163f0eec 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -54,6 +54,13 @@ impl ForkContext { )); } + if spec.deneb_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Deneb, + ChainSpec::compute_fork_digest(spec.deneb_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap<ForkName, [u8; 4]> = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bf9c48cd7e..52ce57a2a9 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, SignedRoot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 85144a6137..6523b2a678 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -1,17 +1,20 @@ use crate::{ChainSpec, Epoch}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::{self, Display, Formatter}; use std::str::FromStr; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(try_from = "String")] #[serde(into = "String")] +#[ssz(enum_behaviour = "tag")] pub enum ForkName { Base, Altair, Merge, Capella, + Deneb, } impl ForkName { @@ -21,6 +24,7 @@ impl ForkName { ForkName::Altair, ForkName::Merge, ForkName::Capella, + 
ForkName::Deneb, ] } @@ -38,24 +42,35 @@ impl ForkName { spec.altair_fork_epoch = None; spec.bellatrix_fork_epoch = None; spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = None; spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = None; + spec.deneb_fork_epoch = None; spec } ForkName::Capella => { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = None; + spec + } + ForkName::Deneb => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); spec } } @@ -70,6 +85,7 @@ impl ForkName { ForkName::Altair => Some(ForkName::Base), ForkName::Merge => Some(ForkName::Altair), ForkName::Capella => Some(ForkName::Merge), + ForkName::Deneb => Some(ForkName::Capella), } } @@ -81,7 +97,8 @@ impl ForkName { ForkName::Base => Some(ForkName::Altair), ForkName::Altair => Some(ForkName::Merge), ForkName::Merge => Some(ForkName::Capella), - ForkName::Capella => None, + ForkName::Capella => Some(ForkName::Deneb), + ForkName::Deneb => None, } } } @@ -127,6 +144,10 @@ macro_rules! 
map_fork_name_with { let (value, extra_data) = $body; ($t::Capella(value), extra_data) } + ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } } }; } @@ -140,6 +161,7 @@ impl FromStr for ForkName { "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, "capella" => ForkName::Capella, + "deneb" => ForkName::Deneb, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -152,6 +174,7 @@ impl Display for ForkName { ForkName::Altair => "altair".fmt(f), ForkName::Merge => "bellatrix".fmt(f), ForkName::Capella => "capella".fmt(f), + ForkName::Deneb => "deneb".fmt(f), } } } diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index e75b64cae9..e3e037fd63 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::FixedVector; use test_random_derive::TestRandom; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 84d87b85fd..dcc387d3d6 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -4,7 +4,7 @@ use crate::{BeaconState, EthSpec, Hash256}; use cached_tree_hash::Error; use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use compare_fields_derive::CompareFields; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use test_random_derive::TestRandom; diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index c59cbef307..c2d48d7242 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,6 +1,6 @@ use 
crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::hash::{Hash, Hasher}; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 85ce351766..26541a188c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -98,6 +98,10 @@ pub mod slot_data; #[cfg(feature = "sqlite")] pub mod sqlite; +pub mod blob_sidecar; +pub mod sidecar; +pub mod signed_blob; + use ethereum_types::{H160, H256}; pub use crate::aggregate_and_proof::AggregateAndProof; @@ -106,22 +110,24 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, - BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockDeneb, + BeaconBlockMerge, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyCapella, - BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBodyDeneb, BeaconBlockBodyMerge, BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::blob_sidecar::{ + BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, BlobSidecar, BlobSidecarList, + BlobsList, SidecarList, +}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub 
use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetBellatrix, ConfigAndPresetCapella, -}; +pub use crate::config_and_preset::{ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb}; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; @@ -133,12 +139,12 @@ pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::ExecutionBlockHeader; pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, ExecutionPayloadRef, - Transaction, Transactions, Withdrawals, + ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, + ExecutionPayloadRef, Transaction, Transactions, Withdrawals, }; pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; @@ -153,12 +159,12 @@ pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadMerge, - BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadCapella, FullPayloadMerge, - FullPayloadRef, OwnedExecPayload, + AbstractExecPayload, BlindedPayload, BlindedPayloadCapella, BlindedPayloadDeneb, + BlindedPayloadMerge, BlindedPayloadRef, BlockType, ExecPayload, FullPayload, + FullPayloadCapella, FullPayloadDeneb, 
FullPayloadMerge, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; @@ -166,10 +172,13 @@ pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockHash, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockHash, SignedBeaconBlockMerge, + SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; +pub use crate::signed_blob::*; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; @@ -198,6 +207,8 @@ pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; +pub type Blob<T> = FixedVector<u8, <T as EthSpec>::BytesPerBlob>; +pub type KzgProofs<T> = VariableList<KzgProof, <T as EthSpec>::MaxBlobCommitmentsPerBlock>; pub type VersionedHash = Hash256; pub type Hash64 = ethereum_types::H64; @@ -205,5 +216,9 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, 
}; + +pub use kzg::{KzgCommitment, KzgProof}; + +pub use sidecar::Sidecar; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 1a5eed2205..1d70456d73 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,6 +1,6 @@ use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; use crate::{light_client_update::*, test_utils::TestRandom}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use test_random_derive::TestRandom; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 08069c9308..30f337fb2b 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -3,7 +3,7 @@ use super::{ Slot, SyncAggregate, }; use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 7a39bd9ac1..fbb0558ece 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,7 @@ use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; use crate::{ light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index ca35f96802..6e53e14c99 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,7 +1,7 @@ use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::typenum::{U5, U6}; use std::sync::Arc; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index 4f170a60be..e94e56f0cd 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -1,6 +1,6 @@ use crate::{consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom, Hash256}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 2795c7f109..ff7caffcf3 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -81,8 +81,15 @@ pub trait AbstractExecPayload<T: EthSpec>: + TryFrom<ExecutionPayloadHeader<T>> + TryInto<Self::Merge> + TryInto<Self::Capella> + + TryInto<Self::Deneb> { - type Ref<'a>: ExecPayload<T> + Copy + From<&'a Self::Merge> + From<&'a Self::Capella>; + type Sidecar: Sidecar<T>; + + type Ref<'a>: ExecPayload<T> + + Copy + + From<&'a Self::Merge> + + From<&'a Self::Capella> + + From<&'a Self::Deneb>; type Merge: OwnedExecPayload<T> + Into<Self> @@ -92,12 +99,19 @@ pub trait AbstractExecPayload<T: EthSpec>: + 
Into<Self> + for<'a> From<Cow<'a, ExecutionPayloadCapella<T>>> + TryFrom<ExecutionPayloadHeaderCapella<T>>; + type Deneb: OwnedExecPayload<T> + + Into<Self> + + for<'a> From<Cow<'a, ExecutionPayloadDeneb<T>>> + + TryFrom<ExecutionPayloadHeaderDeneb<T>>; fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>; + fn default_blobs_at_fork( + fork_name: ForkName, + ) -> Result<<Self::Sidecar as Sidecar<T>>::BlobItems, Error>; } #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -136,6 +150,8 @@ pub struct FullPayload<T: EthSpec> { pub execution_payload: ExecutionPayloadMerge<T>, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload: ExecutionPayloadCapella<T>, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload: ExecutionPayloadDeneb<T>, } impl<T: EthSpec> From<FullPayload<T>> for ExecutionPayload<T> { @@ -239,6 +255,9 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { FullPayload::Capella(ref inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayload::Deneb(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -345,6 +364,9 @@ impl<'b, T: EthSpec> ExecPayload<T> for FullPayloadRef<'b, T> { FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayloadRef::Deneb(inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -362,15 +384,26 @@ impl<'b, T: EthSpec> ExecPayload<T> for FullPayloadRef<'b, T> { } impl<T: EthSpec> AbstractExecPayload<T> for FullPayload<T> { + type Sidecar = BlobSidecar<T>; type Ref<'a> = FullPayloadRef<'a, T>; type Merge = FullPayloadMerge<T>; type Capella = FullPayloadCapella<T>; + type Deneb = FullPayloadDeneb<T>; fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> { match fork_name { ForkName::Base | ForkName::Altair => 
Err(Error::IncorrectStateVariant), ForkName::Merge => Ok(FullPayloadMerge::default().into()), ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), + } + } + fn default_blobs_at_fork(fork_name: ForkName) -> Result<BlobsList<T>, Error> { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Err(Error::IncorrectStateVariant) + } + ForkName::Deneb => Ok(VariableList::default()), } } } @@ -391,7 +424,7 @@ impl<T: EthSpec> TryFrom<ExecutionPayloadHeader<T>> for FullPayload<T> { } #[superstruct( - variants(Merge, Capella), + variants(Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -429,6 +462,8 @@ pub struct BlindedPayload<T: EthSpec> { pub execution_payload_header: ExecutionPayloadHeaderMerge<T>, #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] pub execution_payload_header: ExecutionPayloadHeaderCapella<T>, + #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))] + pub execution_payload_header: ExecutionPayloadHeaderDeneb<T>, } impl<'a, T: EthSpec> From<BlindedPayloadRef<'a, T>> for BlindedPayload<T> { @@ -510,6 +545,7 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { BlindedPayload::Capella(ref inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -597,6 +633,7 @@ impl<'b, T: EthSpec> ExecPayload<T> for BlindedPayloadRef<'b, T> { BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -860,17 +897,36 @@ impl_exec_payload_for_fork!( ExecutionPayloadCapella, Capella ); +impl_exec_payload_for_fork!( + BlindedPayloadDeneb, + FullPayloadDeneb, + ExecutionPayloadHeaderDeneb, + ExecutionPayloadDeneb, + Deneb +); impl<T: EthSpec> 
AbstractExecPayload<T> for BlindedPayload<T> { type Ref<'a> = BlindedPayloadRef<'a, T>; type Merge = BlindedPayloadMerge<T>; type Capella = BlindedPayloadCapella<T>; + type Deneb = BlindedPayloadDeneb<T>; + + type Sidecar = BlindedBlobSidecar; fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> { match fork_name { ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), + ForkName::Deneb => Ok(BlindedPayloadDeneb::default().into()), + } + } + fn default_blobs_at_fork(fork_name: ForkName) -> Result<BlobRootsList<T>, Error> { + match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + Err(Error::IncorrectStateVariant) + } + ForkName::Deneb => Ok(VariableList::default()), } } } @@ -899,6 +955,11 @@ impl<T: EthSpec> From<ExecutionPayloadHeader<T>> for BlindedPayload<T> { execution_payload_header, }) } + ExecutionPayloadHeader::Deneb(execution_payload_header) => { + Self::Deneb(BlindedPayloadDeneb { + execution_payload_header, + }) + } } } } @@ -912,6 +973,9 @@ impl<T: EthSpec> From<BlindedPayload<T>> for ExecutionPayloadHeader<T> { BlindedPayload::Capella(blinded_payload) => { ExecutionPayloadHeader::Capella(blinded_payload.execution_payload_header) } + BlindedPayload::Deneb(blinded_payload) => { + ExecutionPayloadHeader::Deneb(blinded_payload.execution_payload_header) + } } } } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 88db0ec4d3..d25a6987c0 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{AttestationData, BitList, EthSpec}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use 
tree_hash_derive::TreeHash; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index e65dd8f60d..63a372ea1c 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -1,5 +1,5 @@ use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; /// Value-level representation of an Ethereum consensus "preset". /// @@ -205,6 +205,27 @@ impl CapellaPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct DenebPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub max_blobs_per_block: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_blob_commitments_per_block: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub field_elements_per_blob: u64, +} + +impl DenebPreset { + pub fn from_chain_spec<T: EthSpec>(_spec: &ChainSpec) -> Self { + Self { + max_blobs_per_block: T::max_blobs_per_block() as u64, + max_blob_commitments_per_block: T::max_blob_commitments_per_block() as u64, + field_elements_per_blob: T::field_elements_per_blob() as u64, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -243,6 +264,9 @@ mod test { let capella: CapellaPreset = preset_from_file(&preset_name, "capella.yaml"); assert_eq!(capella, CapellaPreset::from_chain_spec::<E>(&spec)); + + let deneb: DenebPreset = preset_from_file(&preset_name, "deneb.yaml"); + assert_eq!(deneb, DenebPreset::from_chain_spec::<E>(&spec)); } #[test] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index 1ac2464a47..ee55d62c20 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::SignedBeaconBlockHeader; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use 
tree_hash_derive::TreeHash; diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs index 120d744a5e..a5bdc86673 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/shuffling_id.rs @@ -1,5 +1,5 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::hash::Hash; diff --git a/consensus/types/src/sidecar.rs b/consensus/types/src/sidecar.rs new file mode 100644 index 0000000000..e784cc57f1 --- /dev/null +++ b/consensus/types/src/sidecar.rs @@ -0,0 +1,221 @@ +use crate::beacon_block_body::KzgCommitments; +use crate::test_utils::TestRandom; +use crate::{ + AbstractExecPayload, BeaconBlock, BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, + BlobSidecar, BlobSidecarList, BlobsList, ChainSpec, Domain, EthSpec, Fork, Hash256, + SidecarList, SignedRoot, SignedSidecar, Slot, +}; +use bls::SecretKey; +use kzg::KzgProof; +use serde::de::DeserializeOwned; +use ssz::{Decode, Encode}; +use ssz_types::VariableList; +use std::fmt::Debug; +use std::hash::Hash; +use std::marker::PhantomData; +use std::sync::Arc; +use tree_hash::TreeHash; + +pub trait Sidecar<E: EthSpec>: + serde::Serialize + + Clone + + DeserializeOwned + + Encode + + Decode + + Hash + + TreeHash + + TestRandom + + Debug + + SignedRoot + + Sync + + Send + + for<'a> arbitrary::Arbitrary<'a> +{ + type BlobItems: BlobItems<E>; + + fn slot(&self) -> Slot; + + fn build_sidecar<Payload: AbstractExecPayload<E>>( + blob_items: Self::BlobItems, + block: &BeaconBlock<E, Payload>, + expected_kzg_commitments: &KzgCommitments<E>, + kzg_proofs: Vec<KzgProof>, + ) -> Result<SidecarList<E, Self>, String>; + + // this is mostly not used except for in testing + fn sign( + self: Arc<Self>, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedSidecar<E, Self> { + let signing_epoch = self.slot().epoch(E::slots_per_epoch()); + let 
domain = spec.get_domain( + signing_epoch, + Domain::BlobSidecar, + fork, + genesis_validators_root, + ); + let message = self.signing_root(domain); + let signature = secret_key.sign(message); + + SignedSidecar { + message: self, + signature, + _phantom: PhantomData, + } + } +} + +pub trait BlobItems<T: EthSpec>: Sync + Send + Sized { + fn try_from_blob_roots(roots: BlobRootsList<T>) -> Result<Self, String>; + fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String>; + fn len(&self) -> usize; + fn is_empty(&self) -> bool; + fn blobs(&self) -> Option<&BlobsList<T>>; +} + +impl<T: EthSpec> BlobItems<T> for BlobsList<T> { + fn try_from_blob_roots(_roots: BlobRootsList<T>) -> Result<Self, String> { + Err("Unexpected conversion from blob roots to blobs".to_string()) + } + + fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String> { + Ok(blobs) + } + + fn len(&self) -> usize { + VariableList::len(self) + } + + fn is_empty(&self) -> bool { + VariableList::is_empty(self) + } + + fn blobs(&self) -> Option<&BlobsList<T>> { + Some(self) + } +} + +impl<T: EthSpec> BlobItems<T> for BlobRootsList<T> { + fn try_from_blob_roots(roots: BlobRootsList<T>) -> Result<Self, String> { + Ok(roots) + } + + fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String> { + VariableList::new( + blobs + .into_iter() + .map(|blob| blob.tree_hash_root()) + .collect(), + ) + .map_err(|e| format!("{e:?}")) + } + + fn len(&self) -> usize { + VariableList::len(self) + } + + fn is_empty(&self) -> bool { + VariableList::is_empty(self) + } + + fn blobs(&self) -> Option<&BlobsList<T>> { + None + } +} + +impl<E: EthSpec> Sidecar<E> for BlobSidecar<E> { + type BlobItems = BlobsList<E>; + + fn slot(&self) -> Slot { + self.slot + } + + fn build_sidecar<Payload: AbstractExecPayload<E>>( + blobs: BlobsList<E>, + block: &BeaconBlock<E, Payload>, + expected_kzg_commitments: &KzgCommitments<E>, + kzg_proofs: Vec<KzgProof>, + ) -> Result<SidecarList<E, Self>, String> { + let beacon_block_root = 
block.canonical_root(); + let slot = block.slot(); + let blob_sidecars = BlobSidecarList::from( + blobs + .into_iter() + .enumerate() + .map(|(blob_index, blob)| { + let kzg_commitment = expected_kzg_commitments + .get(blob_index) + .ok_or("KZG commitment should exist for blob")?; + + let kzg_proof = kzg_proofs + .get(blob_index) + .ok_or("KZG proof should exist for blob")?; + + Ok(Arc::new(BlobSidecar { + block_root: beacon_block_root, + index: blob_index as u64, + slot, + block_parent_root: block.parent_root(), + proposer_index: block.proposer_index(), + blob, + kzg_commitment: *kzg_commitment, + kzg_proof: *kzg_proof, + })) + }) + .collect::<Result<Vec<_>, String>>()?, + ); + + Ok(blob_sidecars) + } +} + +impl<E: EthSpec> Sidecar<E> for BlindedBlobSidecar { + type BlobItems = BlobRootsList<E>; + + fn slot(&self) -> Slot { + self.slot + } + + fn build_sidecar<Payload: AbstractExecPayload<E>>( + blob_roots: BlobRootsList<E>, + block: &BeaconBlock<E, Payload>, + expected_kzg_commitments: &KzgCommitments<E>, + kzg_proofs: Vec<KzgProof>, + ) -> Result<SidecarList<E, BlindedBlobSidecar>, String> { + let beacon_block_root = block.canonical_root(); + let slot = block.slot(); + + let blob_sidecars = BlindedBlobSidecarList::<E>::from( + blob_roots + .into_iter() + .enumerate() + .map(|(blob_index, blob_root)| { + let kzg_commitment = expected_kzg_commitments + .get(blob_index) + .ok_or("KZG commitment should exist for blob")?; + + let kzg_proof = kzg_proofs.get(blob_index).ok_or(format!( + "Missing KZG proof for slot {} blob index: {}", + slot, blob_index + ))?; + + Ok(Arc::new(BlindedBlobSidecar { + block_root: beacon_block_root, + index: blob_index as u64, + slot, + block_parent_root: block.parent_root(), + proposer_index: block.proposer_index(), + blob_root, + kzg_commitment: *kzg_commitment, + kzg_proof: *kzg_proof, + })) + }) + .collect::<Result<Vec<_>, String>>()?, + ); + + Ok(blob_sidecars) + } +} diff --git a/consensus/types/src/signed_aggregate_and_proof.rs 
b/consensus/types/src/signed_aggregate_and_proof.rs index 6d86c05634..10010073e5 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SelectionProof, Signature, SignedRoot, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5f623cf07a..24b5e27c8c 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,7 +1,8 @@ +use crate::beacon_block_body::format_kzg_commitments; use crate::*; use bls::Signature; use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; use superstruct::superstruct; @@ -37,7 +38,7 @@ impl From<SignedBeaconBlockHash> for Hash256 { /// A `BeaconBlock` and a signature from its proposer. #[superstruct( - variants(Base, Altair, Merge, Capella), + variants(Base, Altair, Merge, Capella, Deneb), variant_attributes( derive( Debug, @@ -76,6 +77,8 @@ pub struct SignedBeaconBlock<E: EthSpec, Payload: AbstractExecPayload<E> = FullP pub message: BeaconBlockMerge<E, Payload>, #[superstruct(only(Capella), partial_getter(rename = "message_capella"))] pub message: BeaconBlockCapella<E, Payload>, + #[superstruct(only(Deneb), partial_getter(rename = "message_deneb"))] + pub message: BeaconBlockDeneb<E, Payload>, pub signature: Signature, } @@ -90,6 +93,12 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload> self.message().fork_name(spec) } + /// Returns the name of the fork pertaining to `self` + /// Does not check that the fork is consistent with the slot. 
+ pub fn fork_name_unchecked(&self) -> ForkName { + self.message().fork_name_unchecked() + } + /// SSZ decode with fork variant determined by slot. pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> { Self::from_ssz_bytes_with(bytes, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) @@ -136,6 +145,9 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload> BeaconBlock::Capella(message) => { SignedBeaconBlock::Capella(SignedBeaconBlockCapella { message, signature }) } + BeaconBlock::Deneb(message) => { + SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb { message, signature }) + } } } @@ -186,7 +198,7 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload> } let domain = spec.get_domain( - self.slot().epoch(E::slots_per_epoch()), + self.epoch(), Domain::BeaconProposer, fork, genesis_validators_root, @@ -218,6 +230,11 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload> self.message().slot() } + /// Convenience accessor for the block's epoch. + pub fn epoch(&self) -> Epoch { + self.message().slot().epoch(E::slots_per_epoch()) + } + /// Convenience accessor for the block's parent root. pub fn parent_root(&self) -> Hash256 { self.message().parent_root() @@ -232,6 +249,23 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload> pub fn canonical_root(&self) -> Hash256 { self.message().tree_hash_root() } + + pub fn num_expected_blobs(&self) -> usize { + self.message() + .body() + .blob_kzg_commitments() + .map(|c| c.len()) + .unwrap_or(0) + } + + /// Used for displaying commitments in logs. + pub fn commitments_formatted(&self) -> String { + let Ok(commitments) = self.message().body().blob_kzg_commitments() else { + return "[]".to_string(); + }; + + format_kzg_commitments(commitments.as_ref()) + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. 
@@ -368,6 +402,62 @@ impl<E: EthSpec> SignedBeaconBlockCapella<E, BlindedPayload<E>> { } } +impl<E: EthSpec> SignedBeaconBlockDeneb<E, BlindedPayload<E>> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadDeneb<E>, + ) -> SignedBeaconBlockDeneb<E, FullPayload<E>> { + let SignedBeaconBlockDeneb { + message: + BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadDeneb { .. }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + }, + signature, + } = self; + SignedBeaconBlockDeneb { + message: BeaconBlockDeneb { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyDeneb { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadDeneb { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + }, + }, + signature, + } + } +} + impl<E: EthSpec> SignedBeaconBlock<E, BlindedPayload<E>> { pub fn try_into_full_block( self, @@ -382,10 +472,14 @@ impl<E: EthSpec> SignedBeaconBlock<E, BlindedPayload<E>> { (SignedBeaconBlock::Capella(block), Some(ExecutionPayload::Capella(payload))) => { SignedBeaconBlock::Capella(block.into_full_block(payload)) } + (SignedBeaconBlock::Deneb(block), Some(ExecutionPayload::Deneb(payload))) => { + SignedBeaconBlock::Deneb(block.into_full_block(payload)) + } // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added (SignedBeaconBlock::Merge(_), _) => return None, (SignedBeaconBlock::Capella(_), _) => return None, + (SignedBeaconBlock::Deneb(_), _) => return None, }; Some(full_block) } @@ -440,6 +534,120 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> 
ForkVersionDeserialize } } +/// This module can be used to encode and decode a `SignedBeaconBlock` the same way it +/// would be done if we had tagged the superstruct enum with +/// `#[ssz(enum_behaviour = "union")]` +/// This should _only_ be used *some* cases when storing these objects in the database +/// and _NEVER_ for encoding / decoding blocks sent over the network! +pub mod ssz_tagged_signed_beacon_block { + use super::*; + pub mod encode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn ssz_bytes_len<E: EthSpec, Payload: AbstractExecPayload<E>>( + block: &SignedBeaconBlock<E, Payload>, + ) -> usize { + block + .ssz_bytes_len() + .checked_add(1) + .expect("encoded length must be less than usize::max") + } + + pub fn ssz_append<E: EthSpec, Payload: AbstractExecPayload<E>>( + block: &SignedBeaconBlock<E, Payload>, + buf: &mut Vec<u8>, + ) { + let fork_name = block.fork_name_unchecked(); + fork_name.ssz_append(buf); + block.ssz_append(buf); + } + + pub fn as_ssz_bytes<E: EthSpec, Payload: AbstractExecPayload<E>>( + block: &SignedBeaconBlock<E, Payload>, + ) -> Vec<u8> { + let mut buf = vec![]; + ssz_append(block, &mut buf); + + buf + } + } + + pub mod decode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn from_ssz_bytes<E: EthSpec, Payload: AbstractExecPayload<E>>( + bytes: &[u8], + ) -> Result<SignedBeaconBlock<E, Payload>, DecodeError> { + let fork_byte = bytes + .first() + .copied() + .ok_or(DecodeError::OutOfBoundsByte { i: 0 })?; + let body = bytes + .get(1..) + .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; + + match ForkName::from_ssz_bytes(&[fork_byte])? 
{ + ForkName::Base => Ok(SignedBeaconBlock::Base( + SignedBeaconBlockBase::from_ssz_bytes(body)?, + )), + ForkName::Altair => Ok(SignedBeaconBlock::Altair( + SignedBeaconBlockAltair::from_ssz_bytes(body)?, + )), + ForkName::Merge => Ok(SignedBeaconBlock::Merge( + SignedBeaconBlockMerge::from_ssz_bytes(body)?, + )), + ForkName::Capella => Ok(SignedBeaconBlock::Capella( + SignedBeaconBlockCapella::from_ssz_bytes(body)?, + )), + ForkName::Deneb => Ok(SignedBeaconBlock::Deneb( + SignedBeaconBlockDeneb::from_ssz_bytes(body)?, + )), + } + } + } +} + +pub mod ssz_tagged_signed_beacon_block_arc { + use super::*; + pub mod encode { + pub use super::ssz_tagged_signed_beacon_block::encode::*; + } + + pub mod decode { + pub use super::ssz_tagged_signed_beacon_block::decode::{is_ssz_fixed_len, ssz_fixed_len}; + use super::*; + #[allow(unused_imports)] + use ssz::*; + use std::sync::Arc; + + pub fn from_ssz_bytes<E: EthSpec, Payload: AbstractExecPayload<E>>( + bytes: &[u8], + ) -> Result<Arc<SignedBeaconBlock<E, Payload>>, DecodeError> { + ssz_tagged_signed_beacon_block::decode::from_ssz_bytes(bytes).map(Arc::new) + } + } +} + #[cfg(test)] mod test { use super::*; @@ -481,4 +689,38 @@ mod test { assert_eq!(reconstructed, block); } } + + #[test] + fn test_ssz_tagged_signed_beacon_block() { + type E = MainnetEthSpec; + + let spec = &E::default_spec(); + let sig = Signature::empty(); + let blocks = vec![ + SignedBeaconBlock::<E>::from_block( + BeaconBlock::Base(BeaconBlockBase::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Altair(BeaconBlockAltair::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Capella(BeaconBlockCapella::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block(BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)), sig), + ]; + + for block in blocks { + let encoded = 
ssz_tagged_signed_beacon_block::encode::as_ssz_bytes(&block); + let decoded = ssz_tagged_signed_beacon_block::decode::from_ssz_bytes::<E, _>(&encoded) + .expect("should decode"); + assert_eq!(decoded, block); + } + } } diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index c265eded1d..3d4269a2ce 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,7 +2,7 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_blob.rs b/consensus/types/src/signed_blob.rs new file mode 100644 index 0000000000..3c560823ce --- /dev/null +++ b/consensus/types/src/signed_blob.rs @@ -0,0 +1,114 @@ +use crate::sidecar::Sidecar; +use crate::{ + test_utils::TestRandom, BlindedBlobSidecar, Blob, BlobSidecar, ChainSpec, Domain, EthSpec, + Fork, Hash256, Signature, SignedRoot, SigningData, +}; +use bls::PublicKey; +use derivative::Derivative; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use std::marker::PhantomData; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + Derivative, + arbitrary::Arbitrary, +)] +#[serde(bound = "T: EthSpec, S: Sidecar<T>")] +#[arbitrary(bound = "T: EthSpec, S: Sidecar<T>")] +#[derivative(Hash(bound = "T: EthSpec, S: Sidecar<T>"))] +pub struct SignedSidecar<T: EthSpec, S: Sidecar<T>> { + pub message: Arc<S>, + pub signature: Signature, + #[ssz(skip_serializing, skip_deserializing)] + 
#[tree_hash(skip_hashing)] + #[serde(skip)] + #[arbitrary(default)] + pub _phantom: PhantomData<T>, +} + +impl<T: EthSpec> SignedSidecar<T, BlindedBlobSidecar> { + pub fn into_full_blob_sidecars(self, blob: Blob<T>) -> SignedSidecar<T, BlobSidecar<T>> { + let blinded_sidecar = self.message; + SignedSidecar { + message: Arc::new(BlobSidecar { + block_root: blinded_sidecar.block_root, + index: blinded_sidecar.index, + slot: blinded_sidecar.slot, + block_parent_root: blinded_sidecar.block_parent_root, + proposer_index: blinded_sidecar.proposer_index, + blob, + kzg_commitment: blinded_sidecar.kzg_commitment, + kzg_proof: blinded_sidecar.kzg_proof, + }), + signature: self.signature, + _phantom: PhantomData, + } + } +} + +impl<T: EthSpec> SignedBlobSidecar<T> { + /// Verify `self.signature`. + /// + /// If the root of `block.message` is already known it can be passed in via `object_root_opt`. + /// Otherwise, it will be computed locally. + pub fn verify_signature( + &self, + object_root_opt: Option<Hash256>, + pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let domain = spec.get_domain( + self.message.slot.epoch(T::slots_per_epoch()), + Domain::BlobSidecar, + fork, + genesis_validators_root, + ); + + let message = if let Some(object_root) = object_root_opt { + SigningData { + object_root, + domain, + } + .tree_hash_root() + } else { + self.message.signing_root(domain) + }; + + self.signature.verify(pubkey, message) + } +} + +impl<T: EthSpec> From<SignedBlobSidecar<T>> for SignedBlindedBlobSidecar<T> { + fn from(signed: SignedBlobSidecar<T>) -> Self { + SignedBlindedBlobSidecar { + message: Arc::new(signed.message.into()), + signature: signed.signature, + _phantom: PhantomData, + } + } +} + +pub type SignedBlobSidecar<T> = SignedSidecar<T, BlobSidecar<T>>; +pub type SignedBlindedBlobSidecar<T> = SignedSidecar<T, BlindedBlobSidecar>; + +/// List of Signed Sidecars that implements `Sidecar`. 
+pub type SignedSidecarList<T, Sidecar> = + VariableList<SignedSidecar<T, Sidecar>, <T as EthSpec>::MaxBlobsPerBlock>; +pub type SignedBlobSidecarList<T> = SignedSidecarList<T, BlobSidecar<T>>; +pub type SignedBlindedBlobSidecarList<T> = SignedSidecarList<T, BlindedBlobSidecar>; diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 2b17095ae7..2a4ecdf438 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/signed_contribution_and_proof.rs index 4cb3588433..6cb45ac8e6 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/signed_contribution_and_proof.rs @@ -3,7 +3,7 @@ use super::{ SignedRoot, SyncCommitteeContribution, SyncSelectionProof, }; use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 3392826a62..30eda11791 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, VoluntaryExit}; use bls::Signature; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/signing_data.rs 
b/consensus/types/src/signing_data.rs index b80d4a40d5..f30d5fdfcb 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/signing_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::Hash256; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index e9f1e192b4..ec659d1dbb 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -15,7 +15,7 @@ use crate::{ChainSpec, SignedRoot}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; use std::hash::Hash; @@ -76,8 +76,8 @@ impl Slot { } impl Epoch { - pub const fn new(slot: u64) -> Epoch { - Epoch(slot) + pub const fn new(epoch: u64) -> Epoch { + Epoch(epoch) } pub fn max_value() -> Epoch { diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 415d6a1404..2752e31092 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,7 +1,7 @@ //! Identifies each shard by an integer identifier. 
use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; use swap_or_not_shuffle::compute_shuffled_index; diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 300c86fc0f..bb00c4aa20 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -3,7 +3,7 @@ use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index b101068123..2b60d01b8e 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{SignedRoot, Slot}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 43ba23f121..0bcf505f25 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -3,7 +3,7 @@ use crate::typenum::Unsigned; use crate::{EthSpec, FixedVector, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use test_random_derive::TestRandom; diff 
--git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 425f8f116d..b8ee5c2e36 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -2,7 +2,7 @@ use super::{AggregateSignature, EthSpec, SignedRoot}; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index d0301cdf63..d7d309cd56 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; use crate::slot_data::SlotData; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index e3ffe62bfd..1058b9d3b4 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -1,7 +1,7 @@ use crate::{EthSpec, SyncCommittee, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::ArithError; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 5af756ae01..5605482929 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -2,7 +2,7 @@ use 
crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; use safe_arith::{ArithError, SafeArith}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; use std::collections::HashSet; use std::fmt::{self, Display}; diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 51b79d8d53..f31df2ce1b 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -10,6 +10,8 @@ mod address; mod aggregate_signature; mod bitfield; mod hash256; +mod kzg_commitment; +mod kzg_proof; mod public_key; mod public_key_bytes; mod secret_key; diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 5cb4e7d521..3992421e37 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -4,8 +4,21 @@ use smallvec::smallvec; impl<N: Unsigned + Clone> TestRandom for BitList<N> { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); + + let non_zero_bytes = raw_bytes + .iter() + .enumerate() + .rev() + .find_map(|(i, byte)| (*byte > 0).then_some(i + 1)) + .unwrap_or(0); + + if non_zero_bytes < initial_len { + raw_bytes.truncate(non_zero_bytes); + } + Self::from_bytes(raw_bytes).expect("we generate a valid BitList") } } diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs new file mode 100644 index 0000000000..a4030f2b6a --- /dev/null +++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -0,0 +1,7 @@ +use super::*; + +impl TestRandom for KzgCommitment { + 
fn random_for_test(rng: &mut impl rand::RngCore) -> Self { + KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng)) + } +} diff --git a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs new file mode 100644 index 0000000000..d6d8ed2d08 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -0,0 +1,10 @@ +use super::*; +use kzg::{KzgProof, BYTES_PER_COMMITMENT}; + +impl TestRandom for KzgProof { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut bytes = [0; BYTES_PER_COMMITMENT]; + rng.fill_bytes(&mut bytes); + Self(bytes) + } +} diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 6860397fb5..8fbd9009ea 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -2,7 +2,7 @@ use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index de7f26cc63..174014df8e 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -21,3 +21,17 @@ pub struct ValidatorRegistrationData { } impl SignedRoot for ValidatorRegistrationData {} + +impl SignedValidatorRegistrationData { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 02686fef9a..a24f7376a1 100644 --- 
a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -1,9 +1,9 @@ use crate::{ - test_utils::TestRandom, ChainSpec, Domain, Epoch, Fork, Hash256, SecretKey, SignedRoot, + test_utils::TestRandom, ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -37,16 +37,20 @@ impl VoluntaryExit { pub fn sign( self, secret_key: &SecretKey, - fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, ) -> SignedVoluntaryExit { - let domain = spec.get_domain( - self.epoch, - Domain::VoluntaryExit, - fork, - genesis_validators_root, - ); + let fork_name = spec.fork_name_at_epoch(self.epoch); + let fork_version = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + spec.fork_version_for_name(fork_name) + } + // EIP-7044 + ForkName::Deneb => spec.fork_version_for_name(ForkName::Capella), + }; + let domain = + spec.compute_domain(Domain::VoluntaryExit, fork_version, genesis_validators_root); + let message = self.signing_root(domain); SignedVoluntaryExit { message: self, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index eed7c7e277..3e61156554 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index a610f257cd..1216fc2a98 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -2,24 +2,24 @@ name = "bls" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" 
+edition = { workspace = true } [dependencies] -ethereum_ssz = "0.5.0" -tree_hash = "0.5.0" -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } -rand = "0.7.3" -serde = "1.0.116" -serde_derive = "1.0.116" -ethereum_serde_utils = "0.5.0" -hex = "0.4.2" -ethereum_hashing = "1.0.0-beta.2" -ethereum-types = "0.14.1" -arbitrary = { version = "1.0", features = ["derive"], optional = true } -zeroize = { version = "1.4.2", features = ["zeroize_derive"] } +ethereum_ssz = { workspace = true } +tree_hash = { workspace = true } +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } +rand = { workspace = true } +serde = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum-types = { workspace = true } +arbitrary = { workspace = true } +zeroize = { workspace = true } blst = { version = "0.3.3", optional = true } [features] +arbitrary = [] default = ["supranational"] fake_crypto = [] milagro = ["milagro_bls"] diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index 59b0ffc43f..240568b4f6 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -27,10 +27,7 @@ impl<Pub> Copy for GenericPublicKeyBytes<Pub> {} impl<Pub> Clone for GenericPublicKeyBytes<Pub> { fn clone(&self) -> Self { - Self { - bytes: self.bytes, - _phantom: PhantomData, - } + *self } } diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index bd28abff9f..e831a175c7 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -99,9 +99,8 @@ pub fn verify_signature_sets<'a>( // Aggregate all the public keys. 
// Public keys have already been checked for subgroup and infinity - let agg_pk = match blst_core::AggregatePublicKey::aggregate(&signing_keys, false) { - Ok(agg_pk) => agg_pk, - Err(_) => return false, + let Ok(agg_pk) = blst_core::AggregatePublicKey::aggregate(&signing_keys, false) else { + return false; }; pks.push(agg_pk.to_public_key()); } diff --git a/crypto/bls/src/zeroize_hash.rs b/crypto/bls/src/zeroize_hash.rs index 41136f97a7..e346f456d1 100644 --- a/crypto/bls/src/zeroize_hash.rs +++ b/crypto/bls/src/zeroize_hash.rs @@ -1,5 +1,5 @@ use super::SECRET_KEY_BYTES_LEN; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use zeroize::Zeroize; /// Provides a wrapper around a `[u8; SECRET_KEY_BYTES_LEN]` that implements `Zeroize` on `Drop`. diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index 3f174a02d4..a0237ba7ed 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -2,16 +2,16 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = "0.10" -zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -num-bigint-dig = { version = "0.6.0", features = ["zeroize"] } -ring = "0.16.19" -bls = { path = "../bls" } +sha2 = { workspace = true } +zeroize = { workspace = true } +num-bigint-dig = { version = "0.8.4", features = ["zeroize"] } +ring = { workspace = true } +bls = { workspace = true } [dev-dependencies] -hex = "0.4.2" +hex = { workspace = true } diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index d83a60da24..bb6222807b 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -2,26 +2,26 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay 
<pawan@sigmaprime.io", "Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.8.5" +rand = { workspace = true } hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } scrypt = { version = "0.7.0", default-features = false } -sha2 = "0.9.2" -uuid = { version = "0.8.1", features = ["serde", "v4"] } -zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -serde = "1.0.116" -serde_repr = "0.1.6" -hex = "0.4.2" -bls = { path = "../bls" } -serde_json = "1.0.58" -eth2_key_derivation = { path = "../eth2_key_derivation" } +sha2 = { workspace = true } +uuid = { workspace = true } +zeroize = { workspace = true } +serde = { workspace = true } +serde_repr = { workspace = true } +hex = { workspace = true } +bls = { workspace = true } +serde_json = { workspace = true } +eth2_key_derivation = { workspace = true } unicode-normalization = "0.1.16" aes = { version = "0.7", features = ["ctr"] } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index 71f66ff933..f3af6aab59 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -2,20 +2,20 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = "1.0.116" -serde_json = "1.0.58" -serde_repr = "0.1.6" -uuid = { version = "0.8.1", features = ["serde", "v4"] } -rand = "0.8.5" -eth2_keystore = { path = "../eth2_keystore" } -eth2_key_derivation = { path = "../eth2_key_derivation" } -tiny-bip39 = "0.8.1" +serde = { workspace = true } +serde_json = { workspace = true } +serde_repr = { workspace = true } +uuid = { workspace = true } +rand = { 
workspace = true } +eth2_keystore = { workspace = true } +eth2_key_derivation = { workspace = true } +tiny-bip39 = "1" [dev-dependencies] -hex = "0.4.2" -tempfile = "3.1.0" +hex = { workspace = true } +tempfile = { workspace = true } diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml new file mode 100644 index 0000000000..7b70166f92 --- /dev/null +++ b/crypto/kzg/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "kzg" +version = "0.1.0" +authors = ["Pawan Dhananjay <pawandhananjay@gmail.com>"] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +arbitrary = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +derivative = { workspace = true } +serde = { workspace = true } +ethereum_serde_utils = { workspace = true } +hex = { workspace = true } +ethereum_hashing = { workspace = true } +c-kzg = { git = "https://github.com/ethereum/c-kzg-4844", rev = "748283cced543c486145d5f3f38684becdfe3e1b"} \ No newline at end of file diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs new file mode 100644 index 0000000000..cfab09f63e --- /dev/null +++ b/crypto/kzg/src/kzg_commitment.rs @@ -0,0 +1,144 @@ +use c_kzg::BYTES_PER_COMMITMENT; +use derivative::Derivative; +use ethereum_hashing::hash_fixed; +use serde::de::{Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use std::fmt::{Debug, Display, Formatter}; +use std::str::FromStr; +use tree_hash::{Hash256, PackedEncoding, TreeHash}; + +pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; + +#[derive(Derivative, Clone, Copy, Encode, Decode)] +#[derivative(PartialEq, Eq, Hash)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); + +impl KzgCommitment { + pub fn calculate_versioned_hash(&self) -> Hash256 { + let 
mut versioned_hash = hash_fixed(&self.0); + versioned_hash[0] = VERSIONED_HASH_VERSION_KZG; + Hash256::from_slice(versioned_hash.as_slice()) + } + + pub fn empty_for_testing() -> Self { + KzgCommitment([0; c_kzg::BYTES_PER_COMMITMENT]) + } +} + +impl From<KzgCommitment> for c_kzg::Bytes48 { + fn from(value: KzgCommitment) -> Self { + value.0.into() + } +} + +impl Display for KzgCommitment { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "0x")?; + for i in &self.0[0..2] { + write!(f, "{:02x}", i)?; + } + write!(f, "…")?; + for i in &self.0[BYTES_PER_COMMITMENT - 2..BYTES_PER_COMMITMENT] { + write!(f, "{:02x}", i)?; + } + Ok(()) + } +} + +impl TreeHash for KzgCommitment { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; BYTES_PER_COMMITMENT] as TreeHash>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; BYTES_PER_COMMITMENT] as TreeHash>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl Serialize for KzgCommitment { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&format!("{:?}", self)) + } +} + +impl<'de> Deserialize<'de> for KzgCommitment { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + Self::from_str(&string).map_err(serde::de::Error::custom) + } +} + +impl FromStr for KzgCommitment { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if let Some(stripped) = s.strip_prefix("0x") { + let bytes = hex::decode(stripped).map_err(|e| e.to_string())?; + if bytes.len() == BYTES_PER_COMMITMENT { + let mut kzg_commitment_bytes = [0; BYTES_PER_COMMITMENT]; + kzg_commitment_bytes[..].copy_from_slice(&bytes); + Ok(Self(kzg_commitment_bytes)) + } 
else { + Err(format!( + "InvalidByteLength: got {}, expected {}", + bytes.len(), + BYTES_PER_COMMITMENT + )) + } + } else { + Err("must start with 0x".to_string()) + } + } +} + +impl Debug for KzgCommitment { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl arbitrary::Arbitrary<'_> for KzgCommitment { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { + let mut bytes = [0u8; BYTES_PER_COMMITMENT]; + u.fill_buffer(&mut bytes)?; + Ok(KzgCommitment(bytes)) + } +} + +#[test] +fn kzg_commitment_display() { + let display_commitment_str = "0x53fa…adac"; + let display_commitment = KzgCommitment::from_str( + "0x53fa09af35d1d1a9e76f65e16112a9064ce30d1e4e2df98583f0f5dc2e7dd13a4f421a9c89f518fafd952df76f23adac", + ) + .unwrap() + .to_string(); + + assert_eq!(display_commitment, display_commitment_str); +} + +#[test] +fn kzg_commitment_debug() { + let debug_commitment_str = + "0x53fa09af35d1d1a9e76f65e16112a9064ce30d1e4e2df98583f0f5dc2e7dd13a4f421a9c89f518fafd952df76f23adac"; + let debug_commitment = KzgCommitment::from_str(debug_commitment_str).unwrap(); + + assert_eq!(format!("{debug_commitment:?}"), debug_commitment_str); +} diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs new file mode 100644 index 0000000000..c9a138a31c --- /dev/null +++ b/crypto/kzg/src/kzg_proof.rs @@ -0,0 +1,119 @@ +use c_kzg::BYTES_PER_PROOF; +use serde::de::{Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use ssz_derive::{Decode, Encode}; +use std::fmt; +use std::fmt::Debug; +use std::str::FromStr; +use tree_hash::{PackedEncoding, TreeHash}; + +#[derive(PartialEq, Hash, Clone, Copy, Encode, Decode)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgProof(pub [u8; BYTES_PER_PROOF]); + +impl From<KzgProof> for c_kzg::Bytes48 { + fn from(value: KzgProof) -> Self { + value.0.into() + } +} + +impl KzgProof { + /// Creates a valid proof using 
`G1_POINT_AT_INFINITY`. + pub fn empty() -> Self { + let mut bytes = [0; BYTES_PER_PROOF]; + bytes[0] = 0xc0; + Self(bytes) + } +} + +impl fmt::Display for KzgProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl From<[u8; BYTES_PER_PROOF]> for KzgProof { + fn from(bytes: [u8; BYTES_PER_PROOF]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; BYTES_PER_PROOF]> for KzgProof { + fn into(self) -> [u8; BYTES_PER_PROOF] { + self.0 + } +} + +impl TreeHash for KzgProof { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; BYTES_PER_PROOF]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; BYTES_PER_PROOF]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl Serialize for KzgProof { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for KzgProof { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + Self::from_str(&string).map_err(serde::de::Error::custom) + } +} + +impl FromStr for KzgProof { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if let Some(stripped) = s.strip_prefix("0x") { + let bytes = hex::decode(stripped).map_err(|e| e.to_string())?; + if bytes.len() == BYTES_PER_PROOF { + let mut kzg_proof_bytes = [0; BYTES_PER_PROOF]; + kzg_proof_bytes[..].copy_from_slice(&bytes); + Ok(Self(kzg_proof_bytes)) + } else { + Err(format!( + "InvalidByteLength: got {}, expected {}", + bytes.len(), + BYTES_PER_PROOF + )) + } + } else { + Err("must start with 0x".to_string()) + } + } +} + +impl Debug for KzgProof { + fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(self.0)) + } +} + +impl arbitrary::Arbitrary<'_> for KzgProof { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { + let mut bytes = [0u8; BYTES_PER_PROOF]; + u.fill_buffer(&mut bytes)?; + Ok(KzgProof(bytes)) + } +} diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs new file mode 100644 index 0000000000..fb2a6d394f --- /dev/null +++ b/crypto/kzg/src/lib.rs @@ -0,0 +1,115 @@ +mod kzg_commitment; +mod kzg_proof; +mod trusted_setup; + +use std::fmt::Debug; + +pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup}; +pub use c_kzg::{ + Blob, Bytes32, Bytes48, Error, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, + BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, +}; + +/// A wrapper over a kzg library that holds the trusted setup parameters. +#[derive(Debug)] +pub struct Kzg { + trusted_setup: KzgSettings, +} + +impl Kzg { + /// Load the kzg trusted setup parameters from a vec of G1 and G2 points. + pub fn new_from_trusted_setup(trusted_setup: TrustedSetup) -> Result<Self, Error> { + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + }) + } + + /// Compute the kzg proof given a blob and its kzg commitment. + pub fn compute_blob_kzg_proof( + &self, + blob: &Blob, + kzg_commitment: KzgCommitment, + ) -> Result<KzgProof, Error> { + c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup) + .map(|proof| KzgProof(proof.to_bytes().into_inner())) + } + + /// Verify a kzg proof given the blob, kzg commitment and kzg proof. 
+ pub fn verify_blob_kzg_proof( + &self, + blob: &Blob, + kzg_commitment: KzgCommitment, + kzg_proof: KzgProof, + ) -> Result<bool, Error> { + c_kzg::KzgProof::verify_blob_kzg_proof( + blob, + &kzg_commitment.into(), + &kzg_proof.into(), + &self.trusted_setup, + ) + } + + /// Verify a batch of blob commitment proof triplets. + /// + /// Note: This method is slightly faster than calling `Self::verify_blob_kzg_proof` in a loop sequentially. + /// TODO(pawan): test performance against a parallelized rayon impl. + pub fn verify_blob_kzg_proof_batch( + &self, + blobs: &[Blob], + kzg_commitments: &[KzgCommitment], + kzg_proofs: &[KzgProof], + ) -> Result<bool, Error> { + let commitments_bytes = kzg_commitments + .iter() + .map(|comm| Bytes48::from(*comm)) + .collect::<Vec<_>>(); + + let proofs_bytes = kzg_proofs + .iter() + .map(|proof| Bytes48::from(*proof)) + .collect::<Vec<_>>(); + + c_kzg::KzgProof::verify_blob_kzg_proof_batch( + blobs, + &commitments_bytes, + &proofs_bytes, + &self.trusted_setup, + ) + } + + /// Converts a blob to a kzg commitment. 
+ pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result<KzgCommitment, Error> { + c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup) + .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) + } + + /// Computes the kzg proof for a given `blob` and an evaluation point `z` + pub fn compute_kzg_proof( + &self, + blob: &Blob, + z: &Bytes32, + ) -> Result<(KzgProof, Bytes32), Error> { + c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup) + .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) + } + + /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` + pub fn verify_kzg_proof( + &self, + kzg_commitment: KzgCommitment, + z: &Bytes32, + y: &Bytes32, + kzg_proof: KzgProof, + ) -> Result<bool, Error> { + c_kzg::KzgProof::verify_kzg_proof( + &kzg_commitment.into(), + z, + y, + &kzg_proof.into(), + &self.trusted_setup, + ) + } +} diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs new file mode 100644 index 0000000000..55a00eed16 --- /dev/null +++ b/crypto/kzg/src/trusted_setup.rs @@ -0,0 +1,142 @@ +use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; +use serde::{ + de::{self, Deserializer, Visitor}, + Deserialize, Serialize, +}; + +/// Wrapper over a BLS G1 point's byte representation. +#[derive(Debug, Clone, PartialEq)] +struct G1Point([u8; BYTES_PER_G1_POINT]); + +/// Wrapper over a BLS G2 point's byte representation. +#[derive(Debug, Clone, PartialEq)] +struct G2Point([u8; BYTES_PER_G2_POINT]); + +/// Contains the trusted setup parameters that are required to instantiate a +/// `c_kzg::KzgSettings` object. +/// +/// The serialize/deserialize implementations are written according to +/// the format specified in the the ethereum consensus specs trusted setup files. 
+/// +/// See https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TrustedSetup { + #[serde(rename = "g1_lagrange")] + g1_points: Vec<G1Point>, + #[serde(rename = "g2_monomial")] + g2_points: Vec<G2Point>, +} + +impl TrustedSetup { + pub fn g1_points(&self) -> Vec<[u8; BYTES_PER_G1_POINT]> { + self.g1_points.iter().map(|p| p.0).collect() + } + + pub fn g2_points(&self) -> Vec<[u8; BYTES_PER_G2_POINT]> { + self.g2_points.iter().map(|p| p.0).collect() + } + + pub fn g1_len(&self) -> usize { + self.g1_points.len() + } +} + +impl Serialize for G1Point { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + let point = hex::encode(self.0); + serializer.serialize_str(&point) + } +} + +impl Serialize for G2Point { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + let point = hex::encode(self.0); + serializer.serialize_str(&point) + } +} + +impl<'de> Deserialize<'de> for G1Point { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + struct G1PointVisitor; + + impl<'de> Visitor<'de> for G1PointVisitor { + type Value = G1Point; + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("A 48 byte hex encoded string") + } + + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where + E: de::Error, + { + let point = hex::decode(strip_prefix(v)) + .map_err(|e| de::Error::custom(format!("Failed to decode G1 point: {}", e)))?; + if point.len() != BYTES_PER_G1_POINT { + return Err(de::Error::custom(format!( + "G1 point has invalid length. 
Expected {} got {}", + BYTES_PER_G1_POINT, + point.len() + ))); + } + let mut res = [0; BYTES_PER_G1_POINT]; + res.copy_from_slice(&point); + Ok(G1Point(res)) + } + } + + deserializer.deserialize_str(G1PointVisitor) + } +} + +impl<'de> Deserialize<'de> for G2Point { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + struct G2PointVisitor; + + impl<'de> Visitor<'de> for G2PointVisitor { + type Value = G2Point; + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("A 96 byte hex encoded string") + } + + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where + E: de::Error, + { + let point = hex::decode(strip_prefix(v)) + .map_err(|e| de::Error::custom(format!("Failed to decode G2 point: {}", e)))?; + if point.len() != BYTES_PER_G2_POINT { + return Err(de::Error::custom(format!( + "G2 point has invalid length. Expected {} got {}", + BYTES_PER_G2_POINT, + point.len() + ))); + } + let mut res = [0; BYTES_PER_G2_POINT]; + res.copy_from_slice(&point); + Ok(G2Point(res)) + } + } + + deserializer.deserialize_str(G2PointVisitor) + } +} + +fn strip_prefix(s: &str) -> &str { + if let Some(stripped) = s.strip_prefix("0x") { + stripped + } else { + s + } +} diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index f715528138..07045dd95c 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "database_manager" version = "0.1.0" -edition = "2021" +edition = { workspace = true } [dependencies] -beacon_chain = { path = "../beacon_node/beacon_chain" } -beacon_node = { path = "../beacon_node" } -clap = "2.33.3" -clap_utils = { path = "../common/clap_utils" } -environment = { path = "../lighthouse/environment" } -logging = { path = "../common/logging" } -sloggers = "2.0.2" -store = { path = "../beacon_node/store" } -tempfile = "3.1.0" -types = { path = "../consensus/types" } -slog = "2.5.2" -strum = { 
version = "0.24.0", features = ["derive"] } +beacon_chain = { workspace = true } +beacon_node = { workspace = true } +clap = { workspace = true } +clap_utils = { workspace = true } +environment = { workspace = true } +hex = { workspace = true } +logging = { workspace = true } +sloggers = { workspace = true } +store = { workspace = true } +tempfile = { workspace = true } +types = { workspace = true } +slog = { workspace = true } +strum = { workspace = true } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index ce0b094b77..079419aba5 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -60,6 +60,24 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("sizes") .possible_values(InspectTarget::VARIANTS), ) + .arg( + Arg::with_name("skip") + .long("skip") + .value_name("N") + .help("Skip over the first N keys"), + ) + .arg( + Arg::with_name("limit") + .long("limit") + .value_name("N") + .help("Output at most N keys"), + ) + .arg( + Arg::with_name("freezer") + .long("freezer") + .help("Inspect the freezer DB rather than the hot DB") + .takes_value(false), + ) .arg( Arg::with_name("output-dir") .long("output-dir") @@ -75,6 +93,12 @@ pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { .about("Prune finalized execution payloads") } +pub fn prune_blobs_app<'a, 'b>() -> App<'a, 'b> { + App::new("prune_blobs") + .setting(clap::AppSettings::ColoredHelp) + .about("Prune blobs older than data availability boundary") +} + pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .visible_aliases(&["db"]) @@ -98,10 +122,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Data directory for the freezer database.") .takes_value(true), ) + .arg( + Arg::with_name("blob-prune-margin-epochs") + .long("blob-prune-margin-epochs") + .value_name("EPOCHS") + .help( + "The margin for blob pruning in epochs. 
The oldest blobs are pruned \ + up until data_availability_boundary - blob_prune_margin_epochs.", + ) + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::with_name("blobs-dir") + .long("blobs-dir") + .value_name("DIR") + .help("Data directory for the blobs database.") + .takes_value(true), + ) .subcommand(migrate_cli_app()) .subcommand(version_cli_app()) .subcommand(inspect_cli_app()) .subcommand(prune_payloads_app()) + .subcommand(prune_blobs_app()) } fn parse_client_config<E: EthSpec>( @@ -116,10 +159,20 @@ fn parse_client_config<E: EthSpec>( client_config.freezer_db_path = Some(freezer_dir); } + if let Some(blobs_db_dir) = clap_utils::parse_optional(cli_args, "blobs-dir")? { + client_config.blobs_db_path = Some(blobs_db_dir); + } + let (sprp, sprp_explicit) = get_slots_per_restore_point::<E>(cli_args)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; + if let Some(blob_prune_margin_epochs) = + clap_utils::parse_optional(cli_args, "blob-prune-margin-epochs")? + { + client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; + } + Ok(client_config) } @@ -131,11 +184,13 @@ pub fn display_db_version<E: EthSpec>( let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let mut version = CURRENT_SCHEMA_VERSION; HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open( &hot_path, &cold_path, + blobs_path, |_, from, _| { version = from; Ok(()) @@ -171,6 +226,9 @@ pub enum InspectTarget { pub struct InspectConfig { column: DBColumn, target: InspectTarget, + skip: Option<usize>, + limit: Option<usize>, + freezer: bool, /// Configures where the inspect output should be stored. 
output_dir: PathBuf, } @@ -178,11 +236,18 @@ pub struct InspectConfig { fn parse_inspect_config(cli_args: &ArgMatches) -> Result<InspectConfig, String> { let column = clap_utils::parse_required(cli_args, "column")?; let target = clap_utils::parse_required(cli_args, "output")?; + let skip = clap_utils::parse_optional(cli_args, "skip")?; + let limit = clap_utils::parse_optional(cli_args, "limit")?; + let freezer = cli_args.is_present("freezer"); + let output_dir: PathBuf = clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); Ok(InspectConfig { column, target, + skip, + limit, + freezer, output_dir, }) } @@ -196,10 +261,12 @@ pub fn inspect_db<E: EthSpec>( let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open( &hot_path, &cold_path, + blobs_path, |_, _, _| Ok(()), client_config.store, spec, @@ -208,6 +275,17 @@ pub fn inspect_db<E: EthSpec>( .map_err(|e| format!("{:?}", e))?; let mut total = 0; + let mut num_keys = 0; + + let sub_db = if inspect_config.freezer { + &db.cold_db + } else { + &db.hot_db + }; + + let skip = inspect_config.skip.unwrap_or(0); + let limit = inspect_config.limit.unwrap_or(usize::MAX); + let base_path = &inspect_config.output_dir; if let InspectTarget::Values = inspect_config.target { @@ -215,20 +293,24 @@ pub fn inspect_db<E: EthSpec>( .map_err(|e| format!("Unable to create import directory: {:?}", e))?; } - for res in db.hot_db.iter_column(inspect_config.column) { + for res in sub_db + .iter_column::<Vec<u8>>(inspect_config.column) + .skip(skip) + .take(limit) + { let (key, value) = res.map_err(|e| format!("{:?}", e))?; match inspect_config.target { InspectTarget::ValueSizes => { - println!("{:?}: {} bytes", key, value.len()); - total += value.len(); - } - InspectTarget::ValueTotal => { - total += value.len(); + 
println!("{}: {} bytes", hex::encode(&key), value.len()); } + InspectTarget::ValueTotal => (), InspectTarget::Values => { - let file_path = - base_path.join(format!("{}_{}.ssz", inspect_config.column.as_str(), key)); + let file_path = base_path.join(format!( + "{}_{}.ssz", + inspect_config.column.as_str(), + hex::encode(&key) + )); let write_result = fs::OpenOptions::new() .create(true) @@ -244,17 +326,14 @@ pub fn inspect_db<E: EthSpec>( } else { println!("Successfully saved values to file: {:?}", file_path); } - - total += value.len(); } } + total += value.len(); + num_keys += 1; } - match inspect_config.target { - InspectTarget::ValueSizes | InspectTarget::ValueTotal | InspectTarget::Values => { - println!("Total: {} bytes", total); - } - } + println!("Num keys: {}", num_keys); + println!("Total: {} bytes", total); Ok(()) } @@ -278,12 +357,14 @@ pub fn migrate_db<E: EthSpec>( let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let mut from = CURRENT_SCHEMA_VERSION; let to = migrate_config.to; let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open( &hot_path, &cold_path, + blobs_path, |_, db_initial_version, _| { from = db_initial_version; Ok(()) @@ -318,10 +399,12 @@ pub fn prune_payloads<E: EthSpec>( let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open( &hot_path, &cold_path, + blobs_path, |_, _, _| Ok(()), client_config.store, spec.clone(), @@ -334,6 +417,31 @@ pub fn prune_payloads<E: EthSpec>( db.try_prune_execution_payloads(force) } +pub fn prune_blobs<E: EthSpec>( + client_config: ClientConfig, + runtime_context: &RuntimeContext<E>, + log: Logger, +) -> Result<(), Error> { + let spec = &runtime_context.eth2_config.spec; 
+ let hot_path = client_config.get_db_path(); + let cold_path = client_config.get_freezer_db_path(); + let blobs_path = client_config.get_blobs_db_path(); + + let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open( + &hot_path, + &cold_path, + blobs_path, + |_, _, _| Ok(()), + client_config.store, + spec.clone(), + log, + )?; + + // If we're triggering a prune manually then ignore the check on `epochs_per_blob_prune` that + // bails out early by passing true to the force parameter. + db.try_prune_most_blobs(true) +} + /// Run the database manager, returning an error string if the operation did not succeed. pub fn run<T: EthSpec>(cli_args: &ArgMatches<'_>, env: Environment<T>) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; @@ -356,6 +464,7 @@ pub fn run<T: EthSpec>(cli_args: &ArgMatches<'_>, env: Environment<T>) -> Result ("prune_payloads", Some(_)) => { prune_payloads(client_config, &context, log).map_err(format_err) } + ("prune_blobs", Some(_)) => prune_blobs(client_config, &context, log).map_err(format_err), _ => Err("Unknown subcommand, for help `lighthouse database_manager --help`".into()), } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index f9d0a6a31c..854f718c59 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.3.0" +version = "4.5.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [features] portable = ["bls/supranational-portable"] @@ -11,38 +11,38 @@ fake_crypto = ['bls/fake_crypto'] jemalloc = ["malloc_utils/jemalloc"] [dependencies] -bls = { path = "../crypto/bls" } -clap = "2.33.3" -log = "0.4.11" -serde = "1.0.116" -serde_yaml = "0.8.13" -serde_json = "1.0.66" -env_logger = "0.9.0" -types = { path = "../consensus/types" } -state_processing = { path = "../consensus/state_processing" } -int_to_bytes = { path = "../consensus/int_to_bytes" } -ethereum_hashing = 
"1.0.0-beta.2" -ethereum_ssz = "0.5.0" -environment = { path = "../lighthouse/environment" } -eth2_network_config = { path = "../common/eth2_network_config" } -genesis = { path = "../beacon_node/genesis" } -deposit_contract = { path = "../common/deposit_contract" } -tree_hash = "0.5.0" -clap_utils = { path = "../common/clap_utils" } -lighthouse_network = { path = "../beacon_node/lighthouse_network" } -validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } -lighthouse_version = { path = "../common/lighthouse_version" } -directory = { path = "../common/directory" } -account_utils = { path = "../common/account_utils" } -eth2_wallet = { path = "../crypto/eth2_wallet" } -eth1_test_rig = { path = "../testing/eth1_test_rig" } -sensitive_url = { path = "../common/sensitive_url" } -eth2 = { path = "../common/eth2" } -snap = "1.0.1" -beacon_chain = { path = "../beacon_node/beacon_chain" } -store = { path = "../beacon_node/store" } -malloc_utils = { path = "../common/malloc_utils" } -rayon = "1.7.0" +bls = { workspace = true } +clap = { workspace = true } +log = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } +serde_json = { workspace = true } +env_logger = { workspace = true } +types = { workspace = true } +state_processing = { workspace = true } +int_to_bytes = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +environment = { workspace = true } +eth2_network_config = { workspace = true } +genesis = { workspace = true } +deposit_contract = { workspace = true } +tree_hash = { workspace = true } +clap_utils = { workspace = true } +lighthouse_network = { workspace = true } +validator_dir = { workspace = true, features = ["insecure_keys"] } +lighthouse_version = { workspace = true } +directory = { workspace = true } +account_utils = { workspace = true } +eth2_wallet = { workspace = true } +eth1_test_rig = { workspace = true } +sensitive_url = { workspace = true } 
+eth2 = { workspace = true } +snap = { workspace = true } +beacon_chain = { workspace = true } +store = { workspace = true } +malloc_utils = { workspace = true } +rayon = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lcli/Dockerfile b/lcli/Dockerfile index a50aa17027..1ee80e14fd 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.68.2-bullseye AS builder +FROM rust:1.69.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 6c0e8dcecf..5c96035851 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -5,8 +5,8 @@ use std::fs::File; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; use types::{ - EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, - ForkName, + EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderMerge, ForkName, }; pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { @@ -40,6 +40,14 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeaderCapella::default() }), + ForkName::Deneb => ExecutionPayloadHeader::Deneb(ExecutionPayloadHeaderDeneb { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + prev_randao: eth1_block_hash.into_root(), + ..ExecutionPayloadHeaderDeneb::default() + }), }; let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; 
diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 34144cd86d..bddd4baad8 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -49,7 +49,7 @@ pub fn run<T: EthSpec>( .wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec) .await .map(move |genesis_state| { - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir) }) .map_err(|e| format!("Failed to find genesis: {}", e))?; diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 0584cd6549..1d41bedc88 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -4,16 +4,16 @@ use lighthouse_network::{ libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; +use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; - let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; - let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?; diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 57a5ba0098..1a0b81fcb7 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -42,7 +42,7 @@ pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), &spec, )?; - 
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir)?; Ok(()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 38fec2ebb4..2aa855474f 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -432,7 +432,7 @@ fn main() { .takes_value(true) .default_value("bellatrix") .help("The fork for which the execution payload header should be created.") - .possible_values(&["merge", "bellatrix", "capella"]) + .possible_values(&["merge", "bellatrix", "capella", "deneb"]) ) ) .subcommand( @@ -608,6 +608,15 @@ fn main() { "The epoch at which to enable the Capella hard fork", ), ) + .arg( + Arg::with_name("deneb-fork-epoch") + .long("deneb-fork-epoch") + .value_name("EPOCH") + .takes_value(true) + .help( + "The epoch at which to enable the deneb hard fork", + ), + ) .arg( Arg::with_name("ttd") .long("ttd") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 01a44cabef..3a0c7a9f60 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,7 +1,7 @@ use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::Eth2NetworkConfig; +use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource, TRUSTED_SETUP_BYTES}; use eth2_wallet::bip39::Seed; use eth2_wallet::bip39::{Language, Mnemonic}; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; @@ -19,8 +19,8 @@ use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, ForkName, Hash256, Keypair, - PublicKey, Validator, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, 
ExecutionPayloadHeaderRefMut, + ForkName, Hash256, Keypair, PublicKey, Validator, }; pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -85,6 +85,10 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.capella_fork_epoch = Some(fork_epoch); } + if let Some(fork_epoch) = parse_optional(matches, "deneb-fork-epoch")? { + spec.deneb_fork_epoch = Some(fork_epoch); + } + if let Some(ttd) = parse_optional(matches, "ttd")? { spec.terminal_total_difficulty = ttd; } @@ -111,6 +115,10 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul ExecutionPayloadHeaderCapella::<T>::from_ssz_bytes(bytes.as_slice()) .map(ExecutionPayloadHeader::Capella) } + ForkName::Deneb => { + ExecutionPayloadHeaderDeneb::<T>::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Deneb) + } } .map_err(|e| format!("SSZ decode failed: {:?}", e)) }) @@ -187,11 +195,23 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul None }; + let kzg_trusted_setup = if let Some(epoch) = spec.deneb_fork_epoch { + // Only load the trusted setup if the deneb fork epoch is set + if epoch != Epoch::max_value() { + Some(TRUSTED_SETUP_BYTES.to_vec()) + } else { + None + } + } else { + None + }; let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), - genesis_state_bytes, + genesis_state_bytes: genesis_state_bytes.map(Into::into), + genesis_state_source: GenesisStateSource::IncludedBytes, config: Config::from_chain_spec::<T>(&spec), + kzg_trusted_setup, }; testnet.write_to_file(testnet_dir_path, overwrite_files) @@ -299,6 +319,9 @@ fn initialize_state_with_validators<T: EthSpec>( ExecutionPayloadHeaderRefMut::Capella(_) => { return Err("Cannot start genesis from a capella state".to_string()) } + ExecutionPayloadHeaderRefMut::Deneb(_) => { + return Err("Cannot start genesis from a deneb state".to_string()) + } } } diff --git 
a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 5c306f4fdc..453169cdc5 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -78,6 +78,9 @@ pub fn run_parse_ssz<T: EthSpec>( SignedBeaconBlockCapella::<T>::from_ssz_bytes, format, )?, + "SignedBeaconBlockDeneb" => { + decode_and_print(&bytes, SignedBeaconBlockDeneb::<T>::from_ssz_bytes, format)? + } "BeaconState" => decode_and_print::<BeaconState<T>>( &bytes, |bytes| BeaconState::from_ssz_bytes(bytes, spec), @@ -95,6 +98,10 @@ pub fn run_parse_ssz<T: EthSpec>( "BeaconStateCapella" => { decode_and_print(&bytes, BeaconStateCapella::<T>::from_ssz_bytes, format)? } + "BeaconStateDeneb" => { + decode_and_print(&bytes, BeaconStateDeneb::<T>::from_ssz_bytes, format)? + } + "BlobSidecar" => decode_and_print(&bytes, BlobSidecar::<T>::from_ssz_bytes, format)?, other => return Err(format!("Unknown type: {}", other)), }; diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 31fe9fe648..cdbacfe4d5 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -109,6 +109,7 @@ pub fn run<T: EthSpec>( } _ => return Err("must supply either --state-path or --beacon-url".into()), }; + let mut post_state = None; let initial_slot = state.slot(); let target_slot = initial_slot + slots; @@ -140,14 +141,15 @@ pub fn run<T: EthSpec>( let duration = Instant::now().duration_since(start); info!("Run {}: {:?}", i, duration); + post_state = Some(state); } - if let Some(output_path) = output_path { + if let (Some(post_state), Some(output_path)) = (post_state, output_path) { let mut output_file = File::create(output_path) .map_err(|e| format!("Unable to create output file: {:?}", e))?; output_file - .write_all(&state.as_ssz_bytes()) + .write_all(&post_state.as_ssz_bytes()) .map_err(|e| format!("Unable to write to output file: {:?}", e))?; } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 8003236f2d..48b4eb037a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ 
[package] name = "lighthouse" -version = "4.3.0" +version = "4.5.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } autotests = false -rust-version = "1.68.2" +rust-version = "1.69.0" [features] default = ["slasher-lmdb"] @@ -28,45 +28,45 @@ slasher-lmdb = ["slasher/lmdb"] jemalloc = ["malloc_utils/jemalloc"] [dependencies] -beacon_node = { "path" = "../beacon_node" } -slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = { version = "2.1.1", features = ["json"] } -types = { "path" = "../consensus/types" } -bls = { path = "../crypto/bls" } -ethereum_hashing = "1.0.0-beta.2" -clap = "2.33.3" -env_logger = "0.9.0" -environment = { path = "./environment" } +beacon_node = { workspace = true } +slog = { workspace = true } +sloggers = { workspace = true } +types = { workspace = true } +bls = { workspace = true } +ethereum_hashing = { workspace = true } +clap = { workspace = true } +env_logger = { workspace = true } +environment = { workspace = true } boot_node = { path = "../boot_node" } -futures = "0.3.7" -validator_client = { "path" = "../validator_client" } +futures = { workspace = true } +validator_client = { workspace = true } account_manager = { "path" = "../account_manager" } -clap_utils = { path = "../common/clap_utils" } -eth2_network_config = { path = "../common/eth2_network_config" } -lighthouse_version = { path = "../common/lighthouse_version" } -account_utils = { path = "../common/account_utils" } -lighthouse_metrics = { path = "../common/lighthouse_metrics" } -lazy_static = "1.4.0" -serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.59" -serde_yaml = "0.8.13" -task_executor = { path = "../common/task_executor" } -malloc_utils = { path = "../common/malloc_utils" } -directory = { path = "../common/directory" } -unused_port = { path = "../common/unused_port" } +clap_utils = { workspace = true } +eth2_network_config = { workspace = true } +lighthouse_version = { 
workspace = true } +account_utils = { workspace = true } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +task_executor = { workspace = true } +malloc_utils = { workspace = true } +directory = { workspace = true } +unused_port = { workspace = true } database_manager = { path = "../database_manager" } -slasher = { path = "../slasher" } +slasher = { workspace = true } validator_manager = { path = "../validator_manager" } [dev-dependencies] -tempfile = "3.1.0" -validator_dir = { path = "../common/validator_dir" } -slashing_protection = { path = "../validator_client/slashing_protection" } -lighthouse_network = { path = "../beacon_node/lighthouse_network" } -sensitive_url = { path = "../common/sensitive_url" } -eth1 = { path = "../beacon_node/eth1" } -eth2 = { path = "../common/eth2" } -beacon_processor = { path = "../beacon_node/beacon_processor" } +tempfile = { workspace = true } +validator_dir = { workspace = true } +slashing_protection = { workspace = true } +lighthouse_network = { workspace = true } +sensitive_url = { workspace = true } +eth1 = { workspace = true } +eth2 = { workspace = true } +beacon_processor = { workspace = true } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 1ba0bb267c..b57e1e9dee 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -2,24 +2,23 @@ name = "environment" version = "0.1.2" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -tokio = { version = "1.14.0", features = ["macros", "rt", "rt-multi-thread", "signal" ] } -slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = { version = "2.1.1", features = ["json"] } -types = { path = "../../consensus/types" } -eth2_config = { path = "../../common/eth2_config" } 
-task_executor = { path = "../../common/task_executor" } -eth2_network_config = { path = "../../common/eth2_network_config" } -logging = { path = "../../common/logging" } -slog-term = "2.6.0" -slog-async = "2.5.0" -futures = "0.3.7" +tokio = { workspace = true } +slog = { workspace = true } +sloggers = { workspace = true } +types = { workspace = true } +eth2_config = { workspace = true } +task_executor = { workspace = true } +eth2_network_config = { workspace = true } +logging = { workspace = true } +slog-term = { workspace = true } +slog-async = { workspace = true } +futures = { workspace = true } slog-json = "2.3.0" -exit-future = "0.2.0" -serde = "1.0.116" -serde_derive = "1.0.116" +exit-future = { workspace = true } +serde = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fc7ab8d52c..a1f6b26a95 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -13,7 +13,7 @@ use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; use logging::SSELoggingComponents; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; @@ -254,12 +254,9 @@ impl<E: EthSpec> EnvironmentBuilder<E> { } // Disable file logging if no path is specified. - let path = match config.path { - Some(path) => path, - None => { - self.log = Some(stdout_logger); - return Ok(self); - } + let Some(path) = config.path else { + self.log = Some(stdout_logger); + return Ok(self); }; // Ensure directories are created becfore the logfile. 
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index b98145163c..dbb6819cd8 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -67,6 +67,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16 EJECTION_BALANCE: 16000000000 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index d8b522307c..f98af96176 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -8,6 +8,7 @@ use env_logger::{Builder, Env}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; +use futures::TryFutureExt; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info, warn}; @@ -324,6 +325,30 @@ fn main() { .takes_value(true) .global(true) ) + .arg( + Arg::with_name("genesis-state-url") + .long("genesis-state-url") + .value_name("URL") + .help( + "A URL of a beacon-API compatible server from which to download the genesis state. \ + Checkpoint sync server URLs can generally be used with this flag. \ + If not supplied, a default URL or the --checkpoint-sync-url may be used. 
\ + If the genesis state is already included in this binary then this value will be ignored.", + ) + .takes_value(true) + .global(true), + ) + .arg( + Arg::with_name("genesis-state-url-timeout") + .long("genesis-state-url-timeout") + .value_name("SECONDS") + .help( + "The timeout in seconds for the request to --genesis-state-url.", + ) + .takes_value(true) + .default_value("180") + .global(true), + ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) @@ -635,8 +660,8 @@ fn run<E: EthSpec>( executor.clone().spawn( async move { if let Err(e) = ProductionValidatorClient::new(context, config) + .and_then(|mut vc| async move { vc.start_service().await }) .await - .and_then(|mut vc| vc.start_service()) { crit!(log, "Failed to start validator client"; "reason" => e); // Ignore the error since it always occurs during normal operation when diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 37d224dbc3..7937b1d496 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -11,6 +11,7 @@ use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::path::Path; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; @@ -21,9 +22,14 @@ use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; -use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DUMMY_ENR_TCP_PORT: u16 = 7777; +const DUMMY_ENR_UDP_PORT: u16 = 8888; +const DUMMY_ENR_QUIC_PORT: u16 = 9999; + +const _: () = + assert!(DUMMY_ENR_QUIC_PORT != 0 && DUMMY_ENR_TCP_PORT != 0 && DUMMY_ENR_UDP_PORT != 0); /// Returns the `lighthouse beacon_node` command. 
fn base_cmd() -> Command { @@ -752,6 +758,38 @@ fn builder_fallback_flags() { ); }, ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("ignore-builder-override-suggestion-threshold"), + Some("53.4"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .ignore_builder_override_suggestion_threshold, + 53.4f32 + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .ignore_builder_override_suggestion_threshold, + 10.0f32 + ); + }, + ); } #[test] @@ -797,7 +835,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let id = "bn-1"; let version = "Lighthouse-v2.1.3"; CommandLineTest::new() - .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag("execution-endpoint", Some(execution_endpoint)) .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) .flag(jwt_id_flag, Some(id)) .flag(jwt_version_flag, Some(version)) @@ -1003,46 +1041,46 @@ fn network_listen_address_flag_wrong_double_v6_value_config() { } #[test] fn network_port_flag_over_ipv4() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() .with_config(|config| { assert_eq!( - config - .network - .listen_addrs() - .v4() - .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)), - Some((port, port)) + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + Some((port, port + 1, port)) ); }); } #[test] fn network_port_flag_over_ipv6() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) .run() .with_config(|config| { 
assert_eq!( - config - .network - .listen_addrs() - .v6() - .map(|listen_addr| (listen_addr.udp_port, listen_addr.tcp_port)), - Some((port, port)) + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.disc_port, + listen_addr.quic_port, + listen_addr.tcp_port + )), + Some((port, port + 1, port)) ); }); } #[test] fn network_port_and_discovery_port_flags_over_ipv4() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; CommandLineTest::new() .flag("port", Some(tcp4_port.to_string().as_str())) - .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("discovery-port", Some(disc4_port.to_string().as_str())) .run() .with_config(|config| { assert_eq!( @@ -1050,19 +1088,19 @@ fn network_port_and_discovery_port_flags_over_ipv4() { .network .listen_addrs() .v4() - .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), - Some((tcp4_port, udp4_port)) + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.disc_port)), + Some((tcp4_port, disc4_port)) ); }); } #[test] fn network_port_and_discovery_port_flags_over_ipv6() { - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(tcp6_port.to_string().as_str())) - .flag("discovery-port", Some(udp6_port.to_string().as_str())) + .flag("discovery-port", Some(disc6_port.to_string().as_str())) .run() .with_config(|config| { assert_eq!( @@ -1070,24 +1108,24 @@ fn network_port_and_discovery_port_flags_over_ipv6() { .network .listen_addrs() .v6() - .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), - Some((tcp6_port, udp6_port)) + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.disc_port)), + Some((tcp6_port, disc6_port)) 
); }); } #[test] fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) .flag("port", Some(tcp4_port.to_string().as_str())) - .flag("discovery-port", Some(udp4_port.to_string().as_str())) + .flag("discovery-port", Some(disc4_port.to_string().as_str())) .flag("port6", Some(tcp6_port.to_string().as_str())) - .flag("discovery-port6", Some(udp6_port.to_string().as_str())) + .flag("discovery-port6", Some(disc6_port.to_string().as_str())) .run() .with_config(|config| { assert_eq!( @@ -1095,8 +1133,8 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { .network .listen_addrs() .v4() - .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), - Some((tcp4_port, udp4_port)) + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.disc_port)), + Some((tcp4_port, disc4_port)) ); assert_eq!( @@ -1104,8 +1142,47 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { .network .listen_addrs() .v6() - .map(|listen_addr| (listen_addr.tcp_port, listen_addr.udp_port)), - Some((tcp6_port, udp6_port)) + .map(|listen_addr| (listen_addr.tcp_port, listen_addr.disc_port)), + Some((tcp6_port, disc6_port)) + ); + }); +} + +#[test] +fn network_port_discovery_quic_port_flags_over_ipv4_and_ipv6() { + let tcp4_port = 0; + let disc4_port = 0; + let quic4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; + let quic6_port = 0; + CommandLineTest::new() + .flag("listen-address", Some("::1")) + .flag("listen-address", Some("127.0.0.1")) + .flag("port", 
Some(tcp4_port.to_string().as_str())) + .flag("discovery-port", Some(disc4_port.to_string().as_str())) + .flag("quic-port", Some(quic4_port.to_string().as_str())) + .flag("port6", Some(tcp6_port.to_string().as_str())) + .flag("discovery-port6", Some(disc6_port.to_string().as_str())) + .flag("quic-port6", Some(quic6_port.to_string().as_str())) + .run() + .with_config(|config| { + assert_eq!( + config.network.listen_addrs().v4().map(|listen_addr| ( + listen_addr.tcp_port, + listen_addr.disc_port, + listen_addr.quic_port + )), + Some((tcp4_port, disc4_port, quic4_port)) + ); + + assert_eq!( + config.network.listen_addrs().v6().map(|listen_addr| ( + listen_addr.tcp_port, + listen_addr.disc_port, + listen_addr.quic_port + )), + Some((tcp6_port, disc6_port, quic6_port)) ); }); } @@ -1117,6 +1194,14 @@ fn disable_discovery_flag() { .run_with_zero_port() .with_config(|config| assert!(config.network.disable_discovery)); } + +#[test] +fn disable_quic_flag() { + CommandLineTest::new() + .flag("disable-quic", None) + .run_with_zero_port() + .with_config(|config| assert!(config.network.disable_quic_support)); +} #[test] fn disable_peer_scoring_flag() { CommandLineTest::new() @@ -1216,41 +1301,91 @@ fn network_load_flag() { // Tests for ENR flags. 
#[test] fn enr_udp_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; + assert!(port != 0); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ) + }); +} +#[test] +fn enr_quic_port_flag() { + let port = DUMMY_ENR_QUIC_PORT; + CommandLineTest::new() + .flag("enr-quic-port", Some(port.to_string().as_str())) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.enr_quic4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp_port_flag() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_udp6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-udp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(port) + ) + }); +} +#[test] +fn enr_quic6_port_flag() { + let port = DUMMY_ENR_QUIC_PORT; + CommandLineTest::new() + .flag("enr-quic6-port", Some(port.to_string().as_str())) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.enr_quic6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp6_port_flag() { - let port = 
unused_tcp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::<Ipv4Addr>().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -1261,21 +1396,27 @@ fn enr_match_flag_over_ipv4() { assert_eq!( config.network.listen_addrs().v4().map(|listen_addr| ( listen_addr.addr, - listen_addr.udp_port, + listen_addr.disc_port, listen_addr.tcp_port )), Some((addr, udp4_port, tcp4_port)) ); assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::<Ipv6Addr>().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
+ let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(ADDR)) @@ -1286,25 +1427,33 @@ fn enr_match_flag_over_ipv6() { assert_eq!( config.network.listen_addrs().v6().map(|listen_addr| ( listen_addr.addr, - listen_addr.udp_port, + listen_addr.disc_port, listen_addr.tcp_port )), Some((addr, udp6_port, tcp6_port)) ); assert_eq!(config.network.enr_address, (None, Some(addr))); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); }); } #[test] fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; let ipv6_addr = IPV6_ADDR.parse::<Ipv6Addr>().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
+ let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; let ipv4_addr = IPV4_ADDR.parse::<Ipv4Addr>().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(IPV4_ADDR)) @@ -1318,7 +1467,7 @@ fn enr_match_flag_over_ipv4_and_ipv6() { assert_eq!( config.network.listen_addrs().v6().map(|listen_addr| ( listen_addr.addr, - listen_addr.udp_port, + listen_addr.disc_port, listen_addr.tcp_port )), Some((ipv6_addr, udp6_port, tcp6_port)) @@ -1326,7 +1475,7 @@ fn enr_match_flag_over_ipv4_and_ipv6() { assert_eq!( config.network.listen_addrs().v4().map(|listen_addr| ( listen_addr.addr, - listen_addr.udp_port, + listen_addr.disc_port, listen_addr.tcp_port )), Some((ipv4_addr, udp4_port, tcp4_port)) @@ -1335,41 +1484,53 @@ fn enr_match_flag_over_ipv4_and_ipv6() { config.network.enr_address, (Some(ipv4_addr), Some(ipv6_addr)) ); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] fn enr_address_flag_with_ipv4() { let addr = "192.167.1.1".parse::<Ipv4Addr>().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_flag_with_ipv6() { let addr = 
"192.167.1.1".parse::<Ipv4Addr>().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] fn enr_address_dns_flag() { let addr = Ipv4Addr::LOCALHOST; let ipv6addr = Ipv6Addr::LOCALHOST; - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1379,7 +1540,10 @@ fn enr_address_dns_flag() { config.network.enr_address.0 == Some(addr) || config.network.enr_address.1 == Some(ipv6addr) ); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] @@ -1402,6 +1566,7 @@ fn http_flag() { fn http_address_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); @@ -1410,15 +1575,17 @@ fn http_address_flag() { fn http_address_ipv6_flag() { let addr = "::1".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("::1")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); } #[test] fn http_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() + .flag("http", None) 
.flag("http-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) .run() @@ -1460,15 +1627,20 @@ fn disable_inbound_rate_limiter_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() - .flag("http-allow-origin", Some("127.0.0.99")) + .flag("http", None) + .flag("http-allow-origin", Some("http://127.0.0.99")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.allow_origin, Some("127.0.0.99".to_string())); + assert_eq!( + config.http_api.allow_origin, + Some("http://127.0.0.99".to_string()) + ); }); } #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("*")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -1476,6 +1648,7 @@ fn http_allow_origin_all_flag() { #[test] fn http_allow_sync_stalled_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-sync-stalled", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true)); @@ -1483,32 +1656,29 @@ fn http_allow_sync_stalled_flag() { #[test] fn http_enable_beacon_processor() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); } #[test] fn http_tls_flags() { - let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() + .flag("http", None) .flag("http-enable-tls", None) - .flag( - "http-tls-cert", - 
dir.path().join("certificate.crt").as_os_str().to_str(), - ) - .flag( - "http-tls-key", - dir.path().join("private.key").as_os_str().to_str(), - ) + .flag("http-tls-cert", Some("tests/tls/cert.pem")) + .flag("http-tls-key", Some("tests/tls/key.rsa")) .run_with_zero_port() .with_config(|config| { let tls_config = config @@ -1516,14 +1686,15 @@ fn http_tls_flags() { .tls_config .as_ref() .expect("tls_config was empty."); - assert_eq!(tls_config.cert, dir.path().join("certificate.crt")); - assert_eq!(tls_config.key, dir.path().join("private.key")); + assert_eq!(tls_config.cert, Path::new("tests/tls/cert.pem")); + assert_eq!(tls_config.key, Path::new("tests/tls/key.rsa")); }); } #[test] fn http_spec_fork_default() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); } @@ -1531,6 +1702,7 @@ fn http_spec_fork_default() { #[test] fn http_spec_fork_override() { CommandLineTest::new() + .flag("http", None) .flag("http-spec-fork", Some("altair")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair))); @@ -1567,8 +1739,8 @@ fn metrics_address_ipv6_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) @@ -1755,6 +1927,45 @@ fn prune_payloads_on_startup_false() { .with_config(|config| assert!(!config.store.prune_payloads)); } #[test] +fn prune_blobs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.prune_blobs)); +} +#[test] +fn prune_blobs_on_startup_false() { + CommandLineTest::new() + .flag("prune-blobs", Some("false")) + .run_with_zero_port() + .with_config(|config| assert!(!config.store.prune_blobs)); +} 
+#[test] +fn epochs_per_blob_prune_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.epochs_per_blob_prune == 1)); +} +#[test] +fn epochs_per_blob_prune_on_startup_five() { + CommandLineTest::new() + .flag("epochs-per-blob-prune", Some("5")) + .run_with_zero_port() + .with_config(|config| assert!(config.store.epochs_per_blob_prune == 5)); +} +#[test] +fn blob_prune_margin_epochs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 0)); +} +#[test] +fn blob_prune_margin_epochs_on_startup_ten() { + CommandLineTest::new() + .flag("blob-prune-margin-epochs", Some("10")) + .run_with_zero_port() + .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 10)); +} +#[test] fn reconstruct_historic_states_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) @@ -2265,6 +2476,18 @@ fn gui_flag() { }); } +#[test] +fn multiple_http_enabled_flags() { + CommandLineTest::new() + .flag("gui", None) + .flag("http", None) + .flag("staking", None) + .run_with_zero_port() + .with_config(|config| { + assert!(config.http_api.enabled); + }); +} + #[test] fn optimistic_finalized_sync_default() { CommandLineTest::new() @@ -2377,7 +2600,53 @@ fn http_sse_capacity_multiplier_default() { #[test] fn http_sse_capacity_multiplier_override() { CommandLineTest::new() + .flag("http", None) .flag("http-sse-capacity-multiplier", Some("10")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10)); } + +#[test] +fn http_duplicate_block_status_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 202) + }); +} + +#[test] +fn http_duplicate_block_status_override() { + CommandLineTest::new() + .flag("http", None) + .flag("http-duplicate-block-status", Some("301")) + .run_with_zero_port() + 
.with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 301) + }); +} + +#[test] +fn genesis_state_url_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.genesis_state_url, None); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(180)); + }); +} + +#[test] +fn genesis_state_url_value() { + CommandLineTest::new() + .flag("genesis-state-url", Some("http://genesis.com")) + .flag("genesis-state-url-timeout", Some("42")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.genesis_state_url.as_deref(), + Some("http://genesis.com") + ); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); + }); +} diff --git a/lighthouse/tests/tls/cert.pem b/lighthouse/tests/tls/cert.pem new file mode 100644 index 0000000000..03af12ff81 --- /dev/null +++ b/lighthouse/tests/tls/cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEADCCAmigAwIBAgICAcgwDQYJKoZIhvcNAQELBQAwLDEqMCgGA1UEAwwhcG9u +eXRvd24gUlNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTE2MDgxMzE2MDcwNFoX +DTIyMDIwMzE2MDcwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpVhh1/FNP2qvWenbZSghari/UThwe +dynfnHG7gc3JmygkEdErWBO/CHzHgsx7biVE5b8sZYNEDKFojyoPHGWK2bQM/FTy +niJCgNCLdn6hUqqxLAml3cxGW77hAWu94THDGB1qFe+eFiAUnDmob8gNZtAzT6Ky +b/JGJdrEU0wj+Rd7wUb4kpLInNH/Jc+oz2ii2AjNbGOZXnRz7h7Kv3sO9vABByYe +LcCj3qnhejHMqVhbAT1MD6zQ2+YKBjE52MsQKU/xhUpu9KkUyLh0cxkh3zrFiKh4 +Vuvtc+n7aeOv2jJmOl1dr0XLlSHBlmoKqH6dCTSbddQLmlK7dms8vE01AgMBAAGj +gb4wgbswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwHQYDVR0OBBYEFMeUzGYV +bXwJNQVbY1+A8YXYZY8pMEIGA1UdIwQ7MDmAFJvEsUi7+D8vp8xcWvnEdVBGkpoW +oR6kHDAaMRgwFgYDVQQDDA9wb255dG93biBSU0EgQ0GCAXswOwYDVR0RBDQwMoIO +dGVzdHNlcnZlci5jb22CFXNlY29uZC50ZXN0c2VydmVyLmNvbYIJbG9jYWxob3N0 +MA0GCSqGSIb3DQEBCwUAA4IBgQBsk5ivAaRAcNgjc7LEiWXFkMg703AqDDNx7kB1 +RDgLalLvrjOfOp2jsDfST7N1tKLBSQ9bMw9X4Jve+j7XXRUthcwuoYTeeo+Cy0/T 
+1Q78ctoX74E2nB958zwmtRykGrgE/6JAJDwGcgpY9kBPycGxTlCN926uGxHsDwVs +98cL6ZXptMLTR6T2XP36dAJZuOICSqmCSbFR8knc/gjUO36rXTxhwci8iDbmEVaf +BHpgBXGU5+SQ+QM++v6bHGf4LNQC5NZ4e4xvGax8ioYu/BRsB/T3Lx+RlItz4zdU +XuxCNcm3nhQV2ZHquRdbSdoyIxV5kJXel4wCmOhWIq7A2OBKdu5fQzIAzzLi65EN +RPAKsKB4h7hGgvciZQ7dsMrlGw0DLdJ6UrFyiR5Io7dXYT/+JP91lP5xsl6Lhg9O +FgALt7GSYRm2cZdgi9pO9rRr83Br1VjQT1vHz6yoZMXSqc4A2zcN2a2ZVq//rHvc +FZygs8miAhWPzqnpmgTj1cPiU1M= +-----END CERTIFICATE----- diff --git a/lighthouse/tests/tls/key.rsa b/lighthouse/tests/tls/key.rsa new file mode 100644 index 0000000000..b13bf5d07f --- /dev/null +++ b/lighthouse/tests/tls/key.rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqVYYdfxTT9qr1np22UoIWq4v1E4cHncp35xxu4HNyZsoJBHR +K1gTvwh8x4LMe24lROW/LGWDRAyhaI8qDxxlitm0DPxU8p4iQoDQi3Z+oVKqsSwJ +pd3MRlu+4QFrveExwxgdahXvnhYgFJw5qG/IDWbQM0+ism/yRiXaxFNMI/kXe8FG ++JKSyJzR/yXPqM9ootgIzWxjmV50c+4eyr97DvbwAQcmHi3Ao96p4XoxzKlYWwE9 +TA+s0NvmCgYxOdjLEClP8YVKbvSpFMi4dHMZId86xYioeFbr7XPp+2njr9oyZjpd +Xa9Fy5UhwZZqCqh+nQk0m3XUC5pSu3ZrPLxNNQIDAQABAoIBAFKtZJgGsK6md4vq +kyiYSufrcBLaaEQ/rkQtYCJKyC0NAlZKFLRy9oEpJbNLm4cQSkYPXn3Qunx5Jj2k +2MYz+SgIDy7f7KHgr52Ew020dzNQ52JFvBgt6NTZaqL1TKOS1fcJSSNIvouTBerK +NCSXHzfb4P+MfEVe/w1c4ilE+kH9SzdEo2jK/sRbzHIY8TX0JbmQ4SCLLayr22YG +usIxtIYcWt3MMP/G2luRnYzzBCje5MXdpAhlHLi4TB6x4h5PmBKYc57uOVNngKLd +YyrQKcszW4Nx5v0a4HG3A5EtUXNCco1+5asXOg2lYphQYVh2R+1wgu5WiDjDVu+6 +EYgjFSkCgYEA0NBk6FDoxE/4L/4iJ4zIhu9BptN8Je/uS5c6wRejNC/VqQyw7SHb +hRFNrXPvq5Y+2bI/DxtdzZLKAMXOMjDjj0XEgfOIn2aveOo3uE7zf1i+njxwQhPu +uSYA9AlBZiKGr2PCYSDPnViHOspVJjxRuAgyWM1Qf+CTC0D95aj0oz8CgYEAz5n4 +Cb3/WfUHxMJLljJ7PlVmlQpF5Hk3AOR9+vtqTtdxRjuxW6DH2uAHBDdC3OgppUN4 +CFj55kzc2HUuiHtmPtx8mK6G+otT7Lww+nLSFL4PvZ6CYxqcio5MPnoYd+pCxrXY +JFo2W7e4FkBOxb5PF5So5plg+d0z/QiA7aFP1osCgYEAtgi1rwC5qkm8prn4tFm6 +hkcVCIXc+IWNS0Bu693bXKdGr7RsmIynff1zpf4ntYGpEMaeymClCY0ppDrMYlzU +RBYiFNdlBvDRj6s/H+FTzHRk2DT/99rAhY9nzVY0OQFoQIXK8jlURGrkmI/CYy66 +XqBmo5t4zcHM7kaeEBOWEKkCgYAYnO6VaRtPNQfYwhhoFFAcUc+5t+AVeHGW/4AY 
+M5qlAlIBu64JaQSI5KqwS0T4H+ZgG6Gti68FKPO+DhaYQ9kZdtam23pRVhd7J8y+ +xMI3h1kiaBqZWVxZ6QkNFzizbui/2mtn0/JB6YQ/zxwHwcpqx0tHG8Qtm5ZAV7PB +eLCYhQKBgQDALJxU/6hMTdytEU5CLOBSMby45YD/RrfQrl2gl/vA0etPrto4RkVq +UrkDO/9W4mZORClN3knxEFSTlYi8YOboxdlynpFfhcs82wFChs+Ydp1eEsVHAqtu +T+uzn0sroycBiBfVB949LExnzGDFUkhG0i2c2InarQYLTsIyHCIDEA== +-----END RSA PRIVATE KEY----- diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index f20ff4e0f1..5268f3230a 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -260,6 +260,7 @@ fn http_flag() { fn http_address_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .flag("unencrypted-http-transport", None) .run() @@ -269,6 +270,7 @@ fn http_address_flag() { fn http_address_ipv6_flag() { let addr = "::1".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("::1")) .flag("unencrypted-http-transport", None) .run() @@ -279,6 +281,7 @@ fn http_address_ipv6_flag() { fn missing_unencrypted_http_transport_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); @@ -286,6 +289,7 @@ fn missing_unencrypted_http_transport_flag() { #[test] fn http_port_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_api.listen_port, 9090)); @@ -293,6 +297,7 @@ fn http_port_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -305,6 +310,7 @@ fn http_allow_origin_flag() { #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) 
.flag("http-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -312,12 +318,14 @@ fn http_allow_origin_all_flag() { #[test] fn http_allow_keystore_export_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.allow_keystore_export)); } #[test] fn http_allow_keystore_export_present() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-keystore-export", None) .run() .with_config(|config| assert!(config.http_api.allow_keystore_export)); @@ -325,12 +333,14 @@ fn http_allow_keystore_export_present() { #[test] fn http_store_keystore_passwords_in_secrets_dir_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir)); } #[test] fn http_store_keystore_passwords_in_secrets_dir_present() { CommandLineTest::new() + .flag("http", None) .flag("http-store-passwords-in-secrets-dir", None) .run() .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); @@ -348,6 +358,7 @@ fn metrics_flag() { fn metrics_address_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -356,6 +367,7 @@ fn metrics_address_flag() { fn metrics_address_ipv6_flag() { let addr = "::1".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("::1")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -363,6 +375,7 @@ fn metrics_address_ipv6_flag() { #[test] fn metrics_port_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090)); @@ -370,6 +383,7 @@ fn metrics_port_flag() { #[test] fn 
metrics_allow_origin_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -382,6 +396,7 @@ fn metrics_allow_origin_flag() { #[test] fn metrics_allow_origin_all_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_metrics.allow_origin, Some("*".to_string()))); diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index f261ea67fd..4f54154d1d 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -128,3 +128,16 @@ Update the genesis time to now using: > Note: you probably want to just rerun `./start_local_testnet.sh` to start over > but this is another option. + +### Testing builder flow + +1. Add builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options: + - [`mock-relay`](https://github.com/realbigsean/mock-relay) + - [`dummy-builder`](https://github.com/michaelsproul/dummy_builder) +2. (Optional) Add `--always-prefer-builder-payload` to `BN_ARGS`. +3. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`. +4. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): + ```bash + ./start_local_testnet.sh -p genesis.json + ``` +5. Block production using builder flow will start at epoch 4. 
diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index 1a04d12d4a..940fe2b858 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -39,10 +39,11 @@ done # Get positional arguments data_dir=${@:$OPTIND+0:1} -network_port=${@:$OPTIND+1:1} -http_port=${@:$OPTIND+2:1} -execution_endpoint=${@:$OPTIND+3:1} -execution_jwt=${@:$OPTIND+4:1} +tcp_port=${@:$OPTIND+1:1} +quic_port=${@:$OPTIND+2:1} +http_port=${@:$OPTIND+3:1} +execution_endpoint=${@:$OPTIND+4:1} +execution_jwt=${@:$OPTIND+5:1} lighthouse_binary=lighthouse @@ -56,11 +57,14 @@ exec $lighthouse_binary \ --disable-peer-scoring \ --staking \ --enr-address 127.0.0.1 \ - --enr-udp-port $network_port \ - --enr-tcp-port $network_port \ - --port $network_port \ + --enr-udp-port $tcp_port \ + --enr-tcp-port $tcp_port \ + --enr-quic-port $quic_port \ + --port $tcp_port \ + --quic-port $quic_port \ --http-port $http_port \ --disable-packet-filter \ --target-peers $((BN_COUNT - 1)) \ --execution-endpoint $execution_endpoint \ - --execution-jwt $execution_jwt + --execution-jwt $execution_jwt \ + $BN_ARGS diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json index 3ac553e55b..eda3b312f6 100644 --- a/scripts/local_testnet/genesis.json +++ b/scripts/local_testnet/genesis.json @@ -13,6 +13,7 @@ "londonBlock": 0, "mergeNetsplitBlock": 0, "shanghaiTime": 0, + "cancunTime": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, @@ -858,4 +859,4 @@ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp": "1662465600" -} \ No newline at end of file +} diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh index d3923cdd89..5dc4575cf0 100755 --- a/scripts/local_testnet/geth.sh +++ b/scripts/local_testnet/geth.sh @@ -33,7 +33,6 @@ http_port=${@:$OPTIND+2:1} 
auth_port=${@:$OPTIND+3:1} genesis_file=${@:$OPTIND+4:1} - # Init $GETH_BINARY init \ --datadir $data_dir \ @@ -51,4 +50,4 @@ exec $GETH_BINARY \ --bootnodes $EL_BOOTNODE_ENODE \ --port $network_port \ --http.port $http_port \ - --authrpc.port $auth_port \ No newline at end of file + --authrpc.port $auth_port diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 283aa0c026..7e000251a2 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -28,6 +28,7 @@ lcli \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \ --capella-fork-epoch $CAPELLA_FORK_EPOCH \ + --deneb-fork-epoch $DENEB_FORK_EPOCH \ --ttd $TTD \ --eth1-block-hash $ETH1_BLOCK_HASH \ --eth1-id $CHAIN_ID \ @@ -46,6 +47,6 @@ lcli \ insecure-validators \ --count $VALIDATOR_COUNT \ --base-dir $DATADIR \ - --node-count $BN_COUNT + --node-count $VC_COUNT echo Validators generated with keystore passwords at $DATADIR. diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 4b8357b993..c796050bc4 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -103,16 +103,22 @@ echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 # Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') echo $GENESIS_TIME CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) echo $CAPELLA_TIME sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file +CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) +echo $CANCUN_TIME 
+sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file cat $genesis_file # Delay to let boot_enr.yaml to be created execute_command_add_PID bootnode.log ./bootnode.sh -sleeping 1 +sleeping 3 + +execute_command_add_PID el_bootnode.log ./el_bootnode.sh +sleeping 3 execute_command_add_PID el_bootnode.log ./el_bootnode.sh sleeping 1 @@ -135,11 +141,12 @@ sleeping 20 # Reset the `genesis.json` config file fork times. sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file +sed -i 's/"cancunTime".*$/"cancunTime": 0,/g' $genesis_file for (( bn=1; bn<=$BN_COUNT; bn++ )); do secret=$DATADIR/geth_datadir$bn/geth/jwtsecret echo $secret - execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret + execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_udp_tcp_base + $bn + 100)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret done # Start requested number of validator clients diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 6e05f0c411..d04a235497 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -45,6 +45,7 @@ CHAIN_ID=4242 ALTAIR_FORK_EPOCH=0 BELLATRIX_FORK_EPOCH=0 CAPELLA_FORK_EPOCH=1 +DENEB_FORK_EPOCH=2 TTD=0 @@ -60,5 +61,8 @@ SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage PROPOSER_SCORE_BOOST=40 +# Command line arguments for beacon node client +BN_ARGS="" + # Command line arguments for validator client VC_ARGS="" diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index e9d3e39ce5..e13c06cdba 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -43,23 +43,21 @@ sleep 10 echo "Starting local execution nodes" 
-exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 7000 6000 5000 $genesis_file &> geth.log & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 7100 6100 5100 $genesis_file &> /dev/null & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 7200 6200 5200 $genesis_file &> /dev/null & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 6000 5000 4000 $genesis_file &> geth.log & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 6100 5100 4100 $genesis_file &> /dev/null & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 6200 5200 4200 $genesis_file &> /dev/null & sleep 20 -echo "Starting local beacon nodes" - -exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:5000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> beacon1.log & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 http://localhost:5100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 http://localhost:5200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 8000 7000 9000 http://localhost:4000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 8100 7100 9100 http://localhost:4100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 8200 7200 9200 http://localhost:4200 
$HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & echo "Starting local validator clients" -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:9000 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:9100 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:9200 &> /dev/null & echo "Waiting an epoch before starting the next validator client" sleep $(( $SECONDS_PER_SLOT * 32 )) @@ -70,7 +68,7 @@ if [[ "$BEHAVIOR" == "failure" ]]; then # Use same keys as keys from VC1 and connect to BN2 # This process should not last longer than 2 epochs - timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:8100 + timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:9100 DOPPELGANGER_EXIT=$? echo "Shutting down" @@ -96,7 +94,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then echo "Starting the last validator client" - ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:8100 & + ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:9100 & DOPPELGANGER_FAILURE=0 # Sleep three epochs, then make sure all validators were active in epoch 2. 
Use @@ -110,7 +108,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then cd $HOME/.lighthouse/local-testnet/node_4/validators for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:8100/lighthouse/validator_inclusion/3/$val | jq | grep -q '"is_previous_epoch_target_attester": false' + curl -s localhost:9100/lighthouse/validator_inclusion/3/$val | jq | grep -q '"is_previous_epoch_target_attester": false' IS_ATTESTER=$? if [[ $IS_ATTESTER -eq 0 ]]; then echo "$val did not attest in epoch 2." @@ -128,7 +126,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:8100/lighthouse/validator_inclusion/5/$val | jq | grep -q '"is_previous_epoch_target_attester": true' + curl -s localhost:9100/lighthouse/validator_inclusion/5/$val | jq | grep -q '"is_previous_epoch_target_attester": true' IS_ATTESTER=$? if [[ $IS_ATTESTER -eq 0 ]]; then echo "$val attested in epoch 4." @@ -154,4 +152,4 @@ if [[ "$BEHAVIOR" == "success" ]]; then fi fi -exit 0 \ No newline at end of file +exit 0 diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json index ec3cd1e813..83f45f1a01 100644 --- a/scripts/tests/genesis.json +++ b/scripts/tests/genesis.json @@ -12,10 +12,15 @@ "berlinBlock": 0, "londonBlock": 0, "mergeForkBlock": 0, + "shanghaiTime": 0, + "shardingForkTime": 0, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x6d6172697573766477000000" + }, "0x0000000000000000000000000000000000000000": { "balance": "1" }, @@ -848,4 +853,4 @@ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp": "1662465600" -} \ No newline at end of file +} diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index a7e696ec0a..98ae08f074 100644 --- a/scripts/tests/vars.env +++ 
b/scripts/tests/vars.env @@ -16,7 +16,7 @@ DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 # Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=16ef16304456fdacdeb272bd70207021031db355ed6c5e44ebd34c1ab757e221 +ETH1_BLOCK_HASH=add7865f8346031c72287e2edc4a4952fd34fc0a8642403e8c1bce67f215c92b VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -41,7 +41,7 @@ CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=0 BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=18446744073709551615 +CAPELLA_FORK_EPOCH=1 DENEB_FORK_EPOCH=18446744073709551615 TTD=0 @@ -58,5 +58,8 @@ SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage PROPOSER_SCORE_BOOST=70 +# Command line arguments for beacon node client +BN_ARGS="" + # Enable doppelganger detection VC_ARGS=" --enable-doppelganger-protection " \ No newline at end of file diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index bfa7b5f64c..90fb54cd1a 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -2,34 +2,34 @@ name = "slasher" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [features] default = ["lmdb"] mdbx = ["dep:mdbx"] lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] +portable = ["types/portable"] [dependencies] -bincode = "1.3.1" -byteorder = "1.3.4" -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" +bincode = { workspace = true } +byteorder = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lazy_static = "1.4.0" -lighthouse_metrics = { path = "../common/lighthouse_metrics" } -filesystem = { path = "../common/filesystem" } -lru = "0.7.1" -parking_lot = "0.12.0" -rand = "0.8.5" -safe_arith = { path = "../consensus/safe_arith" } -serde = "1.0" -serde_derive = "1.0" -slog = "2.5.2" -sloggers = { version = "2.1.1", features = ["json"] } -tree_hash = "0.5.0" 
-tree_hash_derive = "0.5.0" -types = { path = "../consensus/types" } -strum = { version = "0.24.1", features = ["derive"] } +lazy_static = { workspace = true } +lighthouse_metrics = { workspace = true } +filesystem = { workspace = true } +lru = { workspace = true } +parking_lot = { workspace = true } +rand = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +sloggers = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } +strum = { workspace = true } # MDBX is pinned at the last version with Windows and macOS support. mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true } @@ -37,8 +37,8 @@ lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b9426531 lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } [dev-dependencies] -maplit = "1.0.2" -rayon = "1.3.0" -tempfile = "3.1.0" -logging = { path = "../common/logging" } +maplit = { workspace = true } +rayon = { workspace = true } +tempfile = { workspace = true } +logging = { workspace = true } diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 63cf1e4649..41e3b5b90a 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -2,17 +2,17 @@ name = "slasher_service" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] -edition = "2021" +edition = { workspace = true } [dependencies] -beacon_chain = { path = "../../beacon_node/beacon_chain" } -directory = { path = "../../common/directory" } -lighthouse_network = { path = "../../beacon_node/lighthouse_network" } -network = { path = "../../beacon_node/network" } -slasher = { path = ".." 
} -slog = "2.5.2" -slot_clock = { path = "../../common/slot_clock" } -state_processing = { path = "../../consensus/state_processing" } -task_executor = { path = "../../common/task_executor" } -tokio = { version = "1.14.0", features = ["full"] } -types = { path = "../../consensus/types" } +beacon_chain = { workspace = true } +directory = { workspace = true } +lighthouse_network = { workspace = true } +network = { workspace = true } +slasher = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +state_processing = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 4deb389124..91c8f373f4 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -4,7 +4,7 @@ use crate::{ SlasherDB, }; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; @@ -159,9 +159,8 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result<Option<Self>, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? { - Some(chunk_bytes) => chunk_bytes, - None => return Ok(None), + let Some(chunk_bytes) = txn.get(Self::select_db(db), &disk_key.to_be_bytes())? 
else { + return Ok(None); }; let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?; @@ -448,11 +447,9 @@ pub fn apply_attestation_for_validator<E: EthSpec, T: TargetArrayChunk>( return Ok(slashing_status); } - let mut start_epoch = if let Some(start_epoch) = + let Some(mut start_epoch) = T::first_start_epoch(attestation.data.source.epoch, current_epoch, config) - { - start_epoch - } else { + else { return Ok(slashing_status); }; @@ -536,12 +533,10 @@ pub fn epoch_update_for_validator<E: EthSpec, T: TargetArrayChunk>( current_epoch: Epoch, config: &Config, ) -> Result<(), Error> { - let previous_current_epoch = - if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? { - epoch - } else { - return Ok(()); - }; + let Some(previous_current_epoch) = db.get_current_epoch_for_validator(validator_index, txn)? + else { + return Ok(()); + }; let mut epoch = previous_current_epoch; diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 361621d176..894760d277 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,5 +1,5 @@ use crate::Error; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::path::PathBuf; use strum::{Display, EnumString, EnumVariantNames}; use types::{Epoch, EthSpec, IndexedAttestation}; diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar deleted file mode 100644 index c790e248df..0000000000 --- a/testing/antithesis/Dockerfile.libvoidstar +++ /dev/null @@ -1,25 +0,0 @@ -FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . lighthouse - -# Build lighthouse directly with a cargo build command, bypassing the Makefile. 
-RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse -# build lcli binary directly with cargo install command, bypassing the makefile -RUN cargo install --path /lighthouse/lcli --force --locked - -FROM ubuntu:latest -RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ - libssl-dev \ - ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# create and move the libvoidstar file -RUN mkdir libvoidstar -COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so - -# set the env variable to avoid having to always set it -ENV LD_LIBRARY_PATH=/usr/lib -# move the lighthouse binary and lcli binary -COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse -COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli \ No newline at end of file diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so deleted file mode 100644 index 0f8a0f23c3..0000000000 Binary files a/testing/antithesis/libvoidstar/libvoidstar.so and /dev/null differ diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 11283052f0..dffed7647b 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -2,37 +2,41 @@ name = "ef_tests" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [features] # `ef_tests` feature must be enabled to actually run the tests ef_tests = [] milagro = ["bls/milagro"] fake_crypto = 
["bls/fake_crypto"] +portable = ["beacon_chain/portable"] [dependencies] -bls = { path = "../../crypto/bls", default-features = false } -compare_fields = { path = "../../common/compare_fields" } -compare_fields_derive = { path = "../../common/compare_fields_derive" } -derivative = "2.1.1" -ethereum-types = "0.14.1" -hex = "0.4.2" -rayon = "1.4.1" -serde = "1.0.116" -serde_derive = "1.0.116" -serde_repr = "0.1.6" -serde_yaml = "0.8.13" -ethereum_ssz = "0.5.0" -ethereum_ssz_derive = "0.5.0" -tree_hash = "0.5.0" -tree_hash_derive = "0.5.0" -cached_tree_hash = { path = "../../consensus/cached_tree_hash" } -state_processing = { path = "../../consensus/state_processing" } -swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" } -types = { path = "../../consensus/types" } -snap = "1.0.1" -fs2 = "0.4.3" -beacon_chain = { path = "../../beacon_node/beacon_chain" } -store = { path = "../../beacon_node/store" } -fork_choice = { path = "../../consensus/fork_choice" } -execution_layer = { path = "../../beacon_node/execution_layer" } +bls = { workspace = true } +compare_fields = { workspace = true } +compare_fields_derive = { workspace = true } +derivative = { workspace = true } +ethereum-types = { workspace = true } +hex = { workspace = true } +kzg = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_repr = { workspace = true } +serde_yaml = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +cached_tree_hash = { workspace = true } +state_processing = { workspace = true } +swap_or_not_shuffle = { workspace = true } +types = { workspace = true } +snap = { workspace = true } +fs2 = { workspace = true } +beacon_chain = { workspace = true } +store = { workspace = true } +fork_choice = { 
workspace = true } +execution_layer = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 81a1739eb1..452d805cec 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-rc.4 +TESTS_TAG := v1.4.0-beta.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index b52d155224..a5ab897c33 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -41,8 +41,6 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # LightClientHeader "tests/.*/.*/ssz_static/LightClientHeader", - # Deneb (previously known as eip4844) tests are disabled for now. - "tests/.*/deneb", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*", # More Mac weirdness. @@ -52,7 +50,8 @@ excluded_paths = [ # some bls tests are not included now "bls12-381-tests/deserialization_G1", "bls12-381-tests/deserialization_G2", - "bls12-381-tests/hash_to_G2" + "bls12-381-tests/hash_to_G2", + "tests/.*/eip6110" ] diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 216912a4f1..f328fa6404 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -18,6 +18,12 @@ mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; +mod kzg_blob_to_kzg_commitment; +mod kzg_compute_blob_kzg_proof; +mod kzg_compute_kzg_proof; +mod kzg_verify_blob_kzg_proof; +mod kzg_verify_blob_kzg_proof_batch; +mod kzg_verify_kzg_proof; mod merkle_proof_validity; mod operations; mod rewards; @@ -42,6 +48,12 @@ pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; +pub use kzg_blob_to_kzg_commitment::*; +pub use kzg_compute_blob_kzg_proof::*; +pub use kzg_compute_kzg_proof::*; +pub use 
kzg_verify_blob_kzg_proof::*; +pub use kzg_verify_blob_kzg_proof_batch::*; +pub use kzg_verify_kzg_proof::*; pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 53387ee4d7..c1085e0702 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, Signature}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsAggregateSigs { diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index e9539dc15e..0e006d95c2 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/bls_batch_verify.rs b/testing/ef_tests/src/cases/bls_batch_verify.rs index de8721d67d..703444c987 100644 --- a/testing/ef_tests/src/cases/bls_batch_verify.rs +++ b/testing/ef_tests/src/cases/bls_batch_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{verify_signature_sets, BlsWrappedSignature, PublicKeyBytes, Signature, SignatureSet}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::borrow::Cow; use std::str::FromStr; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs index c41fbca393..8783aa141e 100644 --- 
a/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs +++ b/testing/ef_tests/src/cases/bls_eth_aggregate_pubkeys.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregatePublicKey, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct BlsEthAggregatePubkeys { diff --git a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs index 80e018459b..0fb3a026cf 100644 --- a/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_eth_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 608995db9d..dcdc1bd197 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{AggregateSignature, PublicKeyBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index 53c13b569a..6479fabe42 100644 --- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::SecretKey; -use serde_derive::Deserialize; +use serde::Deserialize; use types::Hash256; #[derive(Debug, Clone, Deserialize)] diff --git 
a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 779b3cf75f..24b62c5fa1 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::impl_bls_load_case; use bls::{PublicKeyBytes, Signature, SignatureBytes}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::convert::TryInto; use types::Hash256; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index a59ccb34ad..2a7c998758 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -1,4 +1,4 @@ -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; @@ -64,8 +64,9 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { match fork_name { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, - ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. - ForkName::Capella => ForkName::Merge, // TODO: Check this when tests are released.. 
+ ForkName::Merge => ForkName::Altair, + ForkName::Capella => ForkName::Merge, + ForkName::Deneb => ForkName::Capella, } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 5e71187156..cf182af2b2 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -4,7 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates; use state_processing::per_epoch_processing::{ @@ -101,7 +101,10 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { justification_and_finalization_state.apply_changes_to_state(state); Ok(()) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { let justification_and_finalization_state = altair::process_justification_and_finalization( state, @@ -122,13 +125,14 @@ impl<E: EthSpec> EpochTransition<E> for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &validator_statuses, spec) } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_rewards_and_penalties( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -151,7 +155,10 @@ impl<E: 
EthSpec> EpochTransition<E> for Slashings { spec, )?; } - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) @@ -203,7 +210,9 @@ impl<E: EthSpec> EpochTransition<E> for HistoricalRootsUpdate { impl<E: EthSpec> EpochTransition<E> for HistoricalSummariesUpdate { fn run(state: &mut BeaconState<E>, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { - BeaconState::Capella(_) => process_historical_summaries_update(state), + BeaconState::Capella(_) | BeaconState::Deneb(_) => { + process_historical_summaries_update(state) + } _ => Ok(()), } } @@ -223,9 +232,10 @@ impl<E: EthSpec> EpochTransition<E> for SyncCommitteeUpdates { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_sync_committee_updates(state, spec) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_sync_committee_updates(state, spec), } } } @@ -234,13 +244,14 @@ impl<E: EthSpec> EpochTransition<E> for InactivityUpdates { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_inactivity_updates( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_inactivity_updates( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ), } } } @@ -249,9 +260,10 @@ impl<E: EthSpec> 
EpochTransition<E> for ParticipationFlagUpdates { fn run(state: &mut BeaconState<E>, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { - altair::process_participation_flag_updates(state) - } + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => altair::process_participation_flag_updates(state), } } } @@ -302,7 +314,7 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> { T::name() != "participation_record_updates" && T::name() != "historical_summaries_update" } - ForkName::Capella => { + ForkName::Capella | ForkName::Deneb => { T::name() != "participation_record_updates" && T::name() != "historical_roots_update" } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 52157d32f8..bc340fa1cb 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -2,8 +2,10 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; -use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella}; +use serde::Deserialize; +use state_processing::upgrade::{ + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, +}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -62,6 +64,7 @@ impl<E: EthSpec> Case for ForkTest<E> { ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), + ForkName::Deneb => upgrade_to_deneb(&mut result_state, spec).map(|_| result_state), }; 
compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 9627d2cde0..db94106975 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -6,8 +6,9 @@ use beacon_chain::{ attestation_verification::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, + blob_verification::GossipVerifiedBlob, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -17,9 +18,9 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, - ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode, - SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, + EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, KzgProof, + ProgressiveBalancesMode, Signature, SignedBeaconBlock, SignedBlobSidecar, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -71,25 +72,27 @@ impl From<PayloadStatus> for PayloadStatusV1 { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step<B, A, AS, P> { +pub enum Step<TBlock, TBlobs, TAttestation, TAttesterSlashing, TPowBlock> { Tick { tick: u64, }, ValidBlock { - block: B, + block: TBlock, }, MaybeValidBlock { - block: B, + block: TBlock, + blobs: Option<TBlobs>, + proofs: Option<Vec<KzgProof>>, valid: bool, }, Attestation { - attestation: A, + attestation: TAttestation, }, AttesterSlashing { - attester_slashing: AS, + 
attester_slashing: TAttesterSlashing, }, PowBlock { - pow_block: P, + pow_block: TPowBlock, }, OnPayloadInfo { block_hash: ExecutionBlockHash, @@ -113,7 +116,9 @@ pub struct ForkChoiceTest<E: EthSpec> { pub anchor_state: BeaconState<E>, pub anchor_block: BeaconBlock<E>, #[allow(clippy::type_complexity)] - pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, AttesterSlashing<E>, PowBlock>>, + pub steps: Vec< + Step<SignedBeaconBlock<E>, BlobsList<E>, Attestation<E>, AttesterSlashing<E>, PowBlock>, + >, } impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { @@ -126,7 +131,7 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { .expect("path must be valid OsStr") .to_string(); let spec = &testing_spec::<E>(fork_name); - let steps: Vec<Step<String, String, String, String>> = + let steps: Vec<Step<String, String, String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. let steps = steps @@ -139,11 +144,25 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { }) .map(|block| Step::ValidBlock { block }) } - Step::MaybeValidBlock { block, valid } => { - ssz_decode_file_with(&path.join(format!("{}.ssz_snappy", block)), |bytes| { - SignedBeaconBlock::from_ssz_bytes(bytes, spec) + Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, + } => { + let block = + ssz_decode_file_with(&path.join(format!("{block}.ssz_snappy")), |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, spec) + })?; + let blobs = blobs + .map(|blobs| ssz_decode_file(&path.join(format!("{blobs}.ssz_snappy")))) + .transpose()?; + Ok(Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, }) - .map(|block| Step::MaybeValidBlock { block, valid }) } Step::Attestation { attestation } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) @@ -204,10 +223,15 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { for step in &self.steps { match step { Step::Tick { tick } => 
tester.set_tick(*tick), - Step::ValidBlock { block } => tester.process_block(block.clone(), true)?, - Step::MaybeValidBlock { block, valid } => { - tester.process_block(block.clone(), *valid)? + Step::ValidBlock { block } => { + tester.process_block(block.clone(), None, None, true)? } + Step::MaybeValidBlock { + block, + blobs, + proofs, + valid, + } => tester.process_block(block.clone(), blobs.clone(), proofs.clone(), *valid)?, Step::Attestation { attestation } => tester.process_attestation(attestation)?, Step::AttesterSlashing { attester_slashing } => { tester.process_attester_slashing(attester_slashing) @@ -300,9 +324,13 @@ impl<E: EthSpec> Tester<E> { )); } - let harness = BeaconChainHarness::builder(E::default()) + let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default()) .spec(spec.clone()) .keypairs(vec![]) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() .recalculate_fork_times_with_genesis(0) @@ -376,16 +404,70 @@ impl<E: EthSpec> Tester<E> { .unwrap(); } - pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { + pub fn process_block( + &self, + block: SignedBeaconBlock<E>, + blobs: Option<BlobsList<E>>, + kzg_proofs: Option<Vec<KzgProof>>, + valid: bool, + ) -> Result<(), Error> { let block_root = block.canonical_root(); + + // Convert blobs and kzg_proofs into sidecars, then plumb them into the availability tracker + if let Some(blobs) = blobs.clone() { + let proofs = kzg_proofs.unwrap(); + let commitments = block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + + // Zipping will stop when any of the zipped lists runs out, which is what we want. Some + // of the tests don't provide enough proofs/blobs, and should fail the availability + // check. 
+ for (i, ((blob, kzg_proof), kzg_commitment)) in blobs + .into_iter() + .zip(proofs) + .zip(commitments.into_iter()) + .enumerate() + { + let signed_sidecar = SignedBlobSidecar { + message: Arc::new(BlobSidecar { + block_root, + index: i as u64, + slot: block.slot(), + block_parent_root: block.parent_root(), + proposer_index: block.message().proposer_index(), + blob, + kzg_commitment, + kzg_proof, + }), + signature: Signature::empty(), + _phantom: Default::default(), + }; + let result = self.block_on_dangerous( + self.harness + .chain + .process_gossip_blob(GossipVerifiedBlob::__assumed_valid(signed_sidecar)), + )?; + if valid { + assert!(result.is_ok()); + } + } + }; + let block = Arc::new(block); - let result = self.block_on_dangerous(self.harness.chain.process_block( - block_root, - block.clone(), - NotifyExecutionLayer::Yes, - || Ok(()), - ))?; - if result.is_ok() != valid { + let result: Result<Result<Hash256, ()>, _> = self + .block_on_dangerous(self.harness.chain.process_block( + block_root, + block.clone(), + NotifyExecutionLayer::Yes, + || Ok(()), + ))? + .map(|avail: AvailabilityProcessingStatus| avail.try_into()); + let success = result.as_ref().map_or(false, |inner| inner.is_ok()); + if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", block_root, @@ -397,8 +479,8 @@ impl<E: EthSpec> Tester<E> { // Apply invalid blocks directly against the fork choice `on_block` function. This ensures // that the block is being rejected by `on_block`, not just some upstream block processing - // function. - if !valid { + // function. When blobs exist, we don't do this. + if !valid && blobs.is_none() { // A missing parent block whilst `valid == false` means the test should pass. 
if let Some(parent_block) = self .harness diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index dbf6c70b29..14fe7ef959 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index abdc1ed4a7..ec89e0f64b 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -1,6 +1,6 @@ use super::*; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::is_valid_genesis_state; use std::path::Path; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs new file mode 100644 index 0000000000..aa48c127b2 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -0,0 +1,47 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::blob_to_kzg_commitment; +use kzg::KzgCommitment; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGBlobToKZGCommitmentInput { + pub blob: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGBlobToKZGCommitment<E: EthSpec> { + pub input: 
KZGBlobToKZGCommitmentInput, + pub output: Option<String>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGBlobToKZGCommitment<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGBlobToKZGCommitment<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let kzg = get_kzg()?; + + let commitment = parse_blob::<E>(&self.input.blob).and_then(|blob| { + blob_to_kzg_commitment::<E>(&kzg, &blob).map_err(|e| { + Error::InternalError(format!("Failed to compute kzg commitment: {:?}", e)) + }) + }); + + let expected = self.output.as_ref().and_then(|s| parse_commitment(s).ok()); + + compare_result::<KzgCommitment, _>(&commitment, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs new file mode 100644 index 0000000000..71e1ff8e23 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -0,0 +1,52 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::compute_blob_kzg_proof; +use kzg::KzgProof; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeBlobKZGProofInput { + pub blob: String, + pub commitment: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeBlobKZGProof<E: EthSpec> { + pub input: KZGComputeBlobKZGProofInput, + pub output: Option<String>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGComputeBlobKZGProof<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + 
decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGComputeBlobKZGProof<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGComputeBlobKZGProofInput| -> Result<_, Error> { + let blob = parse_blob::<E>(&input.blob)?; + let commitment = parse_commitment(&input.commitment)?; + Ok((blob, commitment)) + }; + + let kzg = get_kzg()?; + let proof = parse_input(&self.input).and_then(|(blob, commitment)| { + compute_blob_kzg_proof::<E>(&kzg, &blob, commitment) + .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) + }); + + let expected = self.output.as_ref().and_then(|s| parse_proof(s).ok()); + + compare_result::<KzgProof, _>(&proof, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs new file mode 100644 index 0000000000..98bb749249 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -0,0 +1,62 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::compute_kzg_proof; +use kzg::KzgProof; +use serde::Deserialize; +use std::marker::PhantomData; +use std::str::FromStr; +use types::Hash256; + +pub fn parse_point(point: &str) -> Result<Hash256, Error> { + Hash256::from_str(&point[2..]) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse point: {:?}", e))) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGComputeKZGProofInput { + pub blob: String, + pub z: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGComputeKZGProof<E: EthSpec> { + pub input: KZGComputeKZGProofInput, + pub output: Option<(String, Hash256)>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase 
for KZGComputeKZGProof<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGComputeKZGProof<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGComputeKZGProofInput| -> Result<_, Error> { + let blob = parse_blob::<E>(&input.blob)?; + let z = parse_point(&input.z)?; + Ok((blob, z)) + }; + + let kzg = get_kzg()?; + let proof = parse_input(&self.input).and_then(|(blob, z)| { + compute_kzg_proof::<E>(&kzg, &blob, z) + .map_err(|e| Error::InternalError(format!("Failed to compute kzg proof: {:?}", e))) + }); + + let expected = self + .output + .as_ref() + .and_then(|(s, z)| parse_proof(s).ok().map(|proof| (proof, *z))); + + compare_result::<(KzgProof, Hash256), _>(&proof, &expected) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs new file mode 100644 index 0000000000..226d162b95 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -0,0 +1,100 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::validate_blob; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::{Kzg, KzgCommitment, KzgProof, TrustedSetup}; +use serde::Deserialize; +use std::convert::TryInto; +use std::marker::PhantomData; +use types::Blob; + +pub fn get_kzg() -> Result<Kzg, Error> { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e)))?; + Kzg::new_from_trusted_setup(trusted_setup) + .map_err(|e| Error::InternalError(format!("Failed to initialize kzg: {:?}", e))) +} + +pub fn parse_proof(proof: &str) -> Result<KzgProof, Error> { + hex::decode(strip_0x(proof)?) 
+ .map_err(|e| Error::FailedToParseTest(format!("Failed to parse proof: {:?}", e))) + .and_then(|bytes| { + bytes + .try_into() + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse proof: {:?}", e))) + }) + .map(KzgProof) +} + +pub fn parse_commitment(commitment: &str) -> Result<KzgCommitment, Error> { + hex::decode(strip_0x(commitment)?) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse commitment: {:?}", e))) + .and_then(|bytes| { + bytes.try_into().map_err(|e| { + Error::FailedToParseTest(format!("Failed to parse commitment: {:?}", e)) + }) + }) + .map(KzgCommitment) +} + +pub fn parse_blob<E: EthSpec>(blob: &str) -> Result<Blob<E>, Error> { + hex::decode(strip_0x(blob)?) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse blob: {:?}", e))) + .and_then(|bytes| { + Blob::<E>::new(bytes) + .map_err(|e| Error::FailedToParseTest(format!("Failed to parse blob: {:?}", e))) + }) +} + +fn strip_0x(s: &str) -> Result<&str, Error> { + s.strip_prefix("0x").ok_or(Error::FailedToParseTest(format!( + "Hex is missing 0x prefix: {}", + s + ))) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofInput { + pub blob: String, + pub commitment: String, + pub proof: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProof<E: EthSpec> { + pub input: KZGVerifyBlobKZGProofInput, + pub output: Option<bool>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGVerifyBlobKZGProof<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGVerifyBlobKZGProof<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: 
&KZGVerifyBlobKZGProofInput| -> Result<(Blob<E>, KzgCommitment, KzgProof), Error> { + let blob = parse_blob::<E>(&input.blob)?; + let commitment = parse_commitment(&input.commitment)?; + let proof = parse_proof(&input.proof)?; + Ok((blob, commitment, proof)) + }; + + let kzg = get_kzg()?; + let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { + validate_blob::<E>(&kzg, &blob, commitment, proof) + .map_err(|e| Error::InternalError(format!("Failed to validate blob: {:?}", e))) + }); + + compare_result::<bool, _>(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs new file mode 100644 index 0000000000..24182b69f9 --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -0,0 +1,63 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::validate_blobs; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofBatchInput { + pub blobs: Vec<String>, + pub commitments: Vec<String>, + pub proofs: Vec<String>, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyBlobKZGProofBatch<E: EthSpec> { + pub input: KZGVerifyBlobKZGProofBatchInput, + pub output: Option<bool>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGVerifyBlobKZGProofBatch<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGVerifyBlobKZGProofBatch<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: 
&KZGVerifyBlobKZGProofBatchInput| -> Result<_, Error> { + let blobs = input + .blobs + .iter() + .map(|s| parse_blob::<E>(s)) + .collect::<Result<Vec<_>, _>>()?; + let commitments = input + .commitments + .iter() + .map(|s| parse_commitment(s)) + .collect::<Result<Vec<_>, _>>()?; + let proofs = input + .proofs + .iter() + .map(|s| parse_proof(s)) + .collect::<Result<Vec<_>, _>>()?; + Ok((commitments, blobs, proofs)) + }; + + let kzg = get_kzg()?; + let result = parse_input(&self.input).and_then(|(commitments, blobs, proofs)| { + validate_blobs::<E>(&kzg, &commitments, blobs.iter().collect(), &proofs) + .map_err(|e| Error::InternalError(format!("Failed to validate blobs: {:?}", e))) + }); + + compare_result::<bool, _>(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs new file mode 100644 index 0000000000..e395558e0e --- /dev/null +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -0,0 +1,53 @@ +use super::*; +use crate::case_result::compare_result; +use beacon_chain::kzg_utils::verify_kzg_proof; +use serde::Deserialize; +use std::marker::PhantomData; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct KZGVerifyKZGProofInput { + pub commitment: String, + pub z: String, + pub y: String, + pub proof: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct KZGVerifyKZGProof<E: EthSpec> { + pub input: KZGVerifyKZGProofInput, + pub output: Option<bool>, + #[serde(skip)] + _phantom: PhantomData<E>, +} + +impl<E: EthSpec> LoadCase for KZGVerifyKZGProof<E> { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result<Self, Error> { + decode::yaml_decode_file(path.join("data.yaml").as_path()) + } +} + +impl<E: EthSpec> Case for KZGVerifyKZGProof<E> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Deneb + } + + fn result(&self, _case_index: 
usize, _fork_name: ForkName) -> Result<(), Error> { + let parse_input = |input: &KZGVerifyKZGProofInput| -> Result<_, Error> { + let commitment = parse_commitment(&input.commitment)?; + let z = parse_point(&input.z)?; + let y = parse_point(&input.y)?; + let proof = parse_proof(&input.proof)?; + Ok((commitment, z, y, proof)) + }; + + let kzg = get_kzg()?; + let result = parse_input(&self.input).and_then(|(commitment, z, y, proof)| { + verify_kzg_proof::<E>(&kzg, commitment, proof, z, y) + .map_err(|e| Error::InternalError(format!("Failed to validate proof: {:?}", e))) + }); + + compare_result::<bool, _>(&result, &self.output) + } +} diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index c180774bb6..0ba2c92663 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,6 +1,6 @@ use super::*; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use std::path::Path; use tree_hash::Hash256; use types::{BeaconState, EthSpec, ForkName}; @@ -51,13 +51,10 @@ impl<E: EthSpec> Case for MerkleProofValidity<E> { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.initialize_tree_hash_cache(); - let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) { - Ok(proof) => proof, - Err(_) => { - return Err(Error::FailedToParseTest( - "Could not retrieve merkle proof".to_string(), - )) - } + let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); }; let proof_len = proof.len(); let branch_len = self.merkle_proof.branch.len(); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 21a56dcf2a..4c02126d41 100644 --- 
a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -3,14 +3,15 @@ use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::testing_spec; -use serde_derive::Deserialize; +use serde::Deserialize; +use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair, base, process_attester_slashings, process_bls_to_execution_changes, + altair_deneb, base, process_attester_slashings, process_bls_to_execution_changes, process_deposits, process_exits, process_proposer_slashings, }, process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures, @@ -20,7 +21,8 @@ use state_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyMerge, BeaconState, BlindedPayload, ChainSpec, Deposit, EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, }; @@ -96,9 +98,19 @@ impl<E: EthSpec> Operation<E> for Attestation<E> { &mut ctxt, spec, ), - BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => { + BeaconState::Altair(_) + | BeaconState::Merge(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => { initialize_progressive_balances_cache(state, None, spec)?; - altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) + altair_deneb::process_attestation( + state, + self, + 0, + &mut ctxt, + 
VerifySignatures::True, + spec, + ) } } } @@ -260,13 +272,13 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> { } } -impl<E: EthSpec> Operation<E> for FullPayload<E> { +impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { fn handler_name() -> String { "execution_payload".into() } fn filename() -> String { - "execution_payload.ssz_snappy".into() + "body.ssz_snappy".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -275,9 +287,13 @@ impl<E: EthSpec> Operation<E> for FullPayload<E> { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + Ok(match fork_name { + ForkName::Merge => BeaconBlockBody::Merge(<_>::from_ssz_bytes(bytes)?), + ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), + ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), + _ => panic!(), + }) }) - .map(Into::into) } fn apply_to( @@ -297,13 +313,13 @@ impl<E: EthSpec> Operation<E> for FullPayload<E> { } } } -impl<E: EthSpec> Operation<E> for BlindedPayload<E> { +impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { fn handler_name() -> String { "execution_payload".into() } fn filename() -> String { - "execution_payload.ssz_snappy".into() + "body.ssz_snappy".into() } fn is_enabled_for_fork(fork_name: ForkName) -> bool { @@ -312,9 +328,22 @@ impl<E: EthSpec> Operation<E> for BlindedPayload<E> { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + Ok(match fork_name { + ForkName::Merge => { + let inner = <BeaconBlockBodyMerge<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Merge(inner.clone_as_blinded()) + } + ForkName::Capella => { + let inner = <BeaconBlockBodyCapella<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + 
BeaconBlockBody::Capella(inner.clone_as_blinded()) + } + ForkName::Deneb => { + let inner = <BeaconBlockBodyDeneb<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Deneb(inner.clone_as_blinded()) + } + _ => panic!(), + }) }) - .map(Into::into) } fn apply_to( diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index ee0fc265e1..bb41f6fe12 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use compare_fields_derive::CompareFields; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use state_processing::{ diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 191b45c33a..cf8e6b5b2f 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index dd385d13f4..0da179d536 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -2,7 +2,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; 
use state_processing::per_slot_processing; use types::{BeaconState, EthSpec, ForkName}; diff --git a/testing/ef_tests/src/cases/shuffling.rs b/testing/ef_tests/src/cases/shuffling.rs index b5ce019f5c..e05763c2d8 100644 --- a/testing/ef_tests/src/cases/shuffling.rs +++ b/testing/ef_tests/src/cases/shuffling.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result; use crate::decode::yaml_decode_file; -use serde_derive::Deserialize; +use serde::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list}; use types::ForkName; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 2374ead888..d6c764f52b 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -4,8 +4,8 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{snappy_decode_file, yaml_decode_file}; +use serde::Deserialize; use serde::{de::Error as SerdeError, Deserializer}; -use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; use std::path::{Path, PathBuf}; use tree_hash_derive::TreeHash; diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index d0cc5f9eac..423dc31528 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::SszStaticType; use crate::decode::{snappy_decode_file, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use ssz::Decode; use tree_hash::TreeHash; use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock}; diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index bb4efdb6de..c94ce3a23a 100644 --- 
a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use serde_derive::Deserialize; +use serde::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, @@ -47,6 +47,12 @@ impl<E: EthSpec> LoadCase for TransitionTest<E> { spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec.capella_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Deneb => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 2ed596e25e..6dec934629 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -210,10 +210,6 @@ impl<T, E> SszStaticHandler<T, E> { Self::for_forks(vec![ForkName::Altair]) } - pub fn altair_and_later() -> Self { - Self::for_forks(ForkName::list_all()[1..].to_vec()) - } - pub fn merge_only() -> Self { Self::for_forks(vec![ForkName::Merge]) } @@ -222,9 +218,21 @@ impl<T, E> SszStaticHandler<T, E> { Self::for_forks(vec![ForkName::Capella]) } + pub fn deneb_only() -> Self { + Self::for_forks(vec![ForkName::Deneb]) + } + + pub fn altair_and_later() -> Self { + Self::for_forks(ForkName::list_all()[1..].to_vec()) + } + pub fn merge_and_later() -> Self { Self::for_forks(ForkName::list_all()[2..].to_vec()) } + + pub fn capella_and_later() -> Self { + Self::for_forks(ForkName::list_all()[3..].to_vec()) + } } /// Handler for SSZ types that implement `CachedTreeHash`. 
@@ -629,6 +637,126 @@ impl<E: EthSpec + TypeName> Handler for GenesisInitializationHandler<E> { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGBlobToKZGCommitmentHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGBlobToKZGCommitmentHandler<E> { + type Case = cases::KZGBlobToKZGCommitment<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "blob_to_kzg_commitment".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeBlobKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGComputeBlobKZGProofHandler<E> { + type Case = cases::KZGComputeBlobKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_blob_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGComputeKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGComputeKZGProofHandler<E> { + type Case = cases::KZGComputeKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "compute_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyBlobKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyBlobKZGProofHandler<E> { + type Case = cases::KZGVerifyBlobKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_blob_kzg_proof".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyBlobKZGProofBatchHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyBlobKZGProofBatchHandler<E> { + type Case = 
cases::KZGVerifyBlobKZGProofBatch<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_blob_kzg_proof_batch".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KZGVerifyKZGProofHandler<E>(PhantomData<E>); + +impl<E: EthSpec> Handler for KZGVerifyKZGProofHandler<E> { + type Case = cases::KZGVerifyKZGProof<E>; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "kzg" + } + + fn handler_name(&self) -> String { + "verify_kzg_proof".into() + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct MerkleProofValidityHandler<E>(PhantomData<E>); @@ -654,7 +782,7 @@ impl<E: EthSpec + TypeName> Handler for MerkleProofValidityHandler<E> { // spec. // // https://github.com/sigp/lighthouse/issues/4022 - && fork_name != ForkName::Capella + && fork_name != ForkName::Capella && fork_name != ForkName::Deneb } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 675388ee58..ef12844030 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -1,4 +1,5 @@ //! Mapping from types to canonical string identifiers used in testing. 
+use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; use types::*; @@ -47,8 +48,11 @@ type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyDeneb, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); +type_name!(BlobIdentifier); +type_name_generic!(BlobSidecar); type_name!(Checkpoint); type_name_generic!(ContributionAndProof); type_name!(Deposit); @@ -58,10 +62,12 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(ExecutionPayloadMerge, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadDeneb, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); type_name_generic!(ExecutionPayloadHeaderMerge, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -72,6 +78,7 @@ type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); +type_name_generic!(SignedBlobSidecar); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 33f8d67ec0..d2d30b596c 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -72,14 +72,14 @@ fn operations_sync_aggregate() { #[test] fn operations_execution_payload_full() { - OperationsHandler::<MinimalEthSpec, 
FullPayload<_>>::default().run(); - OperationsHandler::<MainnetEthSpec, FullPayload<_>>::default().run(); + OperationsHandler::<MinimalEthSpec, BeaconBlockBody<_, FullPayload<_>>>::default().run(); + OperationsHandler::<MainnetEthSpec, BeaconBlockBody<_, FullPayload<_>>>::default().run(); } #[test] fn operations_execution_payload_blinded() { - OperationsHandler::<MinimalEthSpec, BlindedPayload<_>>::default().run(); - OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run(); + OperationsHandler::<MinimalEthSpec, BeaconBlockBody<_, BlindedPayload<_>>>::default().run(); + OperationsHandler::<MainnetEthSpec, BeaconBlockBody<_, BlindedPayload<_>>>::default().run(); } #[test] @@ -215,6 +215,7 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; use types::*; @@ -267,6 +268,10 @@ mod ssz_static { .run(); SszStaticHandler::<BeaconBlockBodyCapella<MainnetEthSpec>, MainnetEthSpec>::capella_only() .run(); + SszStaticHandler::<BeaconBlockBodyDeneb<MinimalEthSpec>, MinimalEthSpec>::deneb_only() + .run(); + SszStaticHandler::<BeaconBlockBodyDeneb<MainnetEthSpec>, MainnetEthSpec>::deneb_only() + .run(); } // Altair and later @@ -327,6 +332,10 @@ mod ssz_static { .run(); SszStaticHandler::<ExecutionPayloadCapella<MainnetEthSpec>, MainnetEthSpec>::capella_only() .run(); + SszStaticHandler::<ExecutionPayloadDeneb<MinimalEthSpec>, MinimalEthSpec>::deneb_only() + .run(); + SszStaticHandler::<ExecutionPayloadDeneb<MainnetEthSpec>, MainnetEthSpec>::deneb_only() + .run(); } #[test] @@ -339,30 +348,52 @@ mod ssz_static { ::capella_only().run(); SszStaticHandler::<ExecutionPayloadHeaderCapella<MainnetEthSpec>, MainnetEthSpec> ::capella_only().run(); + SszStaticHandler::<ExecutionPayloadHeaderDeneb<MinimalEthSpec>, MinimalEthSpec> + ::deneb_only().run(); + 
SszStaticHandler::<ExecutionPayloadHeaderDeneb<MainnetEthSpec>, MainnetEthSpec> + ::deneb_only().run(); } #[test] fn withdrawal() { - SszStaticHandler::<Withdrawal, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<Withdrawal, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<Withdrawal, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<Withdrawal, MainnetEthSpec>::capella_and_later().run(); } #[test] fn bls_to_execution_change() { - SszStaticHandler::<BlsToExecutionChange, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<BlsToExecutionChange, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<BlsToExecutionChange, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<BlsToExecutionChange, MainnetEthSpec>::capella_and_later().run(); } #[test] fn signed_bls_to_execution_change() { - SszStaticHandler::<SignedBlsToExecutionChange, MinimalEthSpec>::capella_only().run(); - SszStaticHandler::<SignedBlsToExecutionChange, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<SignedBlsToExecutionChange, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<SignedBlsToExecutionChange, MainnetEthSpec>::capella_and_later().run(); + } + + #[test] + fn blob_sidecar() { + SszStaticHandler::<BlobSidecar<MinimalEthSpec>, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::<BlobSidecar<MainnetEthSpec>, MainnetEthSpec>::deneb_only().run(); + } + + #[test] + fn signed_blob_sidecar() { + SszStaticHandler::<SignedBlobSidecar<MinimalEthSpec>, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::<SignedBlobSidecar<MainnetEthSpec>, MainnetEthSpec>::deneb_only().run(); + } + + #[test] + fn blob_identifier() { + SszStaticHandler::<BlobIdentifier, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::<BlobIdentifier, MainnetEthSpec>::deneb_only().run(); } #[test] fn historical_summary() { - SszStaticHandler::<HistoricalSummary, MinimalEthSpec>::capella_only().run(); - 
SszStaticHandler::<HistoricalSummary, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::<HistoricalSummary, MinimalEthSpec>::capella_and_later().run(); + SszStaticHandler::<HistoricalSummary, MainnetEthSpec>::capella_and_later().run(); } } @@ -532,6 +563,36 @@ fn genesis_validity() { // Note: there are no genesis validity tests for mainnet } +#[test] +fn kzg_blob_to_kzg_commitment() { + KZGBlobToKZGCommitmentHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_compute_blob_kzg_proof() { + KZGComputeBlobKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_compute_kzg_proof() { + KZGComputeKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_blob_kzg_proof() { + KZGVerifyBlobKZGProofHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_blob_kzg_proof_batch() { + KZGVerifyBlobKZGProofBatchHandler::<MainnetEthSpec>::default().run(); +} + +#[test] +fn kzg_verify_kzg_proof() { + KZGVerifyKZGProofHandler::<MainnetEthSpec>::default().run(); +} + #[test] fn merkle_proof_validity() { MerkleProofValidityHandler::<MainnetEthSpec>::default().run(); diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 5c78c09022..c76ef91183 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -2,15 +2,15 @@ name = "eth1_test_rig" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -tokio = { version = "1.14.0", features = ["time"] } -ethers-core = "1.0.2" -ethers-providers = "1.0.2" +tokio = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } ethers-contract = "1.0.2" -types = { path = "../../consensus/types"} -serde_json = "1.0.58" -deposit_contract = { path = "../../common/deposit_contract"} -unused_port = { path = "../../common/unused_port" } -hex = "0.4.2" +types = { workspace = true } +serde_json = { workspace = 
true } +deposit_contract = { workspace = true } +unused_port = { workspace = true } +hex = { workspace = true } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index de3085d222..6de108fcb6 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "execution_engine_integration" version = "0.1.0" -edition = "2021" +edition = { workspace = true } [dependencies] -tempfile = "3.1.0" -serde_json = "1.0.58" -task_executor = { path = "../../common/task_executor" } -tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } -futures = "0.3.7" -exit-future = "0.2.0" -environment = { path = "../../lighthouse/environment" } -execution_layer = { path = "../../beacon_node/execution_layer" } -sensitive_url = { path = "../../common/sensitive_url" } -types = { path = "../../consensus/types" } -unused_port = { path = "../../common/unused_port" } -ethers-core = "1.0.2" -ethers-providers = "1.0.2" -deposit_contract = { path = "../../common/deposit_contract" } -reqwest = { version = "0.11.0", features = ["json"] } -hex = "0.4.2" -fork_choice = { path = "../../consensus/fork_choice" } -logging = { path = "../../common/logging" } +tempfile = { workspace = true } +serde_json = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +futures = { workspace = true } +exit-future = { workspace = true } +environment = { workspace = true } +execution_layer = { workspace = true } +sensitive_url = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } +deposit_contract = { workspace = true } +reqwest = { workspace = true } +hex = { workspace = true } +fork_choice = { workspace = true } +logging = { workspace = true } + +[features] +portable = ["types/portable"] \ No newline at end of file 
diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile index 7062065066..72f8d8f6bb 100644 --- a/testing/execution_engine_integration/Makefile +++ b/testing/execution_engine_integration/Makefile @@ -1,5 +1,5 @@ test: - cargo run --release --locked + cargo run --release --locked --features "$(TEST_FEATURES)" clean: rm -rf execution_clients diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 15e7fdc0f1..5d96520660 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -66,6 +66,7 @@ pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result<String, Command::new("git") .arg("fetch") .arg("--tags") + .arg("--force") .current_dir(repo_dir) .output() .map_err(|e| format!("Failed to fetch tags for {repo_dir:?}: Err: {e}"))?, diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 17654b292a..991118478d 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -18,11 +18,10 @@ pub fn geth_genesis_json() -> Value { "muirGlacierBlock":0, "berlinBlock":0, "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true }, "nonce":"0x42", "timestamp":"0x0", @@ -72,8 +71,10 @@ pub fn nethermind_genesis_json() -> Value { "accountStartNonce": "0x0", "maximumExtraDataSize": "0x20", "minGasLimit": "0x1388", - "networkID": "0x1469ca", - "MergeForkIdTransition": "0x3e8", + "networkID": "0x00146A2E", + "MergeForkIdTransition": "0x0", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x0", "eip150Transition": "0x0", "eip158Transition": "0x0", 
"eip160Transition": "0x0", @@ -101,7 +102,15 @@ pub fn nethermind_genesis_json() -> Value { "eip1559Transition": "0x0", "eip3198Transition": "0x0", "eip3529Transition": "0x0", - "eip3541Transition": "0x0" + "eip3541Transition": "0x0", + "eip3540TransitionTimestamp": "0x0", + "eip3651TransitionTimestamp": "0x0", + "eip3670TransitionTimestamp": "0x0", + "eip3675TransitionTimestamp": "0x0", + "eip3855TransitionTimestamp": "0x0", + "eip3860TransitionTimestamp": "0x0", + "eip4895TransitionTimestamp": "0x0", + "terminalTotalDifficulty": "0x0" }, "genesis": { "seal": { @@ -112,10 +121,10 @@ pub fn nethermind_genesis_json() -> Value { }, "difficulty": "0x01", "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", + "timestamp": "0x63585F88", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "", - "gasLimit": "0x1C9C380" + "gasLimit": "0x400000" }, "accounts": { "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { @@ -123,9 +132,9 @@ pub fn nethermind_genesis_json() -> Value { }, "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { "balance": "10000000000000000000000000" - }, + } }, "nodes": [] - } + } ) } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 5c83a97e21..0bd96a5c93 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -3,7 +3,7 @@ use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::geth_genesis_json; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; +use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; @@ -36,6 +36,13 @@ pub fn build(execution_clients_dir: &Path) { }); } +pub fn clean(execution_clients_dir: &Path) { + let repo_dir = execution_clients_dir.join("go-ethereum"); + if let Err(e) = fs::remove_dir_all(repo_dir) { + eprintln!("Error while deleting folder: 
{}", e); + } +} + /* * Geth-specific Implementation for GenericExecutionEngine */ @@ -60,7 +67,7 @@ impl GenericExecutionEngine for GethEngine { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); + let mut file = fs::File::create(&genesis_json_path).unwrap(); let json = geth_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index e46bc13c8d..efb06833f6 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,5 @@ +#![recursion_limit = "256"] // for inline json + /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. @@ -31,6 +33,7 @@ fn test_geth() { let test_dir = build_utils::prepare_dir(); geth::build(&test_dir); TestRig::new(GethEngine).perform_tests_blocking(); + geth::clean(&test_dir); } fn test_nethermind() { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 8925f1cc84..aad37c32bd 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -2,7 +2,7 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::nethermind_genesis_json; use std::env; -use std::fs::File; +use std::fs; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. 
/// We should fix this so we always pull the latest version of Nethermind. -const NETHERMIND_BRANCH: &str = "release/1.18.2"; +const NETHERMIND_BRANCH: &str = "release/1.21.0"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -47,6 +47,12 @@ pub fn build(execution_clients_dir: &Path) { build_utils::check_command_output(build_result(&repo_dir), || { format!("nethermind build failed using release {last_release}") }); + + // Cleanup some disk space by removing nethermind's tests + let tests_dir = execution_clients_dir.join("nethermind/src/tests"); + if let Err(e) = fs::remove_dir_all(tests_dir) { + eprintln!("Error while deleting folder: {}", e); + } } /* @@ -68,7 +74,8 @@ impl NethermindEngine { .join("bin") .join("Release") .join("net7.0") - .join("Nethermind.Runner") + .join("linux-x64") + .join("nethermind") } } @@ -76,7 +83,7 @@ impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(genesis_json_path).unwrap(); + let mut file = fs::File::create(genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); datadir diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 654b8628b8..48195f871d 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -18,7 +18,9 @@ use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); + +const TEST_FORK: ForkName = 
ForkName::Capella; struct ExecutionPair<E, T: EthSpec> { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. @@ -110,7 +112,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { let (runtime_shutdown, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - let mut spec = MainnetEthSpec::default_spec(); + let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); spec.terminal_total_difficulty = Uint256::zero(); let fee_recipient = None; @@ -269,8 +271,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { Slot::new(1), // Insert proposer for the next slot head_root, proposer_index, - // TODO: think about how to test different forks - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), + PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + Some(vec![]), + None, + ), ) .await; @@ -308,8 +315,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); let valid_payload = self .ee_a .execution_layer @@ -318,8 +330,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, ) .await @@ -358,10 +369,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { * Provide the valid payload back to the EE again. 
*/ + // TODO: again consider forks here let status = self .ee_a .execution_layer - .notify_new_payload(&valid_payload) + .notify_new_payload(valid_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -409,12 +421,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { * Provide an invalidated payload to the EE. */ + // TODO: again think about forks here let mut invalid_payload = valid_payload.clone(); *invalid_payload.prev_randao_mut() = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer - .notify_new_payload(&invalid_payload) + .notify_new_payload(invalid_payload.try_into().unwrap()) .await .unwrap(); assert!(matches!( @@ -448,8 +461,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { .execution_layer .get_suggested_fee_recipient(proposer_index) .await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + Some(vec![]), + None, + ); let second_payload = self .ee_a .execution_layer @@ -458,8 +476,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { &payload_attributes, forkchoice_update_params, builder_params, - // FIXME: think about how to test other forks - ForkName::Merge, + TEST_FORK, &self.spec, ) .await @@ -473,10 +490,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { * Provide the second payload back to the EE again. 
*/ + // TODO: again consider forks here let status = self .ee_a .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -489,11 +507,15 @@ impl<E: GenericExecutionEngine> TestRig<E> { */ let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); - // TODO: think about how to handle different forks // To save sending proposer preparation data, just set the fee recipient // to the fee recipient configured for EE A. - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + Some(vec![]), + None, + ); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; @@ -520,17 +542,14 @@ impl<E: GenericExecutionEngine> TestRig<E> { * * Provide the second payload, without providing the first. */ + // TODO: again consider forks here let status = self .ee_b .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); - // TODO: we should remove the `Accepted` status here once Geth fixes it - assert!(matches!( - status, - PayloadStatus::Syncing | PayloadStatus::Accepted - )); + assert!(matches!(status, PayloadStatus::Syncing)); /* * Execution Engine B: @@ -561,10 +580,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { * Provide the first payload to the EE. 
*/ + // TODO: again consider forks here let status = self .ee_b .execution_layer - .notify_new_payload(&valid_payload) + .notify_new_payload(valid_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -578,7 +598,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - .notify_new_payload(&second_payload) + .notify_new_payload(second_payload.clone().try_into().unwrap()) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -630,11 +650,13 @@ async fn check_payload_reconstruction<E: GenericExecutionEngine>( .get_engine_capabilities(None) .await .unwrap(); + assert!( // if the engine doesn't have these capabilities, we need to update the client in our tests capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1, "Testing engine does not support payload bodies methods" ); + let mut bodies = ee .execution_layer .get_payload_bodies_by_hash(vec![payload.block_hash()]) diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index ac77349c58..5fe820d15b 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -2,16 +2,16 @@ name = "node_test_rig" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } [dependencies] -environment = { path = "../../lighthouse/environment" } -beacon_node = { path = "../../beacon_node" } -types = { path = "../../consensus/types" } -tempfile = "3.1.0" -eth2 = { path = "../../common/eth2" } -validator_client = { path = "../../validator_client" } -validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } -sensitive_url = { path = "../../common/sensitive_url" } -execution_layer = { path = "../../beacon_node/execution_layer" } -tokio = { version = "1.14.0", features = ["time"] } +environment = { workspace = true } +beacon_node = { workspace = true } +types = { workspace = true } 
+tempfile = { workspace = true } +eth2 = { workspace = true } +validator_client = { workspace = true } +validator_dir = { workspace = true } +sensitive_url = { workspace = true } +execution_layer = { workspace = true } +tokio = { workspace = true } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 394f8558fa..3f08c83716 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -98,7 +98,7 @@ pub fn testing_client_config() -> ClientConfig { // Setting ports to `0` means that the OS will choose some available port. client_config .network - .set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 0, 0); + .set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 0, 0, 0); client_config.network.upnp_enabled = false; client_config.http_api.enabled = true; client_config.http_api.listen_port = 0; @@ -115,6 +115,9 @@ pub fn testing_client_config() -> ClientConfig { genesis_time: now, }; + // Simulator tests expect historic states to be available for post-run checks. + client_config.chain.reconstruct_historic_states = true; + // Specify a constant count of beacon processor workers. Having this number // too low can cause annoying HTTP timeouts, especially on Github runners // with 2 logical CPUs. 
@@ -217,14 +220,13 @@ impl<E: EthSpec> LocalValidatorClient<E> { config.validator_dir = files.validator_dir.path().into(); config.secrets_dir = files.secrets_dir.path().into(); - ProductionValidatorClient::new(context, config) + let mut client = ProductionValidatorClient::new(context, config).await?; + + client + .start_service() .await - .map(move |mut client| { - client - .start_service() - .expect("should start validator services"); - Self { client, files } - }) + .expect("should start validator services"); + Ok(Self { client, files }) } } @@ -248,7 +250,7 @@ impl<E: EthSpec> LocalExecutionNode<E> { panic!("Failed to write jwt file {}", e); } Self { - server: MockServer::new_with_config(&context.executor.handle().unwrap(), config), + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, None), datadir, } } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index a01c133fd9..eadcaf51b2 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -2,20 +2,20 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] node_test_rig = { path = "../node_test_rig" } -eth1 = {path = "../../beacon_node/eth1"} -execution_layer = {path = "../../beacon_node/execution_layer"} -types = { path = "../../consensus/types" } -parking_lot = "0.12.0" -futures = "0.3.7" -tokio = "1.14.0" -eth1_test_rig = { path = "../eth1_test_rig" } -env_logger = "0.9.0" -clap = "2.33.3" -rayon = "1.4.1" +eth1 = { workspace = true } +execution_layer = { workspace = true } +types = { workspace = true } +parking_lot = { workspace = true } +futures = { workspace = true } +tokio = { workspace = true } +eth1_test_rig = { workspace = true } +env_logger = { workspace = true } +clap = { workspace = true } +rayon = { workspace = true } sensitive_url = { 
path = "../../common/sensitive_url" } diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index b3d7afd7b9..dc8bf0d27d 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -14,6 +14,7 @@ use std::{sync::Arc, time::Duration}; use types::{Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; +const QUIC_PORT: u16 = 43424; pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423"; pub const EXECUTION_PORT: u16 = 4000; @@ -63,9 +64,10 @@ impl<E: EthSpec> LocalNetwork<E> { std::net::Ipv4Addr::UNSPECIFIED, BOOTNODE_PORT, BOOTNODE_PORT, + QUIC_PORT, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); beacon_config.network.discv5_config.table_filter = |_| true; let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { @@ -150,13 +152,16 @@ impl<E: EthSpec> LocalNetwork<E> { .expect("bootnode must have a network"), ); let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; + let libp2p_tcp_port = BOOTNODE_PORT + count; + let discv5_port = BOOTNODE_PORT + count; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, - BOOTNODE_PORT + count, - BOOTNODE_PORT + count, + libp2p_tcp_port, + discv5_port, + QUIC_PORT + count, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); + beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); + beacon_config.network.enr_tcp4_port = Some(libp2p_tcp_port.try_into().unwrap()); beacon_config.network.discv5_config.table_filter = |_| true; beacon_config.network.proposer_only = is_proposer; } diff --git 
a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index a25b3c31c1..a1b6ed3b87 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -2,14 +2,17 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -state_processing = { path = "../../consensus/state_processing" } -types = { path = "../../consensus/types" } -ethereum_ssz = "0.5.0" -beacon_chain = { path = "../../beacon_node/beacon_chain" } -lazy_static = "1.4.0" -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +state_processing = { workspace = true } +types = { workspace = true } +ethereum_ssz = { workspace = true } +beacon_chain = { workspace = true } +lazy_static = { workspace = true } +tokio = { workspace = true } + +[features] +portable = ["beacon_chain/portable"] \ No newline at end of file diff --git a/testing/state_transition_vectors/Makefile b/testing/state_transition_vectors/Makefile index e06c719188..437aa50b00 100644 --- a/testing/state_transition_vectors/Makefile +++ b/testing/state_transition_vectors/Makefile @@ -2,7 +2,7 @@ produce-vectors: cargo run --release test: - cargo test --release + cargo test --release --features "$(TEST_FEATURES)" clean: rm -r vectors/ diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 7e7fd23e0d..3b9235cc4e 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -57,7 +57,7 @@ impl ExitTest { block_modifier(&harness, block); }) .await; - (signed_block, state) + (signed_block.0, state) } fn process( diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index 3d91862dbb..63bb87c06e 100644 --- 
a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "test-test_logger" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -logging = { path = "../../common/logging" } -slog = "2.7.0" +logging = { workspace = true } +slog = { workspace = true } diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index c0fbf66723..38b775b392 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -1,31 +1,30 @@ [package] name = "web3signer_tests" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] [dev-dependencies] -eth2_keystore = { path = "../../crypto/eth2_keystore" } -types = { path = "../../consensus/types" } -tempfile = "3.1.0" -tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } -reqwest = { version = "0.11.0", features = ["json","stream"] } -url = "2.2.2" -validator_client = { path = "../../validator_client" } -slot_clock = { path = "../../common/slot_clock" } -futures = "0.3.7" -exit-future = "0.2.0" -task_executor = { path = "../../common/task_executor" } -environment = { path = "../../lighthouse/environment" } -account_utils = { path = "../../common/account_utils" } -serde = "1.0.116" -serde_derive = "1.0.116" -serde_yaml = "0.8.13" -eth2_network_config = { path = "../../common/eth2_network_config" } -serde_json = "1.0.58" -zip = "0.5.13" -lazy_static = "1.4.0" -parking_lot = "0.12.0" \ No newline at end of file +eth2_keystore = { workspace = true } +types = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +reqwest = { workspace = true } +url = { workspace = true } +validator_client = { workspace = true } +slot_clock = { workspace 
= true } +futures = { workspace = true } +exit-future = { workspace = true } +task_executor = { workspace = true } +environment = { workspace = true } +account_utils = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } +eth2_network_config = { workspace = true } +serde_json = { workspace = true } +zip = { workspace = true } +lazy_static = { workspace = true } +parking_lot = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index dd17ae23b1..463de0c8b3 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -51,7 +51,7 @@ mod tests { /// If the we are unable to reach the Web3Signer HTTP API within this time out then we will /// assume it failed to start. - const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20); + const UPCHECK_TIMEOUT: Duration = Duration::from_secs(30); /// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when /// debugging. diff --git a/testing/web3signer_tests/tls/README.md b/testing/web3signer_tests/tls/README.md index 53ff97c272..199e304375 100644 --- a/testing/web3signer_tests/tls/README.md +++ b/testing/web3signer_tests/tls/README.md @@ -1,6 +1,6 @@ ## TLS Testing Files The files in this directory are used for testing TLS with web3signer. We store them in this -repository since the are not sensitive and it's simpler than regenerating them for each test. +repository since they are not sensitive and it's simpler than regenerating them for each test. The files were generated using the `./generate.sh` script. 
diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f00e7b7e37..f918e87cf8 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,7 @@ #!/bin/bash openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 061b0e3cd7..24b0a2e5c0 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,32 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFmzCCA4OgAwIBAgIUXpTV/0rd/GAoCfCyzPOtwcb4t7YwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 
+VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC0 -HrD6fJGcqm8zwEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16P -XZitHgAyg3lph1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofv -vGCA7CjLPKZfW6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3b -nKEaRdLCQgTJS3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJ -MGG+TA2ckKIpGT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4 -dYqYAe+aEL5AB4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS -7Gk9ZSCp/1X+fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+ -HGaeZo1k3M0i3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPUR -SN6OdHrO5x38wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8D -un/NunIagmIL5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjP -DKCsnxZZBtPhXC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABozUwMzALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq -hkiG9w0BAQsFAAOCAgEAcCGqC1nhjDiuF87LgzwuKdMV4NEpTGHa8aHrta/UrzO3 -Lf7fcQvB83tR3ZFk9ndlnDbTVr0seAqDDdJxUHyaA3lX6F5g8G6W8bm76w8b5vot -Vl4ohfcA0CIxbCpp773V0qjyZNj9wDIZg8cX8mXcRi4XoUDltD5/yUwRLVjjvJba -tF+vD3NWWuCGRu65qdR3JYJGr4MtbVo06uoeBXcgZrcDsb93chlsuyH337twq2fn -QbqHbuyxAjFxtv125Jmu6li3pu9FUQrnQWQVHzvt2zvR44vOx+yDQHtil9U7H0aU -Nrzqr9OPOApCr7oQ8GoHYn4C7TAs12U/xiPsvuM1puTzbw8ofuKczFRIA8nuyUHU -XTP/9oYyZ/Vs9qyAtIVCCyEfhSobfwZLLFAT4RWzQZ4H0JmtXfNdt+PFPSWg5MZA -W321uulq/JSa4MQUJbNUEeNYeG+NqjhviM00irpt2Baz2EbVAJMT4ClndRQOwrKT -15+icdyvgx5uZbEuvXK6kyU0AHESHxhzN6C5eHPEYkMjVYgftbE7R3cp9TEj3VvK -Ecd1SXTtKOq2J91te10UrceURqquGuGXVUO7PYGVYBNugjlH47qRIwtI0njPg3ep -10XBwkOm1CgvZxHaj4P0NJf+wih+K8Z5Dg1+90nnJ4mxGFFIW8m7Cfn1tPFmEPo= +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 +R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 +aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE 
+jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw +Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe +V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI +0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM +VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr +67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f +kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa +3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf +TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV +HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO +8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE +x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI +dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h +vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ +js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 +0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP +sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ +1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex +XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI +SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 +nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index bbc69ca38b..d00b6c2122 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC0HrD6fJGcqm8z -wEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16PXZitHgAyg3lp -h1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofvvGCA7CjLPKZf 
-W6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3bnKEaRdLCQgTJ -S3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJMGG+TA2ckKIp -GT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4dYqYAe+aEL5A -B4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS7Gk9ZSCp/1X+ -fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+HGaeZo1k3M0i -3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPURSN6OdHrO5x38 -wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8Dun/NunIagmIL -5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjPDKCsnxZZBtPh -XC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABAoICAQCXUo2W856Vwy5HiQ7t7JWv -CZAdj3pyp7yBnilC8GQhONGsntdw8M2rDVG05Nusqs4nnheNoX3C8mfHO7x/Q3FY -lKTQZ+DuDhyIz9k+N8kP6ca6dnlvkao3asYn1n9rZyy3QUjGJyGilWKlDGroJsrj -dCX6GidHEH8kgruXPdB7wLdi62KgCjkKiK5zPbhiNwd1gGJsoyqMn1BMGQmYFlHG -yJ+C2Lij1lSYboZcj18EK6N/9vfc0GPU+R2dh8qseIkskWQcruJknbJO2vBEh7yI -OKCrOqhHWRQCUwh1WxabNRLP3JGM+BNx8VZgisRnIsdeoMl+KWo1wklDm8+fa9Tx -4xquIy+4PzmobWXiWBpirF7bTNhyZ4vIaMSTOP5TYiliom/hJtcpAwLf9eXxMfti -vRAogZEtr0eKTieH72dwsBVx6wNlxhazvD+ZKIq7OIzJRA6Do2H+BAmz/l4mgVR/ -geL3u0fn0j/Y+8OyFE3P+8D/PqgPzLgTYa5QSp6JtHxNlVcmWefJiLtZDAJvPpeo -UVsA+E2BHsrGveLk15GF9F+vJ867qKT7luQac3zF7V0hE9pktUKM2gY+Jy455w5i -cMxyjt4RAKY8AHAmFvCRQHNdjU2o1UjVFgYsQTYsOdvAiyq0xEJFkbeR2Zxz2sJW -JWK+YlT+UEGDL5SCaXzP4QKCAQEA7gRAy/Xq0Fjq7UZvc7oJ62h6BmseFL9BuKlW -QmvVFAilYeQVejl/ubafyL4Z9ntEeCGTkv8H4DeALs9A/isFOcDxZDoelCETrSxI -CfXllob24276eTc5dBdHmofBjRgIbovnyuFRYzK5uDalVAxYsZPFOp9/qtGa25ex -uIcyJwX+ivqqtA9B5CHu7p/znNrp155xLwGpVczx4xGqjPPr5N2rwZFOXufGFULH -AKbJBSUxiMMJnb1rN8aIuTo/Utr3/i7hc7AUO3//qieyjLdXe8tESqgxzTNvfZk3 -qYtPk4GSHql7Eesxg19fzVdG+LTnzfRKOfOtcZJPRFGGW29fjwKCAQEAwbqXsZvC -7AmmmeVVAPL7q5pXAxSEMK7VsJzPJ7G6MRQ37YjkNRcCf7SRQqNBGQubVkv3Qzvc -rmMhT9I5QfCR2JXQtrH1y09eS45T6NYbRkT6NA3E3XNmRIPO+wIeDV32v5jJwhIk -7ayuG2zBsAryxNvg3us3pWHeIQ45sX0JqNil6BTemYRBrCZmCRWHndl72zDbtR23 -kVt9GKaycSPyCZQ7yE4ZWD2VsrbgEidVJEQagknsjQrldMO68GLbHCP2ZyrIUhKN -2eeuHJpZPz+pahQ55MAEvjIsJKPWsg8cut2Vo4sqgez+xiz0v/nWiPLtvxdN+DHP 
-tAVbrw+0NeqnMQKCAQB3GsO+DLpLNiOhRpzhAViTZ32glpu/8BEYMgzLQiCnXMg9 -myAwQHOs4DlG//IICJkzsEGjzmEHj15iji3MwoRj6SwiZn8EyySIhN8rtNQFplYH -a3KFk9/5OukG6CYvz7Xwc6wzNts+U5TiHN5Ql7kOa47HjicZuLfQaTFy0JyFMJe2 -vkcLwZLMcTqaSIpklJtt3Yhv6FnvaJYmdaGt1SXXKiIXw/m+via+XuMsbUmsfHc0 -I709JRtxFrU2U3J6qL5ugNEqzhLhz2SFpkXP6rMpbIcpAM+jCrkg1bon6mGQw8b1 -9wNx7Qqi3egX3jPSotxYkIVQSKMjcP6fhlhAixP7AoIBAH1ynKQwHurF3RIuxPqW -XY3jpZCjCm6T6GAzSpmDpvP9CbJRQKV4Pu//N0kVeiQDthUNoBHzg5WRL5MGqHkg -lPDRIpQLbQS4YnE+uus9KfA43mQyvlZAUerwB2nXFyrEu/GZuJxpL2yQszWjGVEr -5cTANT9kxWXcmACDu6xJMaYalGRSj0qNsBEP1GbxgB4hJOjtHHiNw77mpXz/BPHq -uuKlEIlGuXbAel19ul9HBQU07I2N3RYABlG0JStgeE4io35u38T1qtF+CusOr9gb -G1NLwal1Bh07VAZt6arnykzfC/UZOu9jTh96IQrnd5q65GUnbB/Z8Yu7JIGaA7Ie -PyECggEAPZlzqPCdNcmdoCSNIDCDYZBVf2xZX8591xdphMG59Jrckp5kl5LM5bjQ -tysj1LJpMK+l60b3r8BI8a4lvj+eBqwBUck82/IImTedE9/oLF3Z64kLd1tr3aGa -W5jLXjThFF20BqfD+YbmFVEdHTwN2L+4kN0VvP/6oLadxogTLwQruMFoPlsD4B19 -HDcAKe6OnyWMer/X9nq9OY6GFGc4X6wHjJ8pj4aa4HE8VNNq40GMkRZOZaJvaPqh -orK9SC50qdJtrVQeD4fhfZMVzmRyE4RSSQBPfc9zq/sO/pjUfV9uK4c99FDbviIf -JAkxGuYLZeyrHEyeKLm7S77SLipKWg== +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF +vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA +61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE +t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI +pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA +H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q +ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq +zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r +dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 +bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H +NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 +55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk +pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs 
+MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp +VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD +wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U +XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f +3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm +FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL +JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo +HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj +CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY +zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf +twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc +k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL +xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ +QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 +nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa +6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S +O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf ++e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr +bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp +NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW +RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG +lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr +UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE +xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf +I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR +y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa +fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ +9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch +IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv +Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft 
+Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c +GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 +LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts +ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T +NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh +HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg +vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index 22b7d7f425..73468fa084 100644 Binary files a/testing/web3signer_tests/tls/lighthouse/key.p12 and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem index 460cb8b400..6266cadf9b 100644 --- a/testing/web3signer_tests/tls/lighthouse/web3signer.pem +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -1,32 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr -aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 -nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 -xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k -toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW -yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo 
-SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU -2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt -fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g -jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF -/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 -8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq -hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL -gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk -zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz -7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt -cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf -w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h -jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q -pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg -JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l -b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G -r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 +4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS +8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E 
+BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut +OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem index 460cb8b400..6266cadf9b 100644 --- a/testing/web3signer_tests/tls/web3signer/cert.pem +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -1,32 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr -aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 -nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 -xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k 
-toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW -yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo -SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU -2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt -fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g -jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF -/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 -8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq -hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL -gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk -zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz -7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt -cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf -w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h -jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q -pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg -JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l -b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G -r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS +cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz +1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa +J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H +9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G +WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB +YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 +4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 +HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr +rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS 
+8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN +/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV +HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg +/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu +taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 +RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d +0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv +gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ +v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut +OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 +mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 +4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z +yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX +JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key index 6e5171f374..d969753406 100644 --- a/testing/web3signer_tests/tls/web3signer/key.key +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJRQIBADANBgkqhkiG9w0BAQEFAASCCS8wggkrAgEAAoICAQDraQUU4O7K/aBA -iH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49nki1q0rLEU/x -J0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85xfO9th/RkJkg -pzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07ktoF0N+yLGW76 -yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPWyAVTDi3bWmvh -3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLoSMKkyPsm6IN6 -0GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU2DDGdHpxut6h -4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/ktfhE+8zavQzjs -xm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06gjjvN9WdsCXmT -EShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF/Pqu0WQd82Cg 
-G1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn38HEnyLEyA3e8 -a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABAoICAQDihR2kp4Rfw4luT2nNUm5C -JFAxJH/vLT5uX1Gm8XWPI9oC21dnu6Asd5RskrGfSouWszZXyUmg+TmpXRSa796t -hjHS0KW59HBxvYDx18mEXJXHWbcK/L5D5iFmpMYHH6xiFT6i8BrR9ofCSeCU52SF -CkEzGZJ0pfR/w4dIvjGWNNcsoI2mp2hl9/84fco8ol7x6UPL5vwwJPsLS0hqwmAz -v+994IKCT1EQllEGhv0pY7fPscXF9pOXDbnmYjwqpEhzJekpsF0j03A32R/4dOx2 -x8eOpngLv2Hczg5RSpbzRF4X0yJVANg/AlJJZmkYGOZ5qXnSQqqZF+dcSCvVVwhO -GS7uci6Mcy7Ov0Gj9HWX8As0SofPtUMuO7k/nJYOzcgY+4agyIDrylIeG86gdCDQ -hGVz+T5reJZIBMp66GPT6M8r36q50cx2x9nJjxLlIjvly1EruVjQoSMUfjewHG91 -xJI0iFhlbBrCpyLx3X0smMEr0vJzM5J0GtdxQdcSocDy5244+4zuslAXgsEYwHYx -WYFMsotRif8aB2b3OSt0yH+Heh06dZehvwWa4F4/3qlP48e0/CWIL7Y/tBgZv8Gh -n3F7HsHvMx6qQqMY5AxudbkpKdM9W84gXriHPIsO2oZEU6N65J/Bpq5Ve4JBlih1 -Ji0CtvHlAR2dhGkj6Q36MQKCAQEA9z/HTd8hd4FNEfn595OVWr9CeZc1zAlNa94I -lvTLhLEFcMkGsZd9KyV87MOV3p9m+XI7UJmqllIHOkwrECF2wzFssguPk+RAJ5hW -LZJgsF0fPnhX0qJFXzSNzzqAICES6+s9jvHMO9PhtF59uv4zsRFEBmKAr0AN8Zsk -rEk+2Tl2RgC+sxzRS767De9CrbSjxm+qAHuFFh8QX/N/mPoLUa+V5Oh2srA5bTHn -t0vyfQQ9+gqTBJDy51VGYlYw5OQBAiOPTgzbSmm2gqdWYgGn2Sp5IBQLF5nGGGsV -70DvnsoxViqpsv+yObAF9PqXnu6UGoB023Jr8x683bU9/jQFLQKCAQEA8735Vbbc -kncVJIpIlG7SDPmlLCFnxokvWWmyJS2J4SrIJJykn30qknGGZFFn67NB5PulAEaw -mdG58FIxxkm8bEKwebEhdnB9sP8k3TvddPKlBXYb1WuGxzyF/xlHniEJ7jN0YAAz -D1BLxTP1OM47iX5ocyVpOPbAdq/yZK0bffvIUy/QKLeJNx0d59PKpJRb4y6T/LvS -tp3UHrBqCNYYoKsZluS9Kg6WJF4g269yn2LSdtzQlAW1IT3DgO7h+2UBYI4FwMao -BZVew44CjljGCTA2KL4jdsqnTyt0qzzAiJZ0CGkJY9gal16ODHcBUKfNGYvjU8pf -2qDEiCn0HayXNwKCAQEAlOscLuHy9Viyw94NWbnRXlwOPM//fgooUIzmHY4Jur0o -arsZxgNZR5CHws82yGS4EAxmf3Bel7WGVu2jjk6pin2NW1utOcVjgrW1SjN8+xzL -gcPYGazVHbe4phU1MKTbEa+ZXyxx96LxscKr9eG/3qlokHPp0CRDgb8RApgHO6zp -eNZgBd+YjAewAH+YaKmBbza4bRv4l89T/Ibb1pbcFHIuVTZSr+OGYyeIyhT7U6Mn -dR/DVx+6vezVvMrvHh3aIaCrYrZJqnMrk1wYomUe5KU5WUHZQHjFINX22ykAamKb -/qsplP9/KFHF9Lyub/KAz8mJGNe8/y0HUn4kfaR1bQKCAQEAhZHTsx8UXMcZNP76 -qyzXuviLhVWBExFWez8quqjr6BKTv0yAAk6LJ9lCdnMN6eI/+AXW9AHJAWIm7QV9 
-9VWvBfy9zNI+rjMTDg2j3ADUaSQXPpjsw9W69C+8loD5+DPOx1Q3L+ysDnZIL3c7 -qLeLdNtqzb7wnKDL876TrIwYhr+VldCb19RMQ4GXQ9WSNQKAIE0EF/mtjRmMhozS -bqk0scdRrJkI+KUpriBPDVRmEeYLw8taGePO0LqSCnPeLu+5A3qQuIWkyfqDBdMq -n2sSizJ6W3Vm5dBEQ2Ri+Pu/3pnkWD+HP8nLOKw+V6JXfCWYhaldGCvMv3heeufS -uPg9nQKCAQEAp/boT63JB+ahU3VQGtqwlDXkRS/Ge8a7FRp4kjdK7d1mtUDqOJ9U -l2RHgOkqhNuAPy64/07caDK3R7vKeOFmSXCV/WHIcgt46SRwFQECZeyA1R+EkTes -tseTngdFrQ10Xf+DmLNqCyX5KpgQf+ccluyyH6uK6FRI/VfU4sLrUGyOblqHq/c4 -bRR4nMwiw5yga45YhQH8uJF54MI7XaD2/hPCAIJBkx88taRzMUlWl1u1VQosIvtZ -5hCRepq9A44P61c+HI/5fzXAn2xvwR2EiV0hAYLn+rmYgBId/RfcstWUR78A9wpT -/OsV3MTX1gCaTE9Q2GlZVybDh20ZvdBC/g== +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDScvshqu3747j4 +KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz1FPflpj7pVWa +gWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaaJ3smHx2+VOha +MWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H9C0UBIzXP7Pn +GrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6GWLtJvk5ekfOV +lRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pBYKPThE5zW5Kh +IxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK84y5L4BXxxohG +0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8HtmSgkPEgfSV +RxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQrrrIUQAuXDcQX +11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS8kbmmuhxshsn +ZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN/IC4PpwtYUO3 +/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABAoICABRxePXJ+KOpznPE5Owo7BWe +BqTzC/K1xlCYm0v5IJzYEQlM4e4p4wZ+/kR6Hex/nM4IR+bbZpxjcOUObIsWpJTI +VAgS2y5RcTp+UJzfXpJogIpKiqBMNutAqPOrK8Hg797PtlsmAKoBmNn8xqU1+2Oa +FX/rKaJus6qKZ2bz16DnkFUL4foabDJte0IFbd2yAyGv1ZqGiqFKSJFK+wYeoMZU +LzWOEyUR/wK5ryVwJJCY8z9BKAoKNYnb4oHTFlDRDdztIlxv29sR9dtHsjA3EdQc +nOCTNi7eY6JJlucgBSWGrsS6vTvpImGggIIWt6sOh0Px6Fg0F7mFtsESex2GePow +50MwKFbbVo3TUYRYTggJj7ba4+yrl/dsAWJUX3F90xNj/6REF+2+Licb7kgCHQKw +TvdExiikOOFtuFRkl5fqyoM9Ph+sj7/db5Pd53D8vaMjR3Yw/JA5dKPZS5ZKHBs0 +qo7FxV8ZlOESMv2eF6y0kM4wLhUN8wnEWxpsFWtXDNjYIlQ6W5qrfwR1vlnIkrmb 
+bYQCJFtko6CKUEa8yb4OvLgyX6VSskeYEC5zdekivZWJN/OZZa/xIS2nupYqD4GT +Y3QcsEhfzDvVIwI7M+eBwS5qjgdwN2qEGrXva5KKesb2zdjNircKaUahTWJNYHjj +jHGOSY/vyGFH2HFZNYZpAoIBAQDyoMpeXBDQhAXbHpIm6p8KljqRMHU05UeRRWVR +d0RKXGYq/bUzoAhr8F2QE2+HC+2NnBGh6qR5QNO/6H6p8Du6aSXDaDNJxTErOOmY +pAkbOlcA7TjpDSrNUr4EfAXl6vUF7JB8jJHEXIqBkbGWOFYPzwLEwErQAlQN2u4e +u9HKG3Me+DP2IcrCgZ5iWvmjV4l+vXYyBEXoJqHOWEscWXHiz64c336oZqwqKe/x +s8Xy2sd6FRU/mp34wXT4kZ56/U4BV+DEN20fffBiTfMQxKmXhMykmD/O63dASCiA +seZrZK5mRND+aS95MqI6FMm0ToKj24RvvAWR8w50cuF7wl5zAoIBAQDeDC6ImN7K +mSLaMBaIhoZsJDdG0cJiFPRmwtepeoWt4qUWuc51LOFthhlkyGx/JbEzFMK6uYTu +hHHNOgk6ydrz1+HOzpSvN0Iz61j1hJd8Ve/0MyTBg912FPe2p3hR9dN4j5Ly+oes +QvNIr/ReW5HJhDcgXm/9oT68XyzrKM3t93XPoO4wDPSHPbRWE2dzLrNi1xg/ZyRz +ZLAtBsGPG5rVAeSEob0ytZH2H1pHfkRQ/1jSKxwb+QVMfjDd5FrEAMLA4E6J8HFz +RDHTmrveGrR1i5BJrce3VUOAuL7Y3iw6Sb+b1LyA8htxiYfBVdVfCeocDv64m0R5 +NJs6Milm9uk1AoIBAQCdQLForusG+kqBVjMLng0uY2faKjoM6n2UHhIo1tAgEfr1 +6jHDH/nVW5iIhNBICucQXRLgip/HJskXHKzbn6RWkUe0epijO3c+uEhOciKkzw8M +vrOf+LTBFtupNGjuN3ZPPJ/42XKwffoXOEKNRj4hSN5Wfvr+DkREJp0mtjymbVwT +unKTGBu+LRxmSuh5gYbP6iPtDu/wIvnEL12fJim2Azyp4gDJTKJRQZUOZqHpYPrg +mUGIU8IHM/uID3lT5VDldftrsTC8tHdUf4kGWTBB0ASCuVrB1cMYmqwFnUfmWv7d +scRy3+Gw/6w9ULPadPgfE2umr4o8qfe4aazS9YsZAoIBADZH+hQwcr5KQ0fdW5TS +dgf3rn+khYVepAR++yOWLRm9/yeYEo14hD82+fw2Nre6aiAXoibtdT6tp/hIiLsT +X3AexTe+LoDK3Gc+0Edsu2+MvpUO75xS9Q+JvqirNfGrS5/8USsO7Z3B3CFXykBK +2E/P/33tOCljgqegCKYQGo9i4Cz6pV+fuyNYhT5Jjg+NShMOjAHr3/BJm/vV2/l1 +ARuzU77MnyjHVEA7l+FET8URNxBhs4RvEsmJS77itQGXQgTOkMSNv94yvI+DEwwP +sS/PB13LmrgJou/TuevgHCW/o5Sfo9lN1kGiIkq0Be4uyUlErSZJ5qpOnufSHWbr +U0UCggEAC5WM3BXKo11Y+XphsYnpJesiB9C5HMvhnB5oCHH7ffIVqkXp2AiUnWy6 +HE+DwUWFEtRLYr4beTXn+TeunoQa7X5K1JXV41XENf5CsbQTIUnX2j7o2ilCEx9C +rDPtpUZPObqXHBiHSF67Il7GitCud+7YDAGqbJABlV3WF0MkPIfW/cxN3cb65FoI +AEV3OZiS6zvDR91++ovNV5QAmH1vljvipM7kKy5RsLFF8GYa0KNTNJ/EYojKmw00 +2OakG0pjjDcWjfdGI+i5gcHNUZwbgqx4NG/RY3YslJswBhGGlhEGuuUtpH47HTM2 +oJ/aHbXf6PdOO9MYiI/es/dfKK8ywA== -----END PRIVATE KEY----- diff --git 
a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index 459f4fb62e..792dc197f8 100644 Binary files a/testing/web3signer_tests/tls/web3signer/key.p12 and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index de80bb7ceb..c4722fe587 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse 1B:43:E1:58:26:7D:3F:70:BD:DA:32:E9:29:A5:A9:50:EA:B2:A8:C3:0C:82:BF:90:13:ED:5B:E0:7D:5B:0A:C0 +lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 200db73167..0b648a8155 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -2,63 +2,62 @@ name = "validator_client" version = "0.3.5" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@lukeanderson.com.au>"] -edition = "2021" +edition = { workspace = true } [lib] name = "validator_client" path = "src/lib.rs" [dev-dependencies] -tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } +tokio = { workspace = true } [dependencies] -tree_hash = "0.5.0" -clap = "2.33.3" -slashing_protection = { path = "./slashing_protection" } -slot_clock = { path = "../common/slot_clock" } -types = { path = "../consensus/types" } -safe_arith = { path = "../consensus/safe_arith" } -serde = "1.0.116" -serde_derive = "1.0.116" -bincode = "1.3.1" -serde_json = "1.0.58" -slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } -tokio = { version = "1.14.0", features = ["time"] } -tokio-stream = { version = "0.1.3", features = ["sync"] } -futures = "0.3.7" -dirs = "3.0.1" -directory = { path = "../common/directory" 
} -lockfile = { path = "../common/lockfile" } -environment = { path = "../lighthouse/environment" } -parking_lot = "0.12.0" -exit-future = "0.2.0" -filesystem = { path = "../common/filesystem" } -hex = "0.4.2" -deposit_contract = { path = "../common/deposit_contract" } -bls = { path = "../crypto/bls" } -eth2 = { path = "../common/eth2" } -tempfile = "3.1.0" -validator_dir = { path = "../common/validator_dir" } -clap_utils = { path = "../common/clap_utils" } -eth2_keystore = { path = "../crypto/eth2_keystore" } -account_utils = { path = "../common/account_utils" } -lighthouse_version = { path = "../common/lighthouse_version" } -warp_utils = { path = "../common/warp_utils" } -warp = "0.3.2" -hyper = "0.14.4" -ethereum_serde_utils = "0.5.0" -libsecp256k1 = "0.7.0" -ring = "0.16.19" -rand = { version = "0.8.5", features = ["small_rng"] } -lighthouse_metrics = { path = "../common/lighthouse_metrics" } -lazy_static = "1.4.0" -itertools = "0.10.0" -monitoring_api = { path = "../common/monitoring_api" } -sensitive_url = { path = "../common/sensitive_url" } -task_executor = { path = "../common/task_executor" } -reqwest = { version = "0.11.0", features = ["json","stream"] } -url = "2.2.2" -malloc_utils = { path = "../common/malloc_utils" } -sysinfo = "0.26.5" +tree_hash = { workspace = true } +clap = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +types = { workspace = true } +safe_arith = { workspace = true } +serde = { workspace = true } +bincode = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +futures = { workspace = true } +dirs = { workspace = true } +directory = { workspace = true } +lockfile = { workspace = true } +environment = { workspace = true } +parking_lot = { workspace = true } +exit-future = { workspace = true } +filesystem = { workspace = true } +hex = { workspace = true } +deposit_contract = { 
workspace = true } +bls = { workspace = true } +eth2 = { workspace = true } +tempfile = { workspace = true } +validator_dir = { workspace = true } +clap_utils = { workspace = true } +eth2_keystore = { workspace = true } +account_utils = { workspace = true } +lighthouse_version = { workspace = true } +warp_utils = { workspace = true } +warp = { workspace = true } +hyper = { workspace = true } +ethereum_serde_utils = { workspace = true } +libsecp256k1 = { workspace = true } +ring = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +lighthouse_metrics = { workspace = true } +lazy_static = { workspace = true } +itertools = { workspace = true } +monitoring_api = { workspace = true } +sensitive_url = { workspace = true } +task_executor = { workspace = true } +reqwest = { workspace = true, features = ["native-tls"] } +url = { workspace = true } +malloc_utils = { workspace = true } +sysinfo = { workspace = true } system_health = { path = "../common/system_health" } -logging = { path = "../common/logging" } +logging = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 278dc22d0d..baba14c538 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -2,7 +2,7 @@ name = "slashing_protection" version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>", "pscott <scottpiriou@gmail.com>"] -edition = "2021" +edition = { workspace = true } autotests = false [[test]] @@ -10,21 +10,21 @@ name = "slashing_protection_tests" path = "tests/main.rs" [dependencies] -tempfile = "3.1.0" -types = { path = "../../consensus/types" } -rusqlite = { version = "0.28.0", features = ["bundled"] } -r2d2 = "0.8.9" +tempfile = { workspace = true } +types = { workspace = true } +rusqlite = { workspace = true } +r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" -serde = "1.0.116" -serde_derive = "1.0.116" -serde_json = 
"1.0.58" -ethereum_serde_utils = "0.5.0" -filesystem = { path = "../../common/filesystem" } -arbitrary = { version = "1.0", features = ["derive"], optional = true } +serde = { workspace = true } +serde_json = { workspace = true } +ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } [dev-dependencies] -lazy_static = "1.4.0" -rayon = "1.4.1" +lazy_static = { workspace = true } +rayon = { workspace = true } [features] -arbitrary-fuzz = ["arbitrary", "types/arbitrary-fuzz"] +arbitrary-fuzz = ["types/arbitrary-fuzz"] +portable = ["types/portable"] diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index e3d935b4c9..0663b3cba2 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -6,20 +6,23 @@ ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-te ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) - rmfile = if exist $(1) (del /F /Q $(1)) - rmdir = if exist $(1) (rmdir /Q /S $(1)) + rmfile = if exist $(1) (del /F /Q $(1)) + rmdir = if exist $(1) (rmdir /Q /S $(1)) + makedir = if not exist $(1) (mkdir $(1)) else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif $(OUTPUT_DIR): $(TARBALL) $(call rmdir,$@) - mkdir $@ + $(call makedir,$@) tar --strip-components=1 -xzf $^ -C $@ $(TARBALL): diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 99d37c38b9..ad5f21e511 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -1,5 +1,5 @@ use crate::InterchangeError; -use serde_derive::{Deserialize, Serialize}; 
+use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; use std::io; diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index dc828773b9..1bb1fc550b 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -3,7 +3,7 @@ use crate::{ test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}, SigningRoot, SlashingDatabase, }; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs index ee5bb11471..ee8f522cd6 100644 --- a/validator_client/slashing_protection/tests/interop.rs +++ b/validator_client/slashing_protection/tests/interop.rs @@ -25,8 +25,10 @@ fn test_root_dir() -> PathBuf { .join("tests") } +// NOTE: I've combined two tests together to avoid a race-condition which occurs when fighting over +// which test builds the TEST_ROOT_DIR lazy static. 
#[test] -fn generated() { +fn generated_and_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() @@ -37,10 +39,7 @@ fn generated() { let test_case: MultiTestCase = serde_json::from_reader(&file).unwrap(); test_case.run(false); } -} -#[test] -fn generated_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 9869fb8c3f..dc5fc27488 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -192,7 +192,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { .into_iter() .fold(HashMap::new(), |mut map, duty_and_proof| { map.entry(duty_and_proof.duty.committee_index) - .or_insert_with(Vec::new) + .or_default() .push(duty_and_proof); map }); @@ -482,6 +482,14 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { ) -> Result<(), String> { let log = self.context.log(); + if !validator_duties + .iter() + .any(|duty_and_proof| duty_and_proof.selection_proof.is_some()) + { + // Exit early if no validator is aggregator + return Ok(()); + } + let aggregated_attestation = &self .beacon_nodes .first_success(|beacon_node| async move { diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index f012f55c60..c15e55f9cc 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -12,7 +12,7 @@ use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; use parking_lot::RwLock as PLRwLock; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; @@ -322,6 +322,14 @@ impl<E: EthSpec> CandidateBeaconNode<E> { "endpoint_capella_fork_epoch" => ?beacon_node_spec.capella_fork_epoch, "hint" => 
UPDATE_REQUIRED_LOG_HINT, ); + } else if beacon_node_spec.deneb_fork_epoch != spec.deneb_fork_epoch { + warn!( + log, + "Beacon node has mismatched Deneb fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_deneb_fork_epoch" => ?beacon_node_spec.deneb_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, + ); } Ok(()) diff --git a/validator_client/src/beacon_node_health.rs b/validator_client/src/beacon_node_health.rs index 81df0cd992..08fd803822 100644 --- a/validator_client/src/beacon_node_health.rs +++ b/validator_client/src/beacon_node_health.rs @@ -1,5 +1,5 @@ use crate::beacon_node_fallback::Config; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::fmt::{Debug, Display, Formatter}; use types::Slot; diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 98b7f69456..0bf9ad6a15 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -6,9 +6,11 @@ use crate::{ http_metrics::metrics, validator_store::{Error as ValidatorStoreError, ValidatorStore}, }; +use bls::SignatureBytes; use environment::RuntimeContext; -use eth2::BeaconNodeHttpClient; -use slog::{crit, debug, error, info, trace, warn}; +use eth2::types::{BlockContents, SignedBlockContents}; +use eth2::{BeaconNodeHttpClient, StatusCode}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::fmt::Debug; use std::future::Future; @@ -454,68 +456,23 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // // Try the proposer nodes last, since it's likely that they don't have a // great view of attestations on the network. 
- let block = proposer_fallback - .first_success_try_proposers_last(|beacon_node| async move { - let block = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blinded_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - }; - - info!( + let block_contents = proposer_fallback + .first_success_try_proposers_last(|beacon_node| { + let beacon_node = beacon_node; + Self::get_validator_block( + beacon_node, + slot, + randao_reveal_ref, + graffiti, + proposer_index, log, - "Received unsigned block"; - "slot" => slot.as_u64(), - ); - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. 
Beacon chain re-orged" - .to_string(), - )); - } - - Ok::<_, BlockError>(block) + ) }) .await?; + let (block, maybe_blob_sidecars) = block_contents.deconstruct(); let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); + let signed_block = match self_ref .validator_store .sign_block::<Payload>(*validator_pubkey_ref, block, current_slot) @@ -541,6 +498,37 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { ))) } }; + + let maybe_signed_blobs = match maybe_blob_sidecars { + Some(blob_sidecars) => { + match self_ref + .validator_store + .sign_blobs::<Payload>(*validator_pubkey_ref, blob_sidecars) + .await + { + Ok(signed_blobs) => Some(signed_blobs), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently removed + // via the API. + warn!( + log, + "Missing pubkey for blobs"; + "info" => "a validator may have recently been removed from this VC", + "pubkey" => ?pubkey, + "slot" => ?slot + ); + return Ok(()); + } + Err(e) => { + return Err(BlockError::Recoverable(format!( + "Unable to sign blobs: {:?}", + e + ))) + } + } + } + None => None, + }; let signing_time_ms = Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); @@ -551,6 +539,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { "signing_time_ms" => signing_time_ms, ); + let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs)); + // Publish block with first available beacon node. // // Try the proposer nodes first, since we've likely gone to efforts to @@ -558,39 +548,9 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // publish a block. 
proposer_fallback .first_success_try_proposers_first(|beacon_node| async { - match Payload::block_type() { - BlockType::Full => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? - } - BlockType::Blinded => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? - } - } - Ok::<_, BlockError>(()) + let beacon_node = beacon_node; + self.publish_signed_block_contents::<Payload>(&signed_block_contents, beacon_node) + .await }) .await?; @@ -598,12 +558,132 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { log, "Successfully published block"; "block_type" => ?Payload::block_type(), - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), + "deposits" => signed_block_contents.signed_block().message().body().deposits().len(), + "attestations" => signed_block_contents.signed_block().message().body().attestations().len(), "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), + "slot" => signed_block_contents.signed_block().slot().as_u64(), ); Ok(()) } + + async fn publish_signed_block_contents<Payload: AbstractExecPayload<E>>( + &self, + signed_block_contents: &SignedBlockContents<E, Payload>, + beacon_node: BeaconNodeHttpClient, + ) -> Result<(), BlockError> { + let log = self.context.log(); + let slot = signed_block_contents.signed_block().slot(); + match Payload::block_type() { + BlockType::Full => { + let _post_timer = 
metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .clone() + .post_beacon_blocks(signed_block_contents) + .await + .or_else(|e| handle_block_post_error(e, slot, log))? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(signed_block_contents) + .await + .or_else(|e| handle_block_post_error(e, slot, log))? + } + } + Ok::<_, BlockError>(()) + } + + async fn get_validator_block<Payload: AbstractExecPayload<E>>( + beacon_node: BeaconNodeHttpClient, + slot: Slot, + randao_reveal_ref: &SignatureBytes, + graffiti: Option<Graffiti>, + proposer_index: Option<u64>, + log: &Logger, + ) -> Result<BlockContents<E, Payload>, BlockError> { + let block_contents: BlockContents<E, Payload> = match Payload::block_type() { + BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks::<E, Payload>(slot, randao_reveal_ref, graffiti.as_ref()) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blinded_blocks::<E, Payload>( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + }; + + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); + if proposer_index != Some(block_contents.block().proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. 
Beacon chain re-orged".to_string(), + )); + } + + Ok::<_, BlockError>(block_contents) + } +} + +fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> { + // Handle non-200 success codes. + if let Some(status) = err.status() { + if status == StatusCode::ACCEPTED { + info!( + log, + "Block is already known to BN or might be invalid"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } else if status.is_success() { + debug!( + log, + "Block published with non-standard success code"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } + } + Err(BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {err:?}", + ))) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index fe7ca3b1d1..e0c6225789 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -170,6 +170,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-address") .long("http-address") + .requires("http") .value_name("ADDRESS") .help("Set the address for the HTTP address. The HTTP server is not encrypted \ and therefore it is unsafe to publish on a public network. When this \ @@ -189,14 +190,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-port") .long("http-port") + .requires("http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value("5062") + .default_value_if("http", None, "5062") .takes_value(true), ) .arg( Arg::with_name("http-allow-origin") .long("http-allow-origin") + .requires("http") .value_name("ORIGIN") .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). 
\ @@ -207,21 +210,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-allow-keystore-export") .long("http-allow-keystore-export") + .requires("http") .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ API method, which allows exporting keystores and passwords to HTTP API \ consumers who have access to the API token. This method is useful for \ exporting validators, however it should be used with caution since it \ exposes private key data to authorized users.") - .required(false) .takes_value(false), ) .arg( Arg::with_name("http-store-passwords-in-secrets-dir") .long("http-store-passwords-in-secrets-dir") + .requires("http") .help("If present, any validators created via the HTTP will have keystore \ passwords stored in the secrets-dir rather than the validator \ definitions file.") - .required(false) .takes_value(false), ) /* Prometheus metrics HTTP server related arguments */ @@ -234,22 +237,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("metrics-address") .long("metrics-address") + .requires("metrics") .value_name("ADDRESS") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value("127.0.0.1") + .default_value_if("metrics", None, "127.0.0.1") .takes_value(true), ) .arg( Arg::with_name("metrics-port") .long("metrics-port") + .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value("5064") + .default_value_if("metrics", None, "5064") .takes_value(true), ) .arg( Arg::with_name("metrics-allow-origin") .long("metrics-allow-origin") + .requires("metrics") .value_name("ORIGIN") .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). 
\ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 3d53dfa562..ea4c4f0465 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -8,7 +8,7 @@ use directory::{ }; use eth2::types::Graffiti; use sensitive_url::SensitiveUrl; -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index efb14babdb..5d42ed8915 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -516,9 +516,7 @@ impl DoppelgangerService { } // Resolve the index from the server response back to a public key. - let pubkey = if let Some(pubkey) = indices_map.get(&response.index) { - pubkey - } else { + let Some(pubkey) = indices_map.get(&response.index) else { crit!( self.log, "Inconsistent indices map"; diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index da697025f1..6ab3d4969d 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -21,11 +21,12 @@ use eth2::types::{ }; use futures::{stream, StreamExt}; use parking_lot::RwLock; -use safe_arith::ArithError; +use safe_arith::{ArithError, SafeArith}; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use sync::poll_sync_committee_duties; @@ -33,14 +34,6 @@ use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; -/// Since the BN does not like it when we subscribe to slots that are close to the current time, we -/// will only subscribe to slots which are further 
than `SUBSCRIPTION_BUFFER_SLOTS` away. -/// -/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the -/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid -/// bringing in the entire crate. -const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; - /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. const HISTORICAL_DUTIES_EPOCHS: u64 = 2; @@ -62,6 +55,36 @@ const VALIDATOR_METRICS_MIN_COUNT: usize = 64; /// reduces the amount of data that needs to be transferred. const INITIAL_DUTIES_QUERY_SIZE: usize = 1; +/// Offsets from the attestation duty slot at which a subscription should be sent. +const ATTESTATION_SUBSCRIPTION_OFFSETS: [u64; 8] = [3, 4, 5, 6, 7, 8, 16, 32]; + +/// Check that `ATTESTATION_SUBSCRIPTION_OFFSETS` is sorted ascendingly. +const _: () = assert!({ + let mut i = 0; + loop { + let prev = if i > 0 { + ATTESTATION_SUBSCRIPTION_OFFSETS[i - 1] + } else { + 0 + }; + let curr = ATTESTATION_SUBSCRIPTION_OFFSETS[i]; + if curr < prev { + break false; + } + i += 1; + if i == ATTESTATION_SUBSCRIPTION_OFFSETS.len() { + break true; + } + } +}); +/// Since the BN does not like it when we subscribe to slots that are close to the current time, we +/// will only subscribe to slots which are further than 2 slots away. +/// +/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the +/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid +/// bringing in the entire crate. +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -84,6 +107,16 @@ pub struct DutyAndProof { pub duty: AttesterData, /// This value is only set to `Some` if the proof indicates that the validator is an aggregator. pub selection_proof: Option<SelectionProof>, + /// Track which slots we should send subscriptions at for this duty. 
+ /// + /// This value is updated after each subscription is successfully sent. + pub subscription_slots: Arc<SubscriptionSlots>, +} + +/// Tracker containing the slots at which an attestation subscription should be sent. +pub struct SubscriptionSlots { + /// Pairs of `(slot, already_sent)` in slot-descending order. + slots: Vec<(Slot, AtomicBool)>, } impl DutyAndProof { @@ -111,17 +144,55 @@ impl DutyAndProof { } })?; + let subscription_slots = SubscriptionSlots::new(duty.slot); + Ok(Self { duty, selection_proof, + subscription_slots, }) } /// Create a new `DutyAndProof` with the selection proof waiting to be filled in. pub fn new_without_selection_proof(duty: AttesterData) -> Self { + let subscription_slots = SubscriptionSlots::new(duty.slot); Self { duty, selection_proof: None, + subscription_slots, + } + } +} + +impl SubscriptionSlots { + fn new(duty_slot: Slot) -> Arc<Self> { + let slots = ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .filter_map(|offset| duty_slot.safe_sub(offset).ok()) + .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false))) + .collect(); + Arc::new(Self { slots }) + } + + /// Return `true` if we should send a subscription at `slot`. + fn should_send_subscription_at(&self, slot: Slot) -> bool { + // Iterate slots from smallest to largest looking for one that hasn't been completed yet. + self.slots + .iter() + .rev() + .any(|(scheduled_slot, already_sent)| { + slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) + }) + } + + /// Update our record of subscribed slots to account for successful subscription at `slot`. 
+ fn record_successful_subscription_at(&self, slot: Slot) { + for (scheduled_slot, already_sent) in self.slots.iter().rev() { + if slot >= *scheduled_slot { + already_sent.store(true, Ordering::Relaxed); + } else { + break; + } } } } @@ -570,8 +641,24 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( let subscriptions_timer = metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]); - // This vector is likely to be a little oversized, but it won't reallocate. - let mut subscriptions = Vec::with_capacity(local_pubkeys.len() * 2); + // This vector is intentionally oversized by 10% so that it won't reallocate. + // Each validator has 2 attestation duties occurring in the current and next epoch, for which + // they must send `ATTESTATION_SUBSCRIPTION_OFFSETS.len()` subscriptions. These subscription + // slots are approximately evenly distributed over the two epochs, usually with a slight lag + // that balances out (some subscriptions for the current epoch were sent in the previous, and + // some subscriptions for the next next epoch will be sent in the next epoch but aren't included + // in our calculation). We cancel the factor of 2 from the formula for simplicity. + let overallocation_numerator = 110; + let overallocation_denominator = 100; + let num_expected_subscriptions = overallocation_numerator + * std::cmp::max( + 1, + local_pubkeys.len() * ATTESTATION_SUBSCRIPTION_OFFSETS.len() + / E::slots_per_epoch() as usize, + ) + / overallocation_denominator; + let mut subscriptions = Vec::with_capacity(num_expected_subscriptions); + let mut subscription_slots_to_confirm = Vec::with_capacity(num_expected_subscriptions); @@ -584,10 +671,10 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( .read() .iter() .filter_map(|(_, map)| map.get(epoch)) - // The BN logs a warning if we try and subscribe to current or near-by slots. 
Give it a - // buffer. .filter(|(_, duty_and_proof)| { - current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty_and_proof.duty.slot + duty_and_proof + .subscription_slots + .should_send_subscription_at(current_slot) }) .for_each(|(_, duty_and_proof)| { let duty = &duty_and_proof.duty; @@ -599,7 +686,8 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( committees_at_slot: duty.committees_at_slot, slot: duty.slot, is_aggregator, - }) + }); + subscription_slots_to_confirm.push(duty_and_proof.subscription_slots.clone()); }); } @@ -624,6 +712,16 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( "Failed to subscribe validators"; "error" => %e ) + } else { + // Record that subscriptions were successfully sent. + debug!( + log, + "Broadcast attestation subscriptions"; + "count" => subscriptions.len(), + ); + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } } } @@ -1184,3 +1282,67 @@ async fn notify_block_production_service<T: SlotClock + 'static, E: EthSpec>( }; } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn subscription_slots_exact() { + for duty_slot in [ + Slot::new(32), + Slot::new(47), + Slot::new(99), + Slot::new(1002003), + ] { + let subscription_slots = SubscriptionSlots::new(duty_slot); + + // Run twice to check idempotence (subscription slots shouldn't be marked as done until + // we mark them manually). + for _ in 0..2 { + for offset in ATTESTATION_SUBSCRIPTION_OFFSETS { + assert!(subscription_slots.should_send_subscription_at(duty_slot - offset)); + } + } + + // Mark each slot as complete and check that all prior slots are still marked + // incomplete. 
+ for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .enumerate() + { + subscription_slots.record_successful_subscription_at(duty_slot - offset); + for lower_offset in ATTESTATION_SUBSCRIPTION_OFFSETS + .into_iter() + .rev() + .skip(i + 1) + { + assert!(lower_offset < offset); + assert!( + subscription_slots.should_send_subscription_at(duty_slot - lower_offset) + ); + } + } + } + } + #[test] + fn subscription_slots_mark_multiple() { + for (i, offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let duty_slot = Slot::new(64); + let subscription_slots = SubscriptionSlots::new(duty_slot); + + subscription_slots.record_successful_subscription_at(duty_slot - offset); + + // All past offsets (earlier slots) should be marked as complete. + for (j, other_offset) in ATTESTATION_SUBSCRIPTION_OFFSETS.into_iter().enumerate() { + let past = j >= i; + assert_eq!(other_offset >= offset, past); + assert_eq!( + subscription_slots.should_send_subscription_at(duty_slot - other_offset), + !past + ); + } + } + } +} diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index bf21097385..53e0ae3281 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -162,7 +162,7 @@ impl SyncDutiesMap { committees_writer .entry(committee_period) - .or_insert_with(CommitteeDuties::default) + .or_default() .init(validator_indices); // Return shared reference @@ -602,9 +602,7 @@ pub async fn fill_in_aggregation_proofs<T: SlotClock + 'static, E: EthSpec>( // Add to global storage (we add regularly so the proofs can be used ASAP). 
let sync_map = duties_service.sync_duties.committees.read(); - let committee_duties = if let Some(duties) = sync_map.get(&sync_committee_period) { - duties - } else { + let Some(committee_duties) = sync_map.get(&sync_committee_period) else { debug!( log, "Missing sync duties"; diff --git a/validator_client/src/graffiti_file.rs b/validator_client/src/graffiti_file.rs index 5c1f84e10b..29da3dca5a 100644 --- a/validator_client/src/graffiti_file.rs +++ b/validator_client/src/graffiti_file.rs @@ -1,4 +1,4 @@ -use serde_derive::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader}; diff --git a/validator_client/src/http_api/create_signed_voluntary_exit.rs b/validator_client/src/http_api/create_signed_voluntary_exit.rs index b777d15806..a9586da57e 100644 --- a/validator_client/src/http_api/create_signed_voluntary_exit.rs +++ b/validator_client/src/http_api/create_signed_voluntary_exit.rs @@ -1,5 +1,6 @@ use crate::validator_store::ValidatorStore; use bls::{PublicKey, PublicKeyBytes}; +use eth2::types::GenericResponse; use slog::{info, Logger}; use slot_clock::SlotClock; use std::sync::Arc; @@ -11,7 +12,7 @@ pub async fn create_signed_voluntary_exit<T: 'static + SlotClock + Clone, E: Eth validator_store: Arc<ValidatorStore<T, E>>, slot_clock: T, log: Logger, -) -> Result<SignedVoluntaryExit, warp::Rejection> { +) -> Result<GenericResponse<SignedVoluntaryExit>, warp::Rejection> { let epoch = match maybe_epoch { Some(epoch) => epoch, None => get_current_epoch::<T, E>(slot_clock).ok_or_else(|| { @@ -60,7 +61,7 @@ pub async fn create_signed_voluntary_exit<T: 'static + SlotClock + Clone, E: Eth )) })?; - Ok(signed_voluntary_exit) + Ok(GenericResponse::from(signed_voluntary_exit)) } /// Calculates the current epoch from the genesis time and current time. 
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 1528ce712b..1cd503b730 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1214,7 +1214,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) - .or(get_std_remotekeys), + .or(get_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), ) .or(warp::post().and( post_validators @@ -1225,15 +1226,18 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) - .or(post_std_remotekeys), + .or(post_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )) - .or(warp::patch().and(patch_validators)) + .or(warp::patch() + .and(patch_validators.recover(warp_utils::reject::handle_rejection))) .or(warp::delete().and( delete_lighthouse_keystores .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) - .or(delete_std_remotekeys), + .or(delete_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )), ) // The auth route and logs are the only routes that are allowed to be accessed without the API token. 
diff --git a/validator_client/src/http_api/test_utils.rs b/validator_client/src/http_api/test_utils.rs index 7e7af5e711..360d74fbbf 100644 --- a/validator_client/src/http_api/test_utils.rs +++ b/validator_client/src/http_api/test_utils.rs @@ -250,9 +250,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::<ConfigAndPresetBellatrix>() + .get_lighthouse_spec::<ConfigAndPresetCapella>() .await - .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .map(|res| ConfigAndPreset::Capella(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec(), None); diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 35dcfffc55..ba6f2a1976 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -206,9 +206,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::<ConfigAndPresetCapella>() + .get_lighthouse_spec::<ConfigAndPresetDeneb>() .await - .map(|res| ConfigAndPreset::Capella(res.data)) + .map(|res| ConfigAndPreset::Deneb(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec(), None); @@ -503,7 +503,7 @@ impl ApiTester { .await; assert!(resp.is_ok()); - assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch); + assert_eq!(resp.unwrap().data.message.epoch, expected_exit_epoch); self } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index d60872e497..f301af1c21 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -2146,7 +2146,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let web3_vals = 
vals.validator_definitions().clone(); + let web3_vals = vals.validator_definitions(); // Import remotekeys. let import_res = tester @@ -2164,7 +2164,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions().clone(); + let remote_vals = vals.validator_definitions(); // Web3signer should not be overwritten since it is enabled. assert!(web3_vals == remote_vals); diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 52b52126bd..ed16f52d28 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -59,6 +59,11 @@ lazy_static::lazy_static! { "Total count of attempted block signings", &["status"] ); + pub static ref SIGNED_BLOBS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( + "vc_signed_beacon_blobs_total", + "Total count of attempted blob signings", + &["status"] + ); pub static ref SIGNED_ATTESTATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 51da8b7ce4..8212841dbb 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -82,6 +82,7 @@ const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; +const HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -98,6 +99,8 @@ pub struct ProductionValidatorClient<T: EthSpec> { slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option<SocketAddr>, config: Config, + beacon_nodes: Arc<BeaconNodeFallback<SystemTimeSlotClock, T>>, + 
genesis_time: u64, } impl<T: EthSpec> ProductionValidatorClient<T> { @@ -307,6 +310,8 @@ impl<T: EthSpec> ProductionValidatorClient<T> { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, + get_validator_block_ssz: slot_duration + / HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) @@ -504,12 +509,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.service_context("sync_committee".into()), ); - // Wait until genesis has occurred. - // - // It seems most sensible to move this into the `start_service` function, but I'm caution - // of making too many changes this close to genesis (<1 week). - wait_for_genesis(&beacon_nodes, genesis_time, &context).await?; - Ok(Self { context, duties_service, @@ -522,10 +521,12 @@ impl<T: EthSpec> ProductionValidatorClient<T> { config, slot_clock, http_api_listen_addr: None, + genesis_time, + beacon_nodes, }) } - pub fn start_service(&mut self) -> Result<(), String> { + pub async fn start_service(&mut self) -> Result<(), String> { // We use `SLOTS_PER_EPOCH` as the capacity of the block notification channel, because // we don't expect notifications to be delayed by more than a single slot, let alone a // whole epoch! 
@@ -533,6 +534,45 @@ impl<T: EthSpec> ProductionValidatorClient<T> { let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); let log = self.context.log(); + let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + + self.http_api_listen_addr = if self.config.http_api.enabled { + let ctx = Arc::new(http_api::Context { + task_executor: self.context.executor.clone(), + api_secret, + block_service: Some(self.block_service.clone()), + validator_store: Some(self.validator_store.clone()), + validator_dir: Some(self.config.validator_dir.clone()), + secrets_dir: Some(self.config.secrets_dir.clone()), + graffiti_file: self.config.graffiti_file.clone(), + graffiti_flag: self.config.graffiti, + spec: self.context.eth2_config.spec.clone(), + config: self.config.http_api.clone(), + sse_logging_components: self.context.sse_logging_components.clone(), + slot_clock: self.slot_clock.clone(), + log: log.clone(), + _phantom: PhantomData, + }); + + let exit = self.context.executor.exit(); + + let (listen_addr, server) = http_api::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + self.context + .clone() + .executor + .spawn_without_exit(server, "http-api"); + + Some(listen_addr) + } else { + info!(log, "HTTP API server is disabled"); + None + }; + + // Wait until genesis has occurred. 
+ wait_for_genesis(&self.beacon_nodes, self.genesis_time, &self.context).await?; + duties_service::start_update_service(self.duties_service.clone(), block_service_tx); self.block_service @@ -571,42 +611,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?; - let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; - - self.http_api_listen_addr = if self.config.http_api.enabled { - let ctx = Arc::new(http_api::Context { - task_executor: self.context.executor.clone(), - api_secret, - block_service: Some(self.block_service.clone()), - validator_store: Some(self.validator_store.clone()), - validator_dir: Some(self.config.validator_dir.clone()), - secrets_dir: Some(self.config.secrets_dir.clone()), - graffiti_file: self.config.graffiti_file.clone(), - graffiti_flag: self.config.graffiti, - spec: self.context.eth2_config.spec.clone(), - config: self.config.http_api.clone(), - sse_logging_components: self.context.sse_logging_components.clone(), - slot_clock: self.slot_clock.clone(), - log: log.clone(), - _phantom: PhantomData, - }); - - let exit = self.context.executor.exit(); - - let (listen_addr, server) = http_api::serve(ctx, exit) - .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; - - self.context - .clone() - .executor - .spawn_without_exit(server, "http-api"); - - Some(listen_addr) - } else { - info!(log, "HTTP API server is disabled"); - None - }; - if self.config.enable_latency_measurement_service { latency::start_latency_service( self.context.clone(), diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 0de2f2f54f..96bfd2511f 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,6 +37,7 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock<T, 
Payload>), + BlobSidecar(&'a Payload::Sidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof<T>), SelectionProof(Slot), @@ -59,6 +60,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), + SignableMessage::BlobSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -182,6 +184,10 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, + SignableMessage::BlobSidecar(_) => { + // https://github.com/ConsenSys/web3signer/issues/726 + unimplemented!("Web3Signer blob signing not implemented.") + } SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 2c1f0cb3fc..d7d74c9448 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -27,6 +27,7 @@ pub enum ForkName { Altair, Bellatrix, Capella, + Deneb, } #[derive(Debug, PartialEq, Serialize)] @@ -95,6 +96,11 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> Web3SignerObject<'a, T, Pa block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Deneb(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Deneb, + block: None, + block_header: Some(block.block_header()), + }), } } diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 0318a1d5bf..2560561c81 100644 --- 
a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -157,13 +157,11 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { .checked_sub(slot_duration / 3) .unwrap_or_else(|| Duration::from_secs(0)); - let slot_duties = if let Some(duties) = self + let Some(slot_duties) = self .duties_service .sync_duties .get_duties_for_slot::<E>(slot, &self.duties_service.spec) - { - duties - } else { + else { debug!(log, "No duties known for slot {}", slot); return Ok(()); }; diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 365f7f7347..612dd96bcd 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -6,6 +6,7 @@ use crate::{ Config, }; use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; +use eth2::types::VariableList; use parking_lot::{Mutex, RwLock}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, @@ -17,11 +18,13 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; +use types::sidecar::Sidecar; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, - Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, Keypair, PublicKeyBytes, + SelectionProof, SidecarList, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedRoot, SignedSidecar, SignedSidecarList, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, 
SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, @@ -369,11 +372,35 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { } fn signing_context(&self, domain: Domain, signing_epoch: Epoch) -> SigningContext { - SigningContext { - domain, - epoch: signing_epoch, - fork: self.fork(signing_epoch), - genesis_validators_root: self.genesis_validators_root, + if domain == Domain::VoluntaryExit { + match self.spec.fork_name_at_epoch(signing_epoch) { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } + } + // EIP-7044 + ForkName::Deneb => SigningContext { + domain, + epoch: signing_epoch, + fork: Fork { + previous_version: self.spec.capella_fork_version, + current_version: self.spec.capella_fork_version, + epoch: signing_epoch, + }, + genesis_validators_root: self.genesis_validators_root, + }, + } + } else { + SigningContext { + domain, + epoch: signing_epoch, + fork: self.fork(signing_epoch), + genesis_validators_root: self.genesis_validators_root, + } } } @@ -540,6 +567,39 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { } } + pub async fn sign_blobs<Payload: AbstractExecPayload<E>>( + &self, + validator_pubkey: PublicKeyBytes, + blob_sidecars: SidecarList<E, Payload::Sidecar>, + ) -> Result<SignedSidecarList<E, Payload::Sidecar>, Error> { + let mut signed_blob_sidecars = Vec::new(); + for blob_sidecar in blob_sidecars.into_iter() { + let slot = blob_sidecar.slot(); + let signing_epoch = slot.epoch(E::slots_per_epoch()); + let signing_context = self.signing_context(Domain::BlobSidecar, signing_epoch); + let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::<E, Payload>( + SignableMessage::BlobSidecar(blob_sidecar.as_ref()), + signing_context, + &self.spec, + 
&self.task_executor, + ) + .await?; + + metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]); + + signed_blob_sidecars.push(SignedSidecar { + message: blob_sidecar, + signature, + _phantom: PhantomData, + }); + } + + Ok(VariableList::from(signed_blob_sidecars)) + } + pub async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 851510820e..35af2b1ce7 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -1,30 +1,29 @@ [package] name = "validator_manager" version = "0.1.0" -edition = "2021" +edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bls = { path = "../crypto/bls" } -clap = "2.33.3" -types = { path = "../consensus/types" } -environment = { path = "../lighthouse/environment" } -eth2_network_config = { path = "../common/eth2_network_config" } -clap_utils = { path = "../common/clap_utils" } -eth2_wallet = { path = "../crypto/eth2_wallet" } -eth2_keystore = { path = "../crypto/eth2_keystore" } -account_utils = { path = "../common/account_utils" } -serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.58" -ethereum_serde_utils = "0.5.0" -tree_hash = "0.5.0" -eth2 = { path = "../common/eth2", features = ["lighthouse"]} -hex = "0.4.2" -tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } +bls = { workspace = true } +clap = { workspace = true } +types = { workspace = true } +environment = { workspace = true } +eth2_network_config = { workspace = true } +clap_utils = { workspace = true } +eth2_wallet = { workspace = true } +eth2_keystore = { workspace = true } +account_utils = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +ethereum_serde_utils = { workspace = true } +tree_hash = { workspace = true } +eth2 = { workspace = true } +hex = { workspace = true 
} +tokio = { workspace = true } [dev-dependencies] -tempfile = "3.1.0" -regex = "1.6.0" -eth2_network_config = { path = "../common/eth2_network_config" } -validator_client = { path = "../validator_client" } +tempfile = { workspace = true } +regex = { workspace = true } +validator_client = { workspace = true } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 23e2c566dc..67cbc3cc23 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "watch" version = "0.1.0" -edition = "2018" +edition = { workspace = true } [lib] name = "watch" @@ -14,33 +14,35 @@ path = "src/main.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33.3" -log = "0.4.14" -env_logger = "0.9.0" -types = { path = "../consensus/types" } -eth2 = { path = "../common/eth2" } -beacon_node = { path = "../beacon_node"} -tokio = { version = "1.14.0", features = ["time"] } +clap = { workspace = true } +log = { workspace = true } +env_logger = { workspace = true } +types = { workspace = true } +eth2 = { workspace = true } +beacon_node = { workspace = true } +tokio = { workspace = true } axum = "0.6.18" -hyper = "0.14.20" -serde = "1.0.116" -serde_json = "1.0.58" -reqwest = { version = "0.11.0", features = ["json","stream"] } -url = "2.2.2" -rand = "0.7.3" +hyper = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +reqwest = { workspace = true } +url = { workspace = true } +rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -byteorder = "1.4.3" -bls = { path = "../crypto/bls" } -hex = "0.4.2" -r2d2 = "0.8.9" -serde_yaml = "0.8.24" +byteorder = { workspace = true } +bls = { workspace = true } +hex = { workspace = true } +r2d2 = { workspace = true } +serde_yaml = { workspace = true } [dev-dependencies] tokio-postgres = "0.7.5" -http_api = { path = 
"../beacon_node/http_api" } -beacon_chain = { path = "../beacon_node/beacon_chain" } -network = { path = "../beacon_node/network" } -testcontainers = "0.14.0" -unused_port = { path = "../common/unused_port" } -task_executor = { path = "../common/task_executor" } +http_api = { workspace = true } +beacon_chain = { workspace = true } +network = { workspace = true } +# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 +testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } +unused_port = { workspace = true } +task_executor = { workspace = true } +logging = { workspace = true } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index af1cde26b7..dc0b8af6e3 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -1,17 +1,27 @@ #![recursion_limit = "256"] #![cfg(unix)] -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, + ChainConfig, }; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use http_api::test_utils::{create_api_server, ApiServer}; +use log::error; +use logging::test_logger; use network::NetworkReceivers; - use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; +use std::collections::HashMap; +use std::env; +use std::net::SocketAddr; +use std::time::Duration; +use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; use tokio::sync::oneshot; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; +use unused_port::unused_tcp4_port; use url::Url; use watch::{ client::WatchHttpClient, @@ -21,15 +31,40 @@ use watch::{ updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, }; -use log::error; -use std::env; -use 
std::net::SocketAddr; -use std::time::Duration; -use tokio::{runtime, task::JoinHandle}; -use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; -use unused_port::unused_tcp4_port; +#[derive(Debug)] +pub struct Postgres(HashMap<String, String>); -use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; +impl Default for Postgres { + fn default() -> Self { + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_DB".to_owned(), "postgres".to_owned()); + env_vars.insert("POSTGRES_HOST_AUTH_METHOD".into(), "trust".into()); + + Self(env_vars) + } +} + +impl Image for Postgres { + type Args = (); + + fn name(&self) -> String { + "postgres".to_owned() + } + + fn tag(&self) -> String { + "11-alpine".to_owned() + } + + fn ready_conditions(&self) -> Vec<WaitFor> { + vec![WaitFor::message_on_stderr( + "database system is ready to accept connections", + )] + } + + fn env_vars(&self) -> Box<dyn Iterator<Item = (&String, &String)> + '_> { + Box::new(self.0.iter()) + } +} type E = MainnetEthSpec; @@ -91,6 +126,11 @@ impl TesterBuilder { pub async fn new() -> TesterBuilder { let harness = BeaconChainHarness::builder(E::default()) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) + .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build();