diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 7b606dd0f6..b07f2ad3d4 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,6 +5,8 @@ on: branches: - unstable - stable + tags: + - v* env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} @@ -13,20 +15,48 @@ env: LCLI_IMAGE_NAME: ${{ github.repository_owner }}/lcli jobs: - extract-branch-name: + # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX + # which is either empty or `-unstable`. + # + # It would be nice if the arch didn't get spliced into the version between `latest` and + # `unstable`, but for now we keep the two parts of the version separate for backwards + # compatibility. + extract-version: runs-on: ubuntu-18.04 steps: - - name: Extract branch name - run: echo "::set-output name=BRANCH_NAME::$(echo ${GITHUB_REF#refs/heads/})" - id: extract_branch + - name: Extract version (if stable) + if: github.event.ref == 'refs/heads/stable' + run: | + echo "VERSION=latest" >> $GITHUB_ENV + echo "VERSION_SUFFIX=" >> $GITHUB_ENV + - name: Extract version (if unstable) + if: github.event.ref == 'refs/heads/unstable' + run: | + echo "VERSION=latest" >> $GITHUB_ENV + echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV + - name: Extract version (if tagged release) + if: startsWith(github.event.ref, 'refs/tags') + run: | + echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_ENV + echo "VERSION_SUFFIX=" >> $GITHUB_ENV outputs: - BRANCH_NAME: ${{ steps.extract_branch.outputs.BRANCH_NAME }} - build-docker-arm64: + VERSION: ${{ env.VERSION }} + VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} + build-docker-single-arch: + name: build-docker-${{ matrix.binary }} runs-on: ubuntu-18.04 - needs: [extract-branch-name] - # We need to enable experimental docker features in order to use `docker buildx` + strategy: + matrix: + binary: [aarch64, + aarch64-portable, + x86_64, + x86_64-portable] + needs: [extract-version] env: + # We need to enable 
experimental docker features in order to use `docker buildx` DOCKER_CLI_EXPERIMENTAL: enabled + VERSION: ${{ needs.extract-version.outputs.VERSION }} + VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - uses: actions/checkout@v2 - name: Update Rust @@ -34,85 +64,76 @@ jobs: - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Cross build lighthouse binary - run: | + - name: Cross build Lighthouse binary + run: | cargo install cross - make build-aarch64-portable - - name: Move cross-built ARM binary into Docker scope + make build-${{ matrix.binary }} + - name: Move cross-built binary into Docker scope (if ARM) + if: startsWith(matrix.binary, 'aarch64') run: | mkdir ./bin; mv ./target/aarch64-unknown-linux-gnu/release/lighthouse ./bin; - - name: Set Env - if: needs.extract-branch-name.outputs.BRANCH_NAME == 'unstable' + - name: Move cross-built binary into Docker scope (if x86_64) + if: startsWith(matrix.binary, 'x86_64') run: | - echo "TAG_SUFFIX=-unstable" >> $GITHUB_ENV; + mkdir ./bin; + mv ./target/x86_64-unknown-linux-gnu/release/lighthouse ./bin; + - name: Map aarch64 to arm64 short arch + if: startsWith(matrix.binary, 'aarch64') + run: echo "SHORT_ARCH=arm64" >> $GITHUB_ENV + - name: Map x86_64 to amd64 short arch + if: startsWith(matrix.binary, 'x86_64') + run: echo "SHORT_ARCH=amd64" >> $GITHUB_ENV; + - name: Set modernity suffix + if: endsWith(matrix.binary, '-portable') != true + run: echo "MODERNITY_SUFFIX=-modern" >> $GITHUB_ENV; # Install dependencies for emulation. Have to create a new builder to pick up emulation support. 
- - name: Build ARM64 dockerfile (with push) + - name: Build Dockerfile and push run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64 + docker run --privileged --rm tonistiigi/binfmt --install ${SHORT_ARCH} docker buildx create --use --name cross-builder docker buildx build \ - --platform=linux/arm64 \ + --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ - --tag ${IMAGE_NAME}:latest-arm64${TAG_SUFFIX} \ + --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \ --push - build-docker-amd64: - runs-on: ubuntu-18.04 - needs: [extract-branch-name] - steps: - - uses: actions/checkout@v2 - - name: Update Rust - run: rustup update stable - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Set Env - if: needs.extract-branch-name.outputs.BRANCH_NAME == 'unstable' - run: | - echo "TAG_SUFFIX=-unstable" >> $GITHUB_ENV; - - name: Build AMD64 dockerfile (with push) - run: | - docker build \ - --build-arg PORTABLE=true \ - --tag ${IMAGE_NAME}:latest-amd64${TAG_SUFFIX} \ - --file ./Dockerfile . 
- docker push ${IMAGE_NAME}:latest-amd64${TAG_SUFFIX} build-docker-multiarch: + name: build-docker-multiarch${{ matrix.modernity }} runs-on: ubuntu-18.04 - needs: [build-docker-arm64, build-docker-amd64, extract-branch-name] - # We need to enable experimental docker features in order to use `docker manifest` + needs: [build-docker-single-arch, extract-version] + strategy: + matrix: + modernity: ["", "-modern"] env: + # We need to enable experimental docker features in order to use `docker manifest` DOCKER_CLI_EXPERIMENTAL: enabled + VERSION: ${{ needs.extract-version.outputs.VERSION }} + VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Set Env - if: needs.extract-branch-name.outputs.BRANCH_NAME == 'unstable' - run: | - echo "TAG_SUFFIX=-unstable" >> $GITHUB_ENV; - name: Create and push multiarch manifest run: | - docker manifest create ${IMAGE_NAME}:latest${TAG_SUFFIX} \ - --amend ${IMAGE_NAME}:latest-arm64${TAG_SUFFIX} \ - --amend ${IMAGE_NAME}:latest-amd64${TAG_SUFFIX}; - docker manifest push ${IMAGE_NAME}:latest${TAG_SUFFIX} + docker manifest create ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} \ + --amend ${IMAGE_NAME}:${VERSION}-arm64${VERSION_SUFFIX}${{ matrix.modernity }} \ + --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }}; + docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }} build-docker-lcli: runs-on: ubuntu-18.04 - needs: [extract-branch-name] + needs: [extract-version] + env: + VERSION: ${{ needs.extract-version.outputs.VERSION }} + VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} steps: - uses: actions/checkout@v2 - name: Dockerhub login run: | echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Set Env - if: needs.extract-branch-name.outputs.BRANCH_NAME 
== 'unstable' - run: | - echo "TAG_SUFFIX=-unstable" >> $GITHUB_ENV; - name: Build lcli dockerfile (with push) run: | docker build \ --build-arg PORTABLE=true \ - --tag ${LCLI_IMAGE_NAME}:latest${TAG_SUFFIX} \ + --tag ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} \ --file ./lcli/Dockerfile . - docker push ${LCLI_IMAGE_NAME}:latest${TAG_SUFFIX} + docker push ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml new file mode 100644 index 0000000000..f97b271c35 --- /dev/null +++ b/.github/workflows/local-testnet.yml @@ -0,0 +1,50 @@ +# Test that local testnet starts successfully. +name: local testnet + +on: + push: + branches: + - unstable + pull_request: + +jobs: + run-local-testnet: + strategy: + matrix: + os: + - ubuntu-18.04 + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v1 + + - name: Install ganache + run: npm install ganache-cli@latest --global + + # https://github.com/actions/cache/blob/main/examples.md#rust---cargo + - uses: actions/cache@v2 + id: cache-cargo + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install lighthouse + if: steps.cache-cargo.outputs.cache-hit != 'true' + run: make && make install-lcli + + - name: Start local testnet + run: ./start_local_testnet.sh + working-directory: scripts/local_testnet + + - name: Print logs + run: ./print_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4c57b8b1e7..5b28a5ec71 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,74 +20,6 @@ jobs: id: extract_version outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} - - 
build-docker-arm64: - runs-on: ubuntu-18.04 - needs: [extract-version] - # We need to enable experimental docker features in order to use `docker buildx` - env: - DOCKER_CLI_EXPERIMENTAL: enabled - VERSION: ${{ needs.extract-version.outputs.VERSION }} - steps: - - uses: actions/checkout@v2 - - name: Update Rust - run: rustup update stable - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Cross build lighthouse binary - run: | - cargo install cross - make build-aarch64-portable - - name: Move cross-built ARM binary into Docker scope - run: | - mkdir ./bin; - mv ./target/aarch64-unknown-linux-gnu/release/lighthouse ./bin; - # Install dependencies for emulation. Have to create a new builder to pick up emulation support. - - name: Build ARM64 dockerfile (with push) - run: | - docker run --privileged --rm tonistiigi/binfmt --install arm64 - docker buildx create --use --name cross-builder - docker buildx build \ - --platform=linux/arm64 \ - --file ./Dockerfile.cross . \ - --tag ${IMAGE_NAME}:${{ env.VERSION }}-arm64 \ - --push - build-docker-amd64: - runs-on: ubuntu-18.04 - needs: [extract-version] - env: - DOCKER_CLI_EXPERIMENTAL: enabled - VERSION: ${{ needs.extract-version.outputs.VERSION }} - steps: - - uses: actions/checkout@v2 - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Build AMD64 dockerfile (with push) - run: | - docker build \ - --build-arg PORTABLE=true \ - --tag ${IMAGE_NAME}:${{ env.VERSION }}-amd64 \ - --file ./Dockerfile . 
- docker push ${IMAGE_NAME}:${{ env.VERSION }}-amd64 - build-docker-multiarch: - runs-on: ubuntu-18.04 - needs: [build-docker-arm64, build-docker-amd64, extract-version] - # We need to enable experimental docker features in order to use `docker manifest` - env: - DOCKER_CLI_EXPERIMENTAL: enabled - VERSION: ${{ needs.extract-version.outputs.VERSION }} - steps: - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin - - name: Create and push multiarch manifest - run: | - docker manifest create ${IMAGE_NAME}:${{ env.VERSION }} \ - --amend ${IMAGE_NAME}:${{ env.VERSION }}-arm64 \ - --amend ${IMAGE_NAME}:${{ env.VERSION }}-amd64; - docker manifest push ${IMAGE_NAME}:${{ env.VERSION }} build: name: Build Release strategy: @@ -181,19 +113,19 @@ jobs: - name: Build Lighthouse for x86_64-apple-darwin portable if: matrix.arch == 'x86_64-apple-darwin-portable' - run: cargo install --path lighthouse --force --locked --features portable + run: cargo install --path lighthouse --force --locked --features portable,gnosis - name: Build Lighthouse for x86_64-apple-darwin modern if: matrix.arch == 'x86_64-apple-darwin' - run: cargo install --path lighthouse --force --locked --features modern + run: cargo install --path lighthouse --force --locked --features modern,gnosis - name: Build Lighthouse for Windows portable if: matrix.arch == 'x86_64-windows-portable' - run: cargo install --path lighthouse --force --locked --features portable + run: cargo install --path lighthouse --force --locked --features portable,gnosis - name: Build Lighthouse for Windows modern if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern + run: cargo install --path lighthouse --force --locked --features modern,gnosis - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true diff --git a/.github/workflows/test-suite.yml 
b/.github/workflows/test-suite.yml index a4e49b1c26..8b590f4e6e 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -123,7 +123,9 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Build the root Dockerfile - run: docker build . + run: docker build --build-arg FEATURES=portable -t lighthouse:local . + - name: Test the built image + run: docker run -t lighthouse:local lighthouse --version eth1-simulator-ubuntu: name: eth1-simulator-ubuntu runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index d405f9776d..6094a17f2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84450d0b4a8bd1ba4144ce8ce718fbc5d071358b1e5384bace6536b3d1f2d5b3" +checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" [[package]] name = "arbitrary" @@ -253,9 +253,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" +checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", "cc", @@ -266,6 +266,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.12.3" @@ -331,7 +337,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.1.0" +version = "2.1.2" dependencies = [ "beacon_chain", "clap", @@ -497,7 +503,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.1.0" +version = "2.1.2" dependencies 
= [ "beacon_node", "clap", @@ -760,7 +766,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 0.3.5", + "time 0.3.7", "timer", "tokio", "toml", @@ -855,9 +861,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" +checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if", ] @@ -1272,7 +1278,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", - "uint 0.9.1", + "uint 0.9.2", "zeroize", ] @@ -1301,7 +1307,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" dependencies = [ "der 0.5.1", - "elliptic-curve 0.11.6", + "elliptic-curve 0.11.12", "rfc6979", "signature", ] @@ -1384,10 +1390,11 @@ dependencies = [ [[package]] name = "elliptic-curve" -version = "0.11.6" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decb3a27ea454a5f23f96eb182af0671c12694d64ecc33dada74edd1301f6cfc" +checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ + "base16ct", "crypto-bigint", "der 0.5.1", "ff 0.11.0", @@ -1557,6 +1564,7 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "slashing_protection", "store", "types", ] @@ -1765,7 +1773,7 @@ dependencies = [ "serde_json", "sha3", "thiserror", - "uint 0.9.1", + "uint 0.9.2", ] [[package]] @@ -1819,7 +1827,7 @@ dependencies = [ "impl-rlp 0.3.0", "impl-serde", "primitive-types 0.9.1", - "uint 0.9.1", + "uint 0.9.2", ] [[package]] @@ -1833,7 +1841,7 @@ dependencies = [ "impl-rlp 0.3.0", "impl-serde", "primitive-types 0.10.1", - "uint 0.9.1", + "uint 0.9.2", ] [[package]] @@ -1895,9 +1903,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = 
"fastrand" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" dependencies = [ "instant", ] @@ -2285,9 +2293,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" +checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" dependencies = [ "bytes", "fnv", @@ -2337,9 +2345,9 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" +checksum = "c84c647447a07ca16f5fbd05b633e535cc41a08d2d74ab1e08648df53be9cb89" dependencies = [ "base64 0.13.0", "bitflags", @@ -2551,7 +2559,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite 0.2.8", - "socket2 0.4.2", + "socket2 0.4.4", "tokio", "tower-service", "tracing", @@ -2796,9 +2804,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" dependencies = [ "wasm-bindgen", ] @@ -2853,7 +2861,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.1.0" +version = "2.1.2" dependencies = [ "account_utils", "bls", @@ -2909,15 +2917,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.112" +version = "0.2.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = "565dbd88872dbe4cc8a46e527f26483c1d1f7afa6b884a3bd6cd893d4f98da74" [[package]] name = "libflate" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16364af76ebb39b5869bb32c81fa93573267cd8c62bb3474e28d78fac3fb141e" +checksum = "d2d57e534717ac3e0b8dc459fe338bdfb4e29d7eea8fd0926ba649ddd3f4765f" dependencies = [ "adler32", "crc32fast", @@ -2935,9 +2943,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ "cfg-if", "winapi", @@ -3225,7 +3233,7 @@ dependencies = [ "libc", "libp2p-core 0.31.0", "log", - "socket2 0.4.2", + "socket2 0.4.4", "tokio", ] @@ -3378,7 +3386,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.1.0" +version = "2.1.2" dependencies = [ "account_manager", "account_utils", @@ -3481,9 +3489,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" dependencies = [ "scopeguard", ] @@ -4045,6 +4053,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ba99ba6393e2c3734791401b66902d981cb03bf190af674ca69949b6d5fb15" +dependencies = [ + "libc", +] + [[package]] name = "object" version = "0.27.1" @@ -4169,12 +4186,12 @@ dependencies = [ [[package]] name = "p256" -version = "0.10.0" +version = "0.10.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e0c5310031b5d4528ac6534bccc1446c289ac45c47b277d5aa91089c5f74fa" +checksum = "19736d80675fbe9fe33426268150b951a3fb8f5cfca2a23a17c85ef3adb24e3b" dependencies = [ "ecdsa 0.13.4", - "elliptic-curve 0.11.6", + "elliptic-curve 0.11.12", "sec1", "sha2 0.9.9", ] @@ -4457,7 +4474,7 @@ dependencies = [ "impl-codec 0.5.1", "impl-rlp 0.3.0", "impl-serde", - "uint 0.9.1", + "uint 0.9.2", ] [[package]] @@ -4470,7 +4487,7 @@ dependencies = [ "impl-codec 0.5.1", "impl-rlp 0.3.0", "impl-serde", - "uint 0.9.1", + "uint 0.9.2", ] [[package]] @@ -4616,9 +4633,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.25.2" +version = "2.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c327e191621a2158159df97cdbc2e7074bb4e940275e35abf38eb3d2595754" +checksum = "9d613b4fd96c0182e187734b4f8fc5cbc8c940bbf781819f7a52d42dc5922d25" [[package]] name = "psutil" @@ -4680,9 +4697,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" dependencies = [ "proc-macro2", ] @@ -4737,6 +4754,7 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc 0.2.0", + "rand_pcg", ] [[package]] @@ -4807,6 +4825,15 @@ dependencies = [ "rand_core 0.6.3", ] +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + [[package]] name = "rand_xorshift" version = "0.2.0" @@ -4970,9 +4997,9 @@ dependencies = [ [[package]] name = "rle-decode-fast" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" +checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rlp" @@ -5247,9 +5274,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "a57321bf8bc2362081b2599912d2961fe899c0efadf1b4b2f8d48b3e253bb96c" dependencies = [ "core-foundation-sys", "libc", @@ -5304,9 +5331,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.133" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] @@ -5333,9 +5360,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.133" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -5344,9 +5371,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.74" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" +checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" dependencies = [ "itoa 1.0.1", "ryu", @@ -5366,12 +5393,12 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = 
"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 0.4.8", + "itoa 1.0.1", "ryu", "serde", ] @@ -5580,9 +5607,9 @@ dependencies = [ [[package]] name = "slog-json" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9b96fb6b5e80e371423b4aca6656eb537661ce8f82c2697e619f8ca85d043" +checksum = "0f7f7a952ce80fca9da17bf0a53895d11f8aa1ba063668ca53fc72e7869329e9" dependencies = [ "chrono", "serde", @@ -5712,9 +5739,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", @@ -5901,9 +5928,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", @@ -6040,9 +6067,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] @@ -6068,11 +6095,12 @@ dependencies = [ [[package]] name = "time" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41effe7cfa8af36f439fac33861b66b049edc6f9a32331e2312660529c1c24ad" +checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" dependencies = [ "libc", + "num_threads", ] 
[[package]] @@ -6150,9 +6178,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.15.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" +checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" dependencies = [ "bytes", "libc", @@ -6334,9 +6362,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" +checksum = "5312f325fe3588e277415f5a6cca1f4ccad0f248c4cd5a4bd33032d7286abc22" dependencies = [ "ansi_term", "lazy_static", @@ -6585,9 +6613,9 @@ dependencies = [ [[package]] name = "uint" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" +checksum = "1b1b413ebfe8c2c74a69ff124699dd156a7fa41cb1d09ba6df94aa2f2b0a4a3a" dependencies = [ "arbitrary", "byteorder", @@ -6735,6 +6763,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lockfile", + "logging", "monitoring_api", "parking_lot", "rand 0.7.3", @@ -6913,9 +6942,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6923,9 +6952,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", @@ -6938,9 +6967,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" +checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" dependencies = [ "cfg-if", "js-sys", @@ -6950,9 +6979,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6960,9 +6989,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" dependencies = [ "proc-macro2", "quote", @@ -6973,15 +7002,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" [[package]] name = "wasm-bindgen-test" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f1aa7971fdf61ef0f353602102dbea75a56e225ed036c1e3740564b91e6b7e" +checksum = 
"45c8d417d87eefa0087e62e3c75ad086be39433449e2961add9a5d9ce5acc2f1" dependencies = [ "console_error_panic_hook", "js-sys", @@ -6993,9 +7022,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6006f79628dfeb96a86d4db51fbf1344cd7fd8408f06fc9aa3c84913a4789688" +checksum = "d0e560d44db5e73b69a9757a15512fe7e1ef93ed2061c928871a4025798293dd" dependencies = [ "proc-macro2", "quote", @@ -7003,9 +7032,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" dependencies = [ "js-sys", "wasm-bindgen", @@ -7112,9 +7141,9 @@ dependencies = [ [[package]] name = "which" -version = "4.2.2" +version = "4.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" +checksum = "2a5a7e487e921cf220206864a94a89b6c6905bfc19f1057fa26a4cb360e5c1d2" dependencies = [ "either", "lazy_static", @@ -7245,18 +7274,18 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.2.2" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" +checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" dependencies = [ "proc-macro2", "quote", diff --git a/Dockerfile b/Dockerfile 
index 5ca8cbc964..76347e9bfe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -FROM rust:1.56.1-bullseye AS builder +FROM rust:1.58.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse -ARG PORTABLE -ENV PORTABLE $PORTABLE +ARG FEATURES +ENV FEATURES $FEATURES RUN cd lighthouse && make FROM ubuntu:latest diff --git a/Makefile b/Makefile index a4b880b806..f363854c32 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,6 @@ .PHONY: tests EF_TESTS = "testing/ef_tests" -BEACON_CHAIN_CRATE = "beacon_node/beacon_chain" -OP_POOL_CRATE = "beacon_node/operation_pool" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" GIT_TAG := $(shell git describe --tags --candidates 1) BIN_DIR = "bin" @@ -22,19 +20,11 @@ FORKS=phase0 altair # # Binaries will most likely be found in `./target/release` install: -ifeq ($(PORTABLE), true) - cargo install --path lighthouse --force --locked --features portable -else - cargo install --path lighthouse --force --locked -endif + cargo install --path lighthouse --force --locked --features "$(FEATURES)" # Builds the lcli binary in release (optimized). install-lcli: -ifeq ($(PORTABLE), true) - cargo install --path lcli --force --locked --features portable -else - cargo install --path lcli --force --locked -endif + cargo install --path lcli --force --locked --features "$(FEATURES)" # The following commands use `cross` to build a cross-compile. # @@ -50,13 +40,13 @@ endif # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. 
build-x86_64: - cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features modern,gnosis build-x86_64-portable: - cross build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features portable + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features portable,gnosis build-aarch64: - cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features gnosis build-aarch64-portable: - cross build --release --manifest-path lighthouse/Cargo.toml --target aarch64-unknown-linux-gnu --features portable + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features portable,gnosis # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -102,21 +92,21 @@ check-benches: # Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics. check-consensus: - cargo check --manifest-path=consensus/state_processing/Cargo.toml --no-default-features + cargo check -p state_processing --no-default-features # Runs only the ef-test vectors. 
run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt - cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests" - cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto" - cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro" + cargo test --release -p ef_tests --features "ef_tests" + cargo test --release -p ef_tests --features "ef_tests,fake_crypto" + cargo test --release -p ef_tests --features "ef_tests,milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env --manifest-path=$(BEACON_CHAIN_CRATE)/Cargo.toml + env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) @@ -124,7 +114,7 @@ test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) test-op-pool-%: env FORK_NAME=$* cargo test --release \ --features 'beacon_chain/fork_from_env'\ - --manifest-path=$(OP_POOL_CRATE)/Cargo.toml + -p operation_pool # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -144,11 +134,11 @@ test-full: cargo-fmt test-release test-debug test-ef # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: cargo clippy --workspace --tests -- \ - -D clippy::fn_to_numeric_cast_any \ - -D warnings \ - -A clippy::from-over-into \ - -A clippy::upper-case-acronyms \ - -A clippy::vec-init-then-push + -D clippy::fn_to_numeric_cast_any \ + -D warnings \ + -A clippy::from-over-into \ + -A clippy::upper-case-acronyms \ + -A clippy::vec-init-then-push # Runs the makefile in the `ef_tests` repo. 
# diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 221c31caf6..ca8cab5bd3 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -21,6 +21,7 @@ pub const KEYSTORE_FLAG: &str = "keystore"; pub const PASSWORD_FILE_FLAG: &str = "password-file"; pub const BEACON_SERVER_FLAG: &str = "beacon-node"; pub const NO_WAIT: &str = "no-wait"; +pub const NO_CONFIRMATION: &str = "no-confirmation"; pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -59,6 +60,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") ) + .arg( + Arg::with_name(NO_CONFIRMATION) + .long(NO_CONFIRMATION) + .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. This should be used with caution") + ) .arg( Arg::with_name(STDIN_INPUTS_FLAG) .takes_value(false) @@ -75,6 +81,7 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); let no_wait = matches.is_present(NO_WAIT); + let no_confirmation = matches.is_present(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; @@ -97,12 +104,14 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< stdin_inputs, ð2_network_config, no_wait, + no_confirmation, ))?; Ok(()) } /// Gets the keypair and validator_index for every validator and calls `publish_voluntary_exit` on it. 
+#[allow(clippy::too_many_arguments)] async fn publish_voluntary_exit( keystore_path: &Path, password_file_path: Option<&PathBuf>, @@ -111,6 +120,7 @@ async fn publish_voluntary_exit( stdin_inputs: bool, eth2_network_config: &Eth2NetworkConfig, no_wait: bool, + no_confirmation: bool, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config @@ -149,15 +159,22 @@ async fn publish_voluntary_exit( "Publishing a voluntary exit for validator: {} \n", keypair.pk ); - eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); - eprintln!("{}\n", PROMPT); - eprintln!( - "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", - WEBSITE_URL - ); - eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + if !no_confirmation { + eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); + eprintln!("{}\n", PROMPT); + eprintln!( + "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", + WEBSITE_URL + ); + eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + } + + let confirmation = if !no_confirmation { + account_utils::read_input_from_user(stdin_inputs)? + } else { + CONFIRMATION_PHRASE.to_string() + }; - let confirmation = account_utils::read_input_from_user(stdin_inputs)?; if confirmation == CONFIRMATION_PHRASE { // Sign and publish the voluntary exit to network let signed_voluntary_exit = voluntary_exit.sign( diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index eecef0349e..c0bc17e118 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.1.0" +version = "2.1.2" authors = ["Paul Hauner ", "Age Manning ( } Ok(block) + } else if chain.is_pre_finalization_block(attestation.data.beacon_block_root)? 
{ + Err(Error::HeadBlockFinalized { + beacon_block_root: attestation.data.beacon_block_root, + }) } else { + // The block is either: + // + // 1) A pre-finalization block that has been pruned. We'll do one network lookup + // for it and when it fails we will penalise all involved peers. + // 2) A post-finalization block that we don't know about yet. We'll queue + // the attestation until the block becomes available (or we time out). Err(Error::UnknownHeadBlock { beacon_block_root: attestation.data.beacon_block_root, }) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4e1d54dc13..a65a943b93 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -34,6 +34,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; +use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::SnapshotCache; use crate::sync_committee_verification::{ @@ -336,6 +337,8 @@ pub struct BeaconChain { pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, + /// A cache used to track pre-finalization block roots for quick rejection. + pub pre_finalization_block_cache: PreFinalizationBlockCache, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -2855,6 +2858,10 @@ impl BeaconChain { ); } + // Inform the unknown block cache, in case it was waiting on this block. 
+ self.pre_finalization_block_cache + .block_processed(block_root); + Ok(block_root) } diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs new file mode 100644 index 0000000000..83b204113f --- /dev/null +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -0,0 +1,97 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; +use operation_pool::{AttMaxCover, MaxCover}; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch}; + +impl BeaconChain { + pub fn compute_block_reward( + &self, + block: BeaconBlockRef<'_, T::EthSpec>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; + let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; + let mut per_attestation_rewards = block + .body() + .attestations() + .iter() + .map(|att| { + AttMaxCover::new(att, state, total_active_balance, &self.spec) + .ok_or(BeaconChainError::BlockRewardAttestationError) + }) + .collect::, _>>()?; + + // Update the attestation rewards for each previous attestation included. + // This is O(n^2) in the number of attestations n. 
+ for i in 0..per_attestation_rewards.len() { + let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1); + let latest_att = &updated[i]; + + for att in to_update { + att.update_covering_set(latest_att.object(), latest_att.covering_set()); + } + } + + let mut prev_epoch_total = 0; + let mut curr_epoch_total = 0; + + for cover in &per_attestation_rewards { + for &reward in cover.fresh_validators_rewards.values() { + if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() + { + curr_epoch_total += reward; + } else { + prev_epoch_total += reward; + } + } + } + + let attestation_total = prev_epoch_total + curr_epoch_total; + + // Drop the covers. + let per_attestation_rewards = per_attestation_rewards + .into_iter() + .map(|cover| cover.fresh_validators_rewards) + .collect(); + + let attestation_rewards = AttestationRewards { + total: attestation_total, + prev_epoch_total, + curr_epoch_total, + per_attestation_rewards, + }; + + // Sync committee rewards. 
+ let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit + } else { + 0 + }; + + // Total, metadata + let total = attestation_total + sync_committee_rewards; + + let meta = BlockRewardMeta { + slot: block.slot(), + parent_slot: state.latest_block_header().slot, + proposer_index: block.proposer_index(), + graffiti: block.body().graffiti().as_utf8_lossy(), + }; + + Ok(BlockReward { + total, + block_root, + meta, + attestation_rewards, + sync_committee_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c6d937c81e..c2dc0028e9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -53,6 +53,7 @@ use crate::{ }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; +use eth2::types::EventKind; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -617,7 +618,7 @@ impl GossipVerifiedBlock { check_block_against_anchor_slot(block.message(), chain)?; // Do not gossip a block from a finalized slot. - check_block_against_finalized_slot(block.message(), chain)?; + check_block_against_finalized_slot(block.message(), block_root, chain)?; // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. @@ -1165,6 +1166,18 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { metrics::stop_timer(committee_timer); + /* + * If we have block reward listeners, compute the block reward and push it to the + * event handler. 
+ */ + if let Some(ref event_handler) = chain.event_handler { + if event_handler.has_block_reward_subscribers() { + let block_reward = + chain.compute_block_reward(block.message(), block_root, &state)?; + event_handler.register(EventKind::BlockReward(block_reward)); + } + } + /* * Perform `per_block_processing` on the block and state, returning early if the block is * invalid. @@ -1279,6 +1292,7 @@ fn check_block_against_anchor_slot( /// verifying that condition. fn check_block_against_finalized_slot( block: BeaconBlockRef<'_, T::EthSpec>, + block_root: Hash256, chain: &BeaconChain, ) -> Result<(), BlockError> { let finalized_slot = chain @@ -1288,6 +1302,7 @@ fn check_block_against_finalized_slot( .start_slot(T::EthSpec::slots_per_epoch()); if block.slot() <= finalized_slot { + chain.pre_finalization_block_rejected(block_root); Err(BlockError::WouldRevertFinalizedSlot { block_slot: block.slot(), finalized_slot, @@ -1360,11 +1375,11 @@ pub fn check_block_relevancy( return Err(BlockError::BlockSlotLimitReached); } - // Do not process a block from a finalized slot. - check_block_against_finalized_slot(block, chain)?; - let block_root = block_root.unwrap_or_else(|| get_block_root(signed_block)); + // Do not process a block from a finalized slot. + check_block_against_finalized_slot(block, block_root, chain)?; + // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. 
if chain.fork_choice.read().contains_block(&block_root) { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 24a9a916bb..e9860124c0 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -752,6 +752,7 @@ where shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), + pre_finalization_block_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 70e288ec26..6920c06039 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -137,6 +137,9 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), + BlockRewardSlotError, + BlockRewardAttestationError, + BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayloadShutdownError(TrySendError), diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 459ccb457f..6f4415ef4f 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -15,6 +15,7 @@ pub struct ServerSentEventHandler { chain_reorg_tx: Sender>, contribution_tx: Sender>, late_head: Sender>, + block_reward_tx: Sender>, log: Logger, } @@ -32,6 +33,7 @@ impl ServerSentEventHandler { let (chain_reorg_tx, _) = broadcast::channel(capacity); let (contribution_tx, _) = broadcast::channel(capacity); let (late_head, _) = broadcast::channel(capacity); + let (block_reward_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -42,6 +44,7 @@ impl ServerSentEventHandler { chain_reorg_tx, contribution_tx, late_head, + block_reward_tx, 
log, } } @@ -67,6 +70,8 @@ impl ServerSentEventHandler { .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), EventKind::LateHead(late_head) => self.late_head.send(EventKind::LateHead(late_head)) .map(|count| trace!(self.log, "Registering server-sent late head event"; "receiver_count" => count)), + EventKind::BlockReward(block_reward) => self.block_reward_tx.send(EventKind::BlockReward(block_reward)) + .map(|count| trace!(self.log, "Registering server-sent block reward event"; "receiver_count" => count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -105,6 +110,10 @@ impl ServerSentEventHandler { self.late_head.subscribe() } + pub fn subscribe_block_reward(&self) -> Receiver> { + self.block_reward_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -136,4 +145,8 @@ impl ServerSentEventHandler { pub fn has_late_head_subscribers(&self) -> bool { self.late_head.receiver_count() > 0 } + + pub fn has_block_reward_subscribers(&self) -> bool { + self.block_reward_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 768a869551..d41c1a5cc5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,6 +5,7 @@ mod beacon_chain; mod beacon_fork_choice_store; mod beacon_proposer_cache; mod beacon_snapshot; +pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; @@ -26,6 +27,7 @@ mod observed_block_producers; pub mod observed_operations; mod persisted_beacon_chain; mod persisted_fork_choice; +mod pre_finalization_cache; pub mod schema_change; mod shuffling_cache; mod snapshot_cache; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 28eacad559..41b7604532 100644 ---
a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -904,6 +904,20 @@ lazy_static! { "beacon_backfill_signature_total_seconds", "Time spent verifying the signature set during backfill sync, including setup" ); + + /* + * Pre-finalization block cache. + */ + pub static ref PRE_FINALIZATION_BLOCK_CACHE_SIZE: Result = + try_create_int_gauge( + "beacon_pre_finalization_block_cache_size", + "Number of pre-finalization block roots cached for quick rejection" + ); + pub static ref PRE_FINALIZATION_BLOCK_LOOKUP_COUNT: Result = + try_create_int_gauge( + "beacon_pre_finalization_block_lookup_count", + "Number of block roots subject to single block lookups" + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, @@ -931,6 +945,11 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { ) } + if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() { + set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size); + set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_LOOKUP_COUNT, num_lookups); + } + set_gauge_by_usize( &OP_POOL_NUM_ATTESTATIONS, attestation_stats.num_attestations, diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs new file mode 100644 index 0000000000..41771b048d --- /dev/null +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -0,0 +1,119 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use itertools::process_results; +use lru::LruCache; +use parking_lot::Mutex; +use slog::debug; +use std::time::Duration; +use types::Hash256; + +const BLOCK_ROOT_CACHE_LIMIT: usize = 512; +const LOOKUP_LIMIT: usize = 8; +const METRICS_TIMEOUT: Duration = Duration::from_millis(100); + +/// Cache for rejecting attestations to blocks from before finalization. 
+/// +/// It stores a collection of block roots that are pre-finalization and therefore not known to fork +/// choice in `verify_head_block_is_known` during attestation processing. +#[derive(Default)] +pub struct PreFinalizationBlockCache { + cache: Mutex, +} + +struct Cache { + /// Set of block roots that are known to be pre-finalization. + block_roots: LruCache, + /// Set of block roots that are the subject of single block lookups. + in_progress_lookups: LruCache, +} + +impl Default for Cache { + fn default() -> Self { + Cache { + block_roots: LruCache::new(BLOCK_ROOT_CACHE_LIMIT), + in_progress_lookups: LruCache::new(LOOKUP_LIMIT), + } + } +} + +impl BeaconChain { + /// Check whether the block with `block_root` is known to be pre-finalization. + /// + /// The provided `block_root` is assumed to be unknown to fork choice. I.e., it + /// is not known to be a descendant of the finalized block. + /// + /// Return `true` if the attestation to this block should be rejected outright, + /// return `false` if more information is needed from a single-block-lookup. + pub fn is_pre_finalization_block(&self, block_root: Hash256) -> Result { + let mut cache = self.pre_finalization_block_cache.cache.lock(); + + // Check the cache to see if we already know this pre-finalization block root. + if cache.block_roots.contains(&block_root) { + return Ok(true); + } + + // Avoid repeating the disk lookup for blocks that are already subject to a network lookup. + // Sync will take care of de-duplicating the single block lookups. + if cache.in_progress_lookups.contains(&block_root) { + return Ok(false); + } + + // 1. Check memory for a recent pre-finalization block. 
+ let is_recent_finalized_block = self.with_head(|head| { + process_results( + head.beacon_state.rev_iter_block_roots(&self.spec), + |mut iter| iter.any(|(_, root)| root == block_root), + ) + .map_err(BeaconChainError::BeaconStateError) + })?; + if is_recent_finalized_block { + cache.block_roots.put(block_root, ()); + return Ok(true); + } + + // 2. Check on disk. + if self.store.get_block(&block_root)?.is_some() { + cache.block_roots.put(block_root, ()); + return Ok(true); + } + + // 3. Check the network with a single block lookup. + cache.in_progress_lookups.put(block_root, ()); + if cache.in_progress_lookups.len() == LOOKUP_LIMIT { + // NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be + // imported for reasons other than being pre-finalization. The cache will eventually + // self-repair in this case by replacing old entries with new ones until all the failed + // blocks have been flushed out. Solving this issue isn't as simple as hooking the + // beacon processor's functions that handle failed blocks because we need the block root + // and it has been erased from the `BlockError` by that point. + debug!( + self.log, + "Pre-finalization lookup cache is full"; + ); + } + Ok(false) + } + + pub fn pre_finalization_block_rejected(&self, block_root: Hash256) { + // Future requests can know that this block is invalid without having to look it up again. + let mut cache = self.pre_finalization_block_cache.cache.lock(); + cache.in_progress_lookups.pop(&block_root); + cache.block_roots.put(block_root, ()); + } +} + +impl PreFinalizationBlockCache { + pub fn block_processed(&self, block_root: Hash256) { + // Future requests will find this block in fork choice, so no need to cache it in the + // ongoing lookup cache any longer. 
+ self.cache.lock().in_progress_lookups.pop(&block_root); + } + + pub fn contains(&self, block_root: Hash256) -> bool { + self.cache.lock().block_roots.contains(&block_root) + } + + pub fn metrics(&self) -> Option<(usize, usize)> { + let cache = self.cache.try_lock_for(METRICS_TIMEOUT)?; + Some((cache.block_roots.len(), cache.in_progress_lookups.len())) + } +} diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index f5942a2be2..3c675ec6a4 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -5,7 +5,7 @@ use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }, - BeaconChain, BeaconChainTypes, WhenSlotSkipped, + BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, }; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; @@ -991,6 +991,81 @@ fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } +#[test] +fn attestation_to_finalized_block() { + let harness = get_harness(VALIDATOR_COUNT); + + // Extend the chain out a few epochs so we have some chain depth to play with. 
+ harness.extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let finalized_checkpoint = harness + .chain + .with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.finalized_checkpoint())) + .unwrap(); + assert!(finalized_checkpoint.epoch > 0); + + let current_slot = harness.get_current_slot(); + + let earlier_slot = finalized_checkpoint + .epoch + .start_slot(MainnetEthSpec::slots_per_epoch()) + - 1; + let earlier_block = harness + .chain + .block_at_slot(earlier_slot, WhenSlotSkipped::Prev) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + let earlier_block_root = earlier_block.canonical_root(); + assert_ne!(earlier_block_root, finalized_checkpoint.root); + + let mut state = harness + .chain + .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .expect("should not error getting state") + .expect("should find state"); + + while state.slot() < current_slot { + per_slot_processing(&mut state, None, &harness.spec).expect("should process slot"); + } + + let state_root = state.update_tree_hash_cache().unwrap(); + + let (attestation, subnet_id) = harness + .get_unaggregated_attestations( + &AttestationStrategy::AllValidators, + &state, + state_root, + earlier_block_root, + current_slot, + ) + .first() + .expect("should have at least one committee") + .first() + .cloned() + .expect("should have at least one attestation in committee"); + assert_eq!(attestation.data.beacon_block_root, earlier_block_root); + + // Attestation should be rejected for attesting to a pre-finalization block. + let res = harness + .chain + .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); + assert!( + matches!(res, Err(AttnError:: HeadBlockFinalized { beacon_block_root }) + if beacon_block_root == earlier_block_root + ) + ); + + // Pre-finalization block cache should contain the block root. 
+ assert!(harness + .chain + .pre_finalization_block_cache + .contains(earlier_block_root)); +} + #[test] fn verify_aggregate_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs new file mode 100644 index 0000000000..5cd9894ade --- /dev/null +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -0,0 +1,216 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; +use state_processing::{ + per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError, + per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, +}; +use std::sync::Arc; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock}; +use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; + +const MAX_REQUEST_RANGE_EPOCHS: usize = 100; +const BLOCK_ROOT_CHUNK_SIZE: usize = 100; + +#[derive(Debug)] +enum AttestationPerformanceError { + BlockReplay(BlockReplayError), + BeaconState(BeaconStateError), + ParticipationCache(ParticipationCacheError), + UnableToFindValidator(usize), +} + +impl From for AttestationPerformanceError { + fn from(e: BlockReplayError) -> Self { + Self::BlockReplay(e) + } +} + +impl From for AttestationPerformanceError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for AttestationPerformanceError { + fn from(e: ParticipationCacheError) -> Self { + Self::ParticipationCache(e) + } +} + +pub fn get_attestation_performance( + target: String, + query: AttestationPerformanceQuery, + chain: Arc>, +) -> Result, warp::Rejection> { + let spec = &chain.spec; + // We increment by 2 here so that when we build the state from the `prior_slot` it is + // still 1 epoch ahead of the 
first epoch we want to analyse. + // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results + // for the correct epoch. + let start_epoch = query.start_epoch + 2; + let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let prior_slot = start_slot - 1; + + let end_epoch = query.end_epoch + 2; + let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); + + // Ensure end_epoch is smaller than the current epoch - 1. + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + if query.end_epoch >= current_epoch - 1 { + return Err(custom_bad_request(format!( + "end_epoch must be less than the current epoch - 1. current: {}, end: {}", + current_epoch, query.end_epoch + ))); + } + + // Check query is valid. + if start_epoch > end_epoch { + return Err(custom_bad_request(format!( + "start_epoch must not be larger than end_epoch. start: {}, end: {}", + query.start_epoch, query.end_epoch + ))); + } + + // The response size can grow exceptionally large therefore we should check that the + // query is within permitted bounds to prevent potential OOM errors. + if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { + return Err(custom_bad_request(format!( + "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", + query.start_epoch, query.end_epoch + ))); + } + + // Either use the global validator set, or the specified index. + let index_range = if target.to_lowercase() == "global" { + chain + .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) + .map_err(beacon_chain_error)? + } else { + vec![target.parse::().map_err(|_| { + custom_bad_request(format!( + "Invalid validator index: {:?}", + target.to_lowercase() + )) + })?] + }; + + // Load block roots. + let mut block_roots: Vec = chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .map_err(beacon_chain_error)? 
+ .map(|res| res.map(|(root, _)| root)) + .collect::, _>>() + .map_err(beacon_chain_error)?; + block_roots.dedup(); + + // Load first block so we can get its parent. + let first_block_root = block_roots.first().ok_or_else(|| { + custom_server_error( + "No blocks roots could be loaded. Ensure the beacon node is synced.".to_string(), + ) + })?; + let first_block = chain + .get_block(first_block_root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) + }) + .map_err(beacon_chain_error)?; + + // Load the block of the prior slot which will be used to build the starting state. + let prior_block = chain + .get_block(&first_block.parent_root()) + .and_then(|maybe_block| { + maybe_block + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) + }) + .map_err(beacon_chain_error)?; + + // Load state for block replay. + let state_root = prior_block.state_root(); + let state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + // Allocate an AttestationPerformance vector for each validator in the range. + let mut perfs: Vec = + AttestationPerformance::initialize(index_range.clone()); + + let post_slot_hook = |state: &mut BeaconState, + summary: Option>, + _is_skip_slot: bool| + -> Result<(), AttestationPerformanceError> { + // If a `summary` was not output then an epoch boundary was not crossed + // so we move onto the next slot. + if let Some(summary) = summary { + for (position, i) in index_range.iter().enumerate() { + let index = *i as usize; + + let val = perfs + .get_mut(position) + .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?; + + // We are two epochs ahead since the summary is generated for + // `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return + // data for the epoch before that. 
+ let epoch = state.previous_epoch().as_u64() - 1; + + let is_active = summary.is_active_unslashed_in_previous_epoch(index); + + let received_source_reward = summary.is_previous_epoch_source_attester(index)?; + + let received_head_reward = summary.is_previous_epoch_head_attester(index)?; + + let received_target_reward = summary.is_previous_epoch_target_attester(index)?; + + let inclusion_delay = summary + .previous_epoch_inclusion_info(index) + .map(|info| info.delay); + + let perf = AttestationPerformanceStatistics { + active: is_active, + head: received_head_reward, + target: received_target_reward, + source: received_source_reward, + delay: inclusion_delay, + }; + + val.epochs.insert(epoch, perf); + } + } + Ok(()) + }; + + // Initialize block replayer + let mut replayer = BlockReplayer::new(state, spec) + .no_state_root_iter() + .no_signature_verification() + .minimal_block_root_verification() + .post_slot_hook(Box::new(post_slot_hook)); + + // Iterate through block roots in chunks to reduce load on memory. + for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { + // Load blocks from the block root chunks. 
+ let blocks = block_root_chunks + .iter() + .map(|root| { + chain + .get_block(root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) + }) + .map_err(beacon_chain_error) + }) + .collect::>, _>>()?; + + replayer = replayer + .apply_blocks(blocks, None) + .map_err(|e| custom_server_error(format!("{:?}", e)))?; + } + + drop(replayer); + + Ok(perfs) +} diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs new file mode 100644 index 0000000000..154773aa95 --- /dev/null +++ b/beacon_node/http_api/src/block_rewards.rs @@ -0,0 +1,80 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; +use slog::{warn, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; + +pub fn get_block_rewards( + query: BlockRewardsQuery, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let start_slot = query.start_slot; + let end_slot = query.end_slot; + let prior_slot = start_slot - 1; + + if start_slot > end_slot || start_slot == 0 { + return Err(custom_bad_request(format!( + "invalid start and end: {}, {}", + start_slot, end_slot + ))); + } + + let end_block_root = chain + .block_root_at_slot(end_slot, WhenSlotSkipped::Prev) + .map_err(beacon_chain_error)? + .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; + + let blocks = chain + .store + .load_blocks_to_replay(start_slot, end_slot, end_block_root) + .map_err(|e| beacon_chain_error(e.into()))?; + + let state_root = chain + .state_root_at_slot(prior_slot) + .map_err(beacon_chain_error)? 
+ .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; + + let mut state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + state + .build_all_caches(&chain.spec) + .map_err(beacon_state_error)?; + + let mut block_rewards = Vec::with_capacity(blocks.len()); + + let block_replayer = BlockReplayer::new(state, &chain.spec) + .pre_block_hook(Box::new(|state, block| { + // Compute block reward. + let block_reward = + chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + block_rewards.push(block_reward); + Ok(()) + })) + .state_root_iter( + chain + .forwards_iter_state_roots_until(prior_slot, end_slot) + .map_err(beacon_chain_error)?, + ) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(blocks, None) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "start_slot" => start_slot, + "end_slot" => end_slot, + ); + } + + drop(block_replayer); + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b0907a30c1..b30af858f7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,8 +5,10 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. 
+mod attestation_performance; mod attester_duties; mod block_id; +mod block_rewards; mod database; mod metrics; mod proposer_duties; @@ -2540,6 +2542,32 @@ pub fn serve( }, ); + // GET lighthouse/analysis/block_rewards + let get_lighthouse_block_rewards = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("block_rewards")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|query, chain, log| { + blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + }); + + // GET lighthouse/analysis/attestation_performance/{index} + let get_lighthouse_attestation_performance = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("attestation_performance")) + .and(warp::path::param::()) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|target, query, chain: Arc>| { + blocking_json_task(move || { + attestation_performance::get_attestation_performance(target, query, chain) + }) + }); + let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) @@ -2576,6 +2604,9 @@ pub fn serve( api_types::EventTopic::LateHead => { event_handler.subscribe_late_head() } + api_types::EventTopic::BlockReward => { + event_handler.subscribe_block_reward() + } }; receivers.push(BroadcastStream::new(receiver).map(|msg| { @@ -2661,6 +2692,8 @@ pub fn serve( .or(get_lighthouse_beacon_states_ssz.boxed()) .or(get_lighthouse_staking.boxed()) .or(get_lighthouse_database_info.boxed()) + .or(get_lighthouse_block_rewards.boxed()) + .or(get_lighthouse_attestation_performance.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 4cafcf62b1..7a2ba61997 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -219,6 +219,7 @@ pub struct NetworkLoad { pub 
mesh_n_high: usize, pub gossip_lazy: usize, pub history_gossip: usize, + pub heartbeat_interval: Duration, } impl From for NetworkLoad { @@ -231,7 +232,8 @@ impl From for NetworkLoad { mesh_n: 3, mesh_n_high: 4, gossip_lazy: 3, - history_gossip: 12, + history_gossip: 3, + heartbeat_interval: Duration::from_millis(1200), }, 2 => NetworkLoad { name: "Low", @@ -240,7 +242,8 @@ impl From for NetworkLoad { mesh_n: 4, mesh_n_high: 8, gossip_lazy: 3, - history_gossip: 12, + history_gossip: 3, + heartbeat_interval: Duration::from_millis(1000), }, 3 => NetworkLoad { name: "Average", @@ -249,7 +252,8 @@ impl From for NetworkLoad { mesh_n: 5, mesh_n_high: 10, gossip_lazy: 3, - history_gossip: 12, + history_gossip: 3, + heartbeat_interval: Duration::from_millis(700), }, 4 => NetworkLoad { name: "Average", @@ -258,7 +262,8 @@ impl From for NetworkLoad { mesh_n: 8, mesh_n_high: 12, gossip_lazy: 3, - history_gossip: 12, + history_gossip: 3, + heartbeat_interval: Duration::from_millis(700), }, // 5 and above _ => NetworkLoad { @@ -268,7 +273,8 @@ impl From for NetworkLoad { mesh_n: 10, mesh_n_high: 15, gossip_lazy: 5, - history_gossip: 12, + history_gossip: 6, + heartbeat_interval: Duration::from_millis(500), }, } } @@ -322,7 +328,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> Gos GossipsubConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) - .heartbeat_interval(Duration::from_millis(700)) + .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) .mesh_n_low(load.mesh_n_low) .mesh_outbound_min(load.outbound_min) diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index d194deffd4..b787c421cf 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -122,7 +122,7 @@ impl NetworkBehaviour for PeerManager { // TODO: directly 
emit the ban event? BanResult::BadScore => { // This is a faulty state - error!(self.log, "Connecteded to a banned peer, re-banning"; "peer_id" => %peer_id); + error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id); // Reban the peer self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 9ece18d02c..72cb3a7ee1 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -30,6 +30,10 @@ use super::{ }; use crate::beacon_processor::DuplicateCache; +/// Set to `true` to introduce stricter penalties for peers who send some types of late consensus +/// messages. +const STRICT_LATE_MESSAGE_PENALTIES: bool = false; + /// An attestation that has been validated by the `BeaconChain`. /// /// Since this struct implements `beacon_chain::VerifiedAttestation`, it would be a logic error to @@ -346,9 +350,12 @@ impl Worker { &self.chain.slot_clock, ); - // Indicate to the `Network` service that this message is valid and can be - // propagated on the gossip network. - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + // If the attestation is still timely, propagate it. + self.propagate_attestation_if_timely( + verified_attestation.attestation(), + message_id, + peer_id, + ); if !should_import { return; @@ -539,9 +546,12 @@ impl Worker { let aggregate = &verified_aggregate.signed_aggregate; let indexed_attestation = &verified_aggregate.indexed_attestation; - // Indicate to the `Network` service that this message is valid and can be - // propagated on the gossip network. - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + // If the attestation is still timely, propagate it. 
+ self.propagate_attestation_if_timely( + verified_aggregate.attestation(), + message_id, + peer_id, + ); // Register the attestation with any monitored validators. self.chain @@ -1165,9 +1175,8 @@ impl Worker { } }; - // Indicate to the `Network` service that this message is valid and can be - // propagated on the gossip network. - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + // If the message is still timely, propagate it. + self.propagate_sync_message_if_timely(message_slot, message_id, peer_id); // Register the sync signature with any monitored validators. self.chain @@ -1229,9 +1238,8 @@ impl Worker { } }; - // Indicate to the `Network` service that this message is valid and can be - // propagated on the gossip network. - self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + // If the message is still timely, propagate it. + self.propagate_sync_message_if_timely(contribution_slot, message_id, peer_id); self.chain .validator_monitor @@ -1311,7 +1319,7 @@ impl Worker { // Only penalize the peer if it would have been invalid at the moment we received // it. - if hindsight_verification.is_err() { + if STRICT_LATE_MESSAGE_PENALTIES && hindsight_verification.is_err() { self.gossip_penalize_peer( peer_id, PeerAction::LowToleranceError, @@ -1701,6 +1709,26 @@ impl Worker { "attn_too_many_skipped_slots", ); } + AttnError::HeadBlockFinalized { beacon_block_root } => { + debug!( + self.log, + "Rejected attestation to finalized block"; + "block_root" => ?beacon_block_root, + "attestation_slot" => failed_att.attestation().data.slot, + ); + + // We have to reject the message as it isn't a descendant of the finalized + // checkpoint. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + + // The peer that sent us this could be a lagger, or a spammer, or this failure could + // be due to us processing attestations extremely slowly. Don't be too harsh. 
+ self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "attn_to_finalized_block", + ); + } AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( HotColdDBError::AttestationStateIsFinalized { .. }, ))) => { @@ -1812,7 +1840,7 @@ impl Worker { }; // Penalize the peer if the message was more than one slot late - if excessively_late && invalid_in_hindsight() { + if STRICT_LATE_MESSAGE_PENALTIES && excessively_late && invalid_in_hindsight() { self.gossip_penalize_peer( peer_id, PeerAction::HighToleranceError, @@ -2076,4 +2104,50 @@ impl Worker { "type" => ?message_type, ); } + + /// Propagate (accept) if `is_timely == true`, otherwise ignore. + fn propagate_if_timely(&self, is_timely: bool, message_id: MessageId, peer_id: PeerId) { + if is_timely { + // The message is still relevant, propagate. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } else { + // The message is not relevant, ignore. It might be that this message became irrelevant + // during the time it took to process it, or it was received invalid. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + } + + /// If an attestation (agg. or unagg.) is still valid with respect to the current time (i.e., + /// timely), propagate it on gossip. Otherwise, ignore it. + fn propagate_attestation_if_timely( + &self, + attestation: &Attestation, + message_id: MessageId, + peer_id: PeerId, + ) { + let is_timely = attestation_verification::verify_propagation_slot_range( + &self.chain.slot_clock, + attestation, + ) + .is_ok(); + + self.propagate_if_timely(is_timely, message_id, peer_id) + } + + /// If a sync committee signature or sync committee contribution is still valid with respect to + /// the current time (i.e., timely), propagate it on gossip. Otherwise, ignore it. 
+ fn propagate_sync_message_if_timely( + &self, + sync_message_slot: Slot, + message_id: MessageId, + peer_id: PeerId, + ) { + let is_timely = self + .chain + .slot_clock + .now() + .map_or(false, |current_slot| sync_message_slot == current_slot); + + self.propagate_if_timely(is_timely, message_id, peer_id) + } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 35cf3fa90e..c6f68d5faa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,6 +7,7 @@ use crate::{ NetworkConfig, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; use lighthouse_network::{ @@ -279,7 +280,7 @@ impl NetworkService { log: network_log, }; - spawn_service(executor, network_service); + network_service.spawn_service(executor); Ok((network_globals, network_send)) } @@ -320,428 +321,531 @@ impl NetworkService { result } -} -fn spawn_service( - executor: task_executor::TaskExecutor, - mut service: NetworkService, -) { - let mut shutdown_sender = executor.shutdown_sender(); + fn send_to_router(&mut self, msg: RouterMessage) { + if let Err(mpsc::error::SendError(msg)) = self.router_send.send(msg) { + debug!(self.log, "Failed to send msg to router"; "msg" => ?msg); + } + } - // spawn on the current executor - executor.spawn(async move { + fn spawn_service(mut self, executor: task_executor::TaskExecutor) { + let mut shutdown_sender = executor.shutdown_sender(); - loop { - // build the futures to check simultaneously - tokio::select! 
{ - _ = service.metrics_update.tick(), if service.metrics_enabled => { - // update various network metrics - metrics::update_gossip_metrics::( - service.libp2p.swarm.behaviour().gs(), - &service.network_globals, - ); - // update sync metrics - metrics::update_sync_metrics(&service.network_globals); + // spawn on the current executor + let service_fut = async move { + loop { + tokio::select! { + _ = self.metrics_update.tick(), if self.metrics_enabled => { + // update various network metrics + metrics::update_gossip_metrics::( + self.libp2p.swarm.behaviour().gs(), + &self.network_globals, + ); + // update sync metrics + metrics::update_sync_metrics(&self.network_globals); + } - } - _ = service.gossipsub_parameter_update.tick() => { - if let Ok(slot) = service.beacon_chain.slot() { - if let Some(active_validators) = service.beacon_chain.with_head(|head| { - Ok::<_, BeaconChainError>( - head - .beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - service - .beacon_chain - .epoch() - .ok() - .map(|current_epoch| { - head - .beacon_state - .validators() - .iter() - .filter(|validator| - validator.is_active_at(current_epoch) - ) - .count() - }) - }) - ) - }).unwrap_or(None) { - if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() { - error!( - service.log, - "Failed to update gossipsub parameters"; - "active_validators" => active_validators - ); - } + _ = self.gossipsub_parameter_update.tick() => self.update_gossipsub_parameters(), + + // handle a message sent to the network + Some(msg) = self.network_recv.recv() => self.on_network_msg(msg, &mut shutdown_sender).await, + + // process any attestation service events + Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), + + // process any sync committee service events + Some(msg) = 
self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg), + + event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, + + Some(_) = &mut self.next_fork_update => self.update_next_fork(), + + Some(_) = &mut self.next_unsubscribe => { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + info!(self.log, "Unsubscribed from old fork topics"); + self.next_unsubscribe = Box::pin(None.into()); + } + + Some(_) = &mut self.next_fork_subscriptions => { + if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { + let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); + let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); + info!(self.log, "Subscribing to new fork topics"); + self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.next_fork_subscriptions = Box::pin(None.into()); + } + else { + error!(self.log, "Fork subscription scheduled but no fork scheduled"); } } } - // handle a message sent to the network - Some(message) = service.network_recv.recv() => { + metrics::update_bandwidth_metrics(self.libp2p.bandwidth.clone()); + } + }; + executor.spawn(service_fut, "network"); + } + + /// Handle an event received from the network. + async fn on_libp2p_event( + &mut self, + ev: Libp2pEvent, + shutdown_sender: &mut Sender, + ) { + match ev { + Libp2pEvent::Behaviour(event) => match event { + BehaviourEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + BehaviourEvent::PeerConnectedIncoming(_) + | BehaviourEvent::PeerBanned(_) + | BehaviourEvent::PeerUnbanned(_) => { + // No action required for these events. 
+ } + BehaviourEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + BehaviourEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { + peer_id, + id, + request, + }); + } + BehaviourEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { + peer_id, + request_id: id, + response, + }); + } + BehaviourEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + BehaviourEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + BehaviourEvent::PubsubMessage { + id, + source, + message, + .. + } => { match message { - NetworkMessage::SendRequest{ peer_id, request, request_id } => { - service.libp2p.send_request(peer_id, request_id, request); - } - NetworkMessage::SendResponse{ peer_id, response, id } => { - service.libp2p.send_response(peer_id, id, response); - } - NetworkMessage::SendErrorResponse{ peer_id, error, id, reason } => { - service.libp2p.respond_with_error(peer_id, id, error, reason); - } - NetworkMessage::UPnPMappingEstablished { tcp_socket, udp_socket} => { - service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); - // If there is an external TCP port update, modify our local ENR. 
- if let Some(tcp_socket) = tcp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - // if the discovery service is not auto-updating, update it with the - // UPnP mappings - if !service.discovery_auto_update { - if let Some(udp_socket) = udp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - } - }, - NetworkMessage::ValidationResult { - propagation_source, - message_id, - validation_result, - } => { - trace!(service.log, "Propagating gossipsub message"; - "propagation_peer" => ?propagation_source, - "message_id" => %message_id, - "validation_result" => ?validation_result - ); - service - .libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, message_id, validation_result - ); - } - NetworkMessage::Publish { messages } => { - let mut topic_kinds = Vec::new(); - for message in &messages { - if !topic_kinds.contains(&message.kind()) { - topic_kinds.push(message.kind()); - } - } - debug!( - service.log, - "Sending pubsub messages"; - "count" => messages.len(), - "topics" => ?topic_kinds - ); - service.libp2p.swarm.behaviour_mut().publish(messages); - } - NetworkMessage::ReportPeer { peer_id, action, source, msg } => service.libp2p.report_peer(&peer_id, action, source, msg), - NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), - NetworkMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = service + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. 
If so, we should process + // the attestation, else we just just propagate the Attestation. + let should_process = self .attestation_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Attestation validator subscription failed"; "error" => e); - } - } - NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = service - .sync_committee_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Sync committee calidator subscription failed"; "error" => e); - } - } - NetworkMessage::SubscribeCoreTopics => { - if service.shutdown_after_sync { - let _ = shutdown_sender - .send(ShutdownReason::Success( - "Beacon node completed sync. Shutting down as --shutdown-after-sync flag is enabled")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); - return; - } - let mut subscribed_topics: Vec = vec![]; - for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - - // If we are to subscribe to all subnets we do it here - if service.subscribe_all_subnets { - for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { - let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => 
%topic); - } - } - } - for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { - let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } - - if !subscribed_topics.is_empty() { - info!( - service.log, - "Subscribed to topics"; - "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() - ); - } - } - } - } - // process any attestation service events - Some(attestation_service_message) = service.attestation_service.next() => { - match attestation_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); - } - } - } - // process any sync committee service events - 
Some(sync_committee_service_message) = service.sync_committee_service.next() => { - match sync_committee_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); - } - } - } - libp2p_event = service.libp2p.next_event() => { - // poll the swarm - match libp2p_event { - Libp2pEvent::Behaviour(event) => match event { - BehaviourEvent::PeerConnectedOutgoing(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::PeerDialed(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send peer dialed to router"); }); - }, - BehaviourEvent::PeerConnectedIncoming(_) | BehaviourEvent::PeerBanned(_) | BehaviourEvent::PeerUnbanned(_) => { - // No action required for these events. 
- }, - BehaviourEvent::PeerDisconnected(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::PeerDisconnected(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send peer disconnect to router"); - }); - }, - BehaviourEvent::RequestReceived{peer_id, id, request} => { - let _ = service - .router_send - .send(RouterMessage::RPCRequestReceived{peer_id, id, request}) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); - } - BehaviourEvent::ResponseReceived{peer_id, id, response} => { - let _ = service - .router_send - .send(RouterMessage::RPCResponseReceived{ peer_id, request_id: id, response }) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); - - } - BehaviourEvent::RPCFailed{id, peer_id} => { - let _ = service - .router_send - .send(RouterMessage::RPCFailed{ peer_id, request_id: id}) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); - - } - BehaviourEvent::StatusPeer(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::StatusPeer(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send re-status peer to router"); - }); - } - BehaviourEvent::PubsubMessage { + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( id, source, message, - .. - } => { - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. 
- let should_process = service.attestation_service.should_process_attestation( - subnet, - attestation, - ); - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, should_process)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - _ => { - // all else is sent to the router - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, true)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - } - } + should_process, + )); } - Libp2pEvent::NewListenAddr(multiaddr) => { - service.network_globals.listen_multiaddrs.write().push(multiaddr); - } - Libp2pEvent::ZeroListeners => { - let _ = shutdown_sender - .send(ShutdownReason::Failure("All listeners are closed. Unable to listen")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } } } - Some(_) = &mut service.next_fork_update => { - let new_enr_fork_id = service.beacon_chain.enr_fork_id(); + }, + Libp2pEvent::NewListenAddr(multiaddr) => { + self.network_globals + .listen_multiaddrs + .write() + .push(multiaddr); + } + Libp2pEvent::ZeroListeners => { + let _ = shutdown_sender + .send(ShutdownReason::Failure( + "All listeners are closed. Unable to listen", + )) + .await + .map_err(|e| { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + }); + } + } + } - let fork_context = &service.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { - info!( - service.log, - "Transitioned to new fork"; - "old_fork" => ?fork_context.current_fork(), - "new_fork" => ?new_fork_name, - ); - fork_context.update_current_fork(*new_fork_name); - - service + /// Handle a message sent to the network service. 
+ async fn on_network_msg( + &mut self, + msg: NetworkMessage, + shutdown_sender: &mut Sender, + ) { + match msg { + NetworkMessage::SendRequest { + peer_id, + request, + request_id, + } => { + self.libp2p.send_request(peer_id, request_id, request); + } + NetworkMessage::SendResponse { + peer_id, + response, + id, + } => { + self.libp2p.send_response(peer_id, id, response); + } + NetworkMessage::SendErrorResponse { + peer_id, + error, + id, + reason, + } => { + self.libp2p.respond_with_error(peer_id, id, error, reason); + } + NetworkMessage::UPnPMappingEstablished { + tcp_socket, + udp_socket, + } => { + self.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); + // If there is an external TCP port update, modify our local ENR. + if let Some(tcp_socket) = tcp_socket { + if let Err(e) = self + .libp2p + .swarm + .behaviour_mut() + .discovery_mut() + .update_enr_tcp_port(tcp_socket.port()) + { + warn!(self.log, "Failed to update ENR"; "error" => e); + } + } + // if the discovery service is not auto-updating, update it with the + // UPnP mappings + if !self.discovery_auto_update { + if let Some(udp_socket) = udp_socket { + if let Err(e) = self .libp2p .swarm .behaviour_mut() - .update_fork_version(new_enr_fork_id.clone()); - // Reinitialize the next_fork_update - service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); - - // Set the next_unsubscribe delay. - let epoch_duration = service.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); - let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); - - // Update the `next_fork_subscriptions` timer if the next fork is known. 
- service.next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&service.beacon_chain).into()); - service.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); - info!(service.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); - } else { - crit!(service.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); - } - - } - Some(_) = &mut service.next_unsubscribe => { - let new_enr_fork_id = service.beacon_chain.enr_fork_id(); - service.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); - info!(service.log, "Unsubscribed from old fork topics"); - service.next_unsubscribe = Box::pin(None.into()); - } - Some(_) = &mut service.next_fork_subscriptions => { - if let Some((fork_name, _)) = service.beacon_chain.duration_to_next_fork() { - let fork_version = service.beacon_chain.spec.fork_version_for_name(fork_name); - let fork_digest = ChainSpec::compute_fork_digest(fork_version, service.beacon_chain.genesis_validators_root); - info!(service.log, "Subscribing to new fork topics"); - service.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); - service.next_fork_subscriptions = Box::pin(None.into()); - } - else { - error!(service.log, "Fork subscription scheduled but no fork scheduled"); + .discovery_mut() + .update_enr_udp_socket(udp_socket) + { + warn!(self.log, "Failed to update ENR"; "error" => e); + } } } } - metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); + NetworkMessage::ValidationResult { + propagation_source, + message_id, + validation_result, + } => { + trace!(self.log, "Propagating gossipsub message"; + "propagation_peer" => ?propagation_source, + "message_id" => %message_id, + "validation_result" => ?validation_result + ); + self.libp2p + .swarm + .behaviour_mut() + .report_message_validation_result( + &propagation_source, + message_id, + validation_result, + 
); + } + NetworkMessage::Publish { messages } => { + let mut topic_kinds = Vec::new(); + for message in &messages { + if !topic_kinds.contains(&message.kind()) { + topic_kinds.push(message.kind()); + } + } + debug!( + self.log, + "Sending pubsub messages"; + "count" => messages.len(), + "topics" => ?topic_kinds + ); + self.libp2p.swarm.behaviour_mut().publish(messages); + } + NetworkMessage::ReportPeer { + peer_id, + action, + source, + msg, + } => self.libp2p.report_peer(&peer_id, action, source, msg), + NetworkMessage::GoodbyePeer { + peer_id, + reason, + source, + } => self.libp2p.goodbye_peer(&peer_id, reason, source), + NetworkMessage::AttestationSubscribe { subscriptions } => { + if let Err(e) = self + .attestation_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Attestation validator subscription failed"; "error" => e); + } + } + NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { + if let Err(e) = self + .sync_committee_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); + } + } + NetworkMessage::SubscribeCoreTopics => { + if self.shutdown_after_sync { + if let Err(e) = shutdown_sender + .send(ShutdownReason::Success( + "Beacon node completed sync. 
\ + Shutting down as --shutdown-after-sync flag is enabled", + )) + .await + { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + } + return; + } + let mut subscribed_topics: Vec = vec![]; + for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + topic_kind.clone(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + + // If we are to subscribe to all subnets we do it here + if self.subscribe_all_subnets { + for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { + let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + let subnet_max = <::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64(); + for subnet_id in 0..subnet_max { + let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + subnet.into(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } + + if !subscribed_topics.is_empty() { + 
info!( + self.log, + "Subscribed to topics"; + "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + ); + } + } } - }, "network"); + } + + fn update_gossipsub_parameters(&mut self) { + if let Ok(slot) = self.beacon_chain.slot() { + if let Some(active_validators) = self + .beacon_chain + .with_head(|head| { + Ok::<_, BeaconChainError>( + head.beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + .or_else(|| { + // if active validator cached was not build we count the + // active validators + self.beacon_chain.epoch().ok().map(|current_epoch| { + head.beacon_state + .validators() + .iter() + .filter(|validator| validator.is_active_at(current_epoch)) + .count() + }) + }), + ) + }) + .unwrap_or(None) + { + if self + .libp2p + .swarm + .behaviour_mut() + .update_gossipsub_parameters(active_validators, slot) + .is_err() + { + error!( + self.log, + "Failed to update gossipsub parameters"; + "active_validators" => active_validators + ); + } + } + } + } + + fn on_attestation_service_msg(&mut self, msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + 
self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } + } + } + + fn on_sync_commitee_service_message(&mut self, msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } + } + } + + fn update_next_fork(&mut self) { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + + let fork_context = &self.fork_context; + if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + info!( + self.log, + "Transitioned to new fork"; + "old_fork" => ?fork_context.current_fork(), + "new_fork" => ?new_fork_name, + ); + fork_context.update_current_fork(*new_fork_name); + + self.libp2p + .swarm + .behaviour_mut() + .update_fork_version(new_enr_fork_id); + // Reinitialize the next_fork_update + self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); + + // Set the next_unsubscribe delay. 
+ let epoch_duration = + self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); + let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + + // Update the `next_fork_subscriptions` timer if the next fork is known. + self.next_fork_subscriptions = + Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); + self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); + info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + } else { + crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + } + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 610081319d..0c34eef274 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -54,6 +54,13 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Return type when attempting to start the backfill sync process. @@ -119,7 +126,7 @@ pub struct BackFillSync { /// Batches validated by this chain. validated_batches: u64, - /// We keep track of peer that are participating in the backfill sync. Unlike RangeSync, + /// We keep track of peers that are participating in the backfill sync. Unlike RangeSync, /// BackFillSync uses all synced peers to download the chain from. 
If BackFillSync fails, we don't /// want to penalize all our synced peers, so we use this variable to keep track of peers that /// have participated and only penalize these peers if backfill sync fails. @@ -539,7 +546,7 @@ impl BackFillSync { "error" => %e, "batch" => self.processing_target); // This is unlikely to happen but it would stall syncing since the batch now has no // blocks to continue, and the chain is expecting a processing result that won't - // arrive. To mitigate this, (fake) fail this processing so that the batch is + // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. self.on_batch_process_result( network, @@ -795,7 +802,7 @@ impl BackFillSync { for attempt in batch.attempts() { // The validated batch has been re-processed if attempt.hash != processed_attempt.hash { - // The re-downloaded version was different + // The re-downloaded version was different. if processed_attempt.peer_id != attempt.peer_id { // A different peer sent the correct batch, the previous peer did not // We negatively score the original peer. diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 32f2a26367..960dd12afc 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -131,7 +131,7 @@ pub enum SyncRequestType { RangeSync(Epoch, ChainId), } -/// The result of processing a multiple blocks (a chain segment). +/// The result of processing multiple blocks (a chain segment). #[derive(Debug)] pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. 
diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index e0b15cb498..7239081ad1 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -19,6 +19,34 @@ pub trait BatchConfig { fn max_batch_download_attempts() -> u8; /// The max batch processing attempts. fn max_batch_processing_attempts() -> u8; + /// Hashing function of a batch's attempt. Used for scoring purposes. + /// + /// When a batch fails processing, it is possible that the batch is wrong (faulty or + /// incomplete) or that a previous one is wrong. For this reason we need to re-download and + /// re-process the batches awaiting validation and the current one. Consider this scenario: + /// + /// ```ignore + /// BatchA BatchB BatchC BatchD + /// -----X Empty Empty Y----- + /// ``` + /// + /// BatchA declares that it refers to X, but BatchD declares that its first block is Y. There is no + /// way to know if BatchD is faulty/incomplete or if batches B and/or C are missing blocks. It is + /// also possible that BatchA belongs to a different chain to the rest starting in some block + /// midway in the batch's range. For this reason, the four batches would need to be re-downloaded + /// and re-processed. + /// + /// If batchD was actually good, it will still register two processing attempts for the same set of + /// blocks. In this case, we don't want to penalize the peer that provided the first version, since + /// it's equal to the successfully processed one. + /// + /// The function `batch_attempt_hash` provides a way to compare two batch attempts without + /// storing the full set of blocks. + /// + /// Note that simpler hashing functions considered in the past (hash of first block, hash of last + /// block, number of received blocks) are not good enough to differentiate attempts. For this + /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync.
+ fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -30,6 +58,11 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Error type of a batch in a wrong state. @@ -300,7 +333,7 @@ impl BatchInfo { pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { - self.state = BatchState::Processing(Attempt::new(peer, &blocks)); + self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); Ok(blocks) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -386,11 +419,8 @@ pub struct Attempt { } impl Attempt { - #[allow(clippy::ptr_arg)] - fn new(peer_id: PeerId, blocks: &Vec>) -> Self { - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - blocks.hash(&mut hasher); - let hash = hasher.finish(); + fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 2cc3ffaf6b..c9b252ca11 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -6,15 +6,16 @@ mod metrics; mod persistence; mod sync_aggregate_id; +pub use attestation::AttMaxCover; +pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase, }; use crate::sync_aggregate_id::SyncAggregateId; -use attestation::AttMaxCover; use attestation_id::AttestationId; use attester_slashing::AttesterSlashingMaxCover; -use max_cover::{maximum_cover, MaxCover}; +use max_cover::maximum_cover; use parking_lot::RwLock; use 
state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 8ea35f7348..7836ac14a4 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -407,4 +407,44 @@ The endpoint will return immediately. See the beacon node logs for an indication ### `/lighthouse/database/historical_blocks` Manually provide `SignedBeaconBlock`s to backfill the database. This is intended -for use by Lighthouse developers during testing only. \ No newline at end of file +for use by Lighthouse developers during testing only. + +### `/lighthouse/block_rewards` + +Fetch information about the block rewards paid to proposers for a range of consecutive blocks. + +Two query parameters are required: + +* `start_slot` (inclusive): the slot of the first block to compute rewards for. +* `end_slot` (inclusive): the slot of the last block to compute rewards for. + +Example: + +```bash +curl "http://localhost:5052/lighthouse/block_rewards?start_slot=1&end_slot=32" | jq +``` + +```json +[ + { + "block_root": "0x51576c2fcf0ab68d7d93c65e6828e620efbb391730511ffa35584d6c30e51410", + "attestation_rewards": { + "total": 4941156, + }, + .. + }, + .. +] +``` + +Caveats: + +* Presently only attestation rewards are computed. +* The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] + in the source. +* For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. + This is because the state _prior_ to the `start_slot` needs to be loaded from the database, and + loading a state on a boundary is most efficient. 
+ +[block_reward_src]: +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_reward.rs \ No newline at end of file diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index d09a9e54a2..33f6f6ff7a 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -6,13 +6,13 @@ The validator client HTTP server requires that all requests have the following HTTP header: - Name: `Authorization` -- Value: `Basic ` +- Value: `Bearer ` Where `` is a string that can be obtained from the validator client host. Here is an example `Authorization` header: ``` -Authorization Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 +Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 ``` ## Obtaining the API token @@ -35,12 +35,27 @@ to the file containing the api token. Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/prater/validators/api-token.txt", listen_address: 127.0.0.1:5062 ``` +The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only +one accessible without the token): + +```bash +curl http://localhost:5062/lighthouse/auth +``` + +Response: + +```json +{ + "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" +} +``` + ## Example Here is an example `curl` command using the API token in the `Authorization` header: ```bash -curl localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" +curl localhost:5062/lighthouse/version -H "Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" ``` The server should respond with its version: diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 16fd8ff8a7..14d18312e5 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -4,15 +4,19 @@ 
HTTP Path | Description | | --- | -- | -[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version -[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine -[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator -[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators -[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator -[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator +[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. +[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. +[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator. +[`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. +[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. +[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. +[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator. [`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. [`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. +[`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. + +In addition to the above endpoints Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/). 
## `GET /lighthouse/version` @@ -153,6 +157,37 @@ Typical Responses | 200 } ``` +## `GET /lighthouse/auth` + +Fetch the filesystem path of the [authorization token](./api-vc-auth-header.md). +Unlike the other endpoints this may be called _without_ providing an authorization token. + +This API is intended to be called from the same machine as the validator client, so that the token +file may be read by a local user with access rights. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/auth` +Method | GET +Required Headers | - +Typical Responses | 200 + +### Example Path + +``` +localhost:5062/lighthouse/auth +``` + +### Example Response Body + +```json +{ + "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" +} +``` + ## `GET /lighthouse/validators` Lists all validators managed by this validator client. diff --git a/book/src/api-vc.md b/book/src/api-vc.md index 6ee79d4f72..74c493ebea 100644 --- a/book/src/api-vc.md +++ b/book/src/api-vc.md @@ -1,9 +1,12 @@ # Validator Client API -Lighthouse implements a HTTP/JSON API for the validator client. Since there is -no Eth2 standard validator client API, Lighthouse has defined its own. +Lighthouse implements a JSON HTTP API for the validator client which enables programmatic management +of validators and keys. -A full list of endpoints can be found in [Endpoints](./api-vc-endpoints.md). +The API includes all of the endpoints from the [standard keymanager +API](https://ethereum.github.io/keymanager-APIs/) that is implemented by other clients and remote +signers. It also includes some Lighthouse-specific endpoints which are described in +[Endpoints](./api-vc-endpoints.md). > Note: All requests to the HTTP server must supply an > [`Authorization`](./api-vc-auth-header.md) header. 
All responses contain a diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 7dee3320e9..9b458078e2 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -19,15 +19,16 @@ project. The `Makefile` in the project contains four targets for cross-compiling: - `build-x86_64`: builds an optimized version for x86_64 processors (suitable for most users). - Supports Intel Broadwell (2014) and newer, and AMD Ryzen (2017) and newer. - `build-x86_64-portable`: builds a version for x86_64 processors which avoids using some modern CPU - instructions that are incompatible with older CPUs. Suitable for pre-Broadwell/Ryzen CPUs. -- `build-aarch64`: builds an optimized version for 64-bit ARM processors - (suitable for Raspberry Pi 4). + instructions that are incompatible with older CPUs. +- `build-aarch64`: builds an optimized version for 64-bit ARM processors (suitable for Raspberry Pi 4). - `build-aarch64-portable`: builds a version for 64-bit ARM processors which avoids using some modern CPU instructions. In practice, very few ARM processors lack the instructions necessary to run the faster non-portable build. +For more information about optimized vs portable builds see +[Portability](./installation-binaries.md#portability). + ### Example ```bash diff --git a/book/src/docker.md b/book/src/docker.md index 965dd7816f..eebbd5dde2 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -1,20 +1,17 @@ # Docker Guide -This repository has a `Dockerfile` in the root which builds an image with the -`lighthouse` binary installed. A pre-built image is available on Docker Hub. +There are two ways to obtain a Lighthouse Docker image: -## Obtaining the Docker image +1. [Docker Hub](#docker-hub), or +2. By [building a Docker image from source](#building-the-docker-image). -There are two ways to obtain the docker image, either via Docker Hub or -building the image from source. 
Once you have obtained the docker image via one -of these methods, proceed to [Using the Docker image](#using-the-docker-image). +Once you have obtained the docker image via one of these methods, proceed to [Using the Docker +image](#using-the-docker-image). -### Docker Hub +## Docker Hub -Lighthouse maintains the -[sigp/lighthouse](https://hub.docker.com/repository/docker/sigp/lighthouse/) -Docker Hub repository which provides an easy way to run Lighthouse without -building the image yourself. +Lighthouse maintains the [sigp/lighthouse][docker_hub] Docker Hub repository which provides an easy +way to run Lighthouse without building the image yourself. Obtain the latest image with: @@ -28,26 +25,69 @@ Download and test the image with: $ docker run sigp/lighthouse lighthouse --version ``` -If you can see the latest [Lighthouse -release](https://github.com/sigp/lighthouse/releases) version (see example -below), then you've -successfully installed Lighthouse via Docker. +If you can see the latest [Lighthouse release](https://github.com/sigp/lighthouse/releases) version +(see example below), then you've successfully installed Lighthouse via Docker. -#### Example Version Output +> Pro tip: try the `latest-modern` image for a 20-30% speed-up! See [Available Docker +> Images](#available-docker-images) below. + +### Example Version Output ``` Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` -> Note: when you're running the Docker Hub image you're relying upon a -> pre-built binary instead of building from source. +### Available Docker Images -> Note: due to the Docker Hub image being compiled to work on arbitrary machines, it isn't as highly -> optimized as an image built from source. We're working to improve this, but for now if you want -> the absolute best performance, please build the image yourself. +There are several images available on Docker Hub. 
-### Building the Docker Image +Most users should use the `latest-modern` tag, which corresponds to the latest stable release of +Lighthouse with optimizations enabled. If you are running on older hardware then the default +`latest` image bundles a _portable_ version of Lighthouse which is slower but with better hardware +compatibility (see [Portability](./installation-binaries.md#portability)). + +To install a specific tag (in this case `latest-modern`) add the tag name to your `docker` commands +like so: + +``` +$ docker pull sigp/lighthouse:latest-modern +``` + +Image tags follow this format: + +``` +${version}${arch}${stability}${modernity} +``` + +The `version` is: + +* `vX.Y.Z` for a tagged Lighthouse release, e.g. `v2.1.1` +* `latest` for the `stable` branch (latest release) or `unstable` branch + +The `stability` is: + +* `-unstable` for the `unstable` branch +* empty for a tagged release or the `stable` branch + +The `arch` is: + +* `-amd64` for x86_64, e.g. Intel, AMD +* `-arm64` for aarch64, e.g. 
Raspberry Pi 4 +* empty for a multi-arch image (works on either `amd64` or `arm64` platforms) + +The `modernity` is: + +* `-modern` for optimized builds +* empty for a `portable` unoptimized build + +Examples: + +* `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) +* `latest-amd64`: most recent Lighthouse release for older x86_64 CPUs +* `latest-amd64-unstable`: most recent `unstable` build for older x86_64 CPUs + +## Building the Docker Image To build the image from source, navigate to the root of the repository and run: @@ -103,3 +143,5 @@ If you use the `--http` flag you may also want to expose the HTTP port with `-p ```bash $ docker run -p 9000:9000 -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0 ``` + +[docker_hub]: https://hub.docker.com/repository/docker/sigp/lighthouse/ diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 4f092c1e29..7a5aad32bf 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -20,13 +20,13 @@ Additionally there is also a `-portable` suffix which indicates if the `portable - Without `portable`: uses modern CPU instructions to provide the fastest signature verification times (may cause `Illegal instruction` error on older CPUs) - With `portable`: approx. 20% slower, but should work on all modern 64-bit processors. +For details, see [Portability](#portability). + ## Usage Each binary is contained in a `.tar.gz` archive. For this example, lets assume the user needs a portable `x86_64` binary. -> Whilst this example uses `v0.2.13` we recommend always using the latest release. - ### Steps 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and @@ -41,6 +41,19 @@ a portable `x86_64` binary. > Windows users will need to execute the commands in Step 3 from PowerShell.
+## Portability + +Portable builds of Lighthouse are designed to run on the widest range of hardware possible, but +sacrifice the ability to make use of modern CPU instructions. + +If you have a modern CPU then you should try running a non-portable build to get a 20-30% speed up. + +* For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set +extension is compatible with the optimized build. This includes Intel Broadwell (2014) +and newer, and AMD Ryzen (2017) and newer. +* For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by +the Raspberry Pi 4. + ## Troubleshooting If you get a SIGILL (exit code 132), then your CPU is incompatible with the optimized build diff --git a/book/src/validator-create.md b/book/src/validator-create.md index 73fff42dfe..91af60078a 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -75,7 +75,7 @@ The example assumes that the `wally` wallet was generated from the [wallet](./wallet-create.md) example. 
```bash -lighthouse --network pyrmont account validator create --name wally --wallet-password wally.pass --count 1 +lighthouse --network pyrmont account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` This command will: diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 2d596aad6e..32b7b19511 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.1.0" +version = "2.1.2" authors = ["Sigma Prime "] edition = "2018" diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index 2afc063808..6b933013fc 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -63,6 +63,9 @@ pub fn run( EthSpecId::Mainnet => { main::(lh_matches, bn_matches, eth2_network_config, log) } + EthSpecId::Gnosis => { + main::(lh_matches, bn_matches, eth2_network_config, log) + } } { slog::crit!(slog_scope::logger(), "{}", e); } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index dc79a1f203..89de380385 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -85,15 +85,23 @@ pub fn write_file_via_temporary( Ok(()) } -/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN`. +/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `PlainText`. pub fn random_password() -> PlainText { + random_password_raw_string().into_bytes().into() +} + +/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `ZeroizeString`. +pub fn random_password_string() -> ZeroizeString { + random_password_raw_string().into() +} + +/// Common implementation for `random_password` and `random_password_string`. 
+fn random_password_raw_string() -> String { rand::thread_rng() .sample_iter(&Alphanumeric) .take(DEFAULT_PASSWORD_LEN) .map(char::from) - .collect::() - .into_bytes() - .into() + .collect() } /// Remove any number of newline or carriage returns from the end of a vector of bytes. diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 418c0fb3c6..d66683bee0 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -46,9 +46,6 @@ pub enum Error { } /// Defines how the validator client should attempt to sign messages for this validator. -/// -/// Presently there is only a single variant, however we expect more variants to arise (e.g., -/// remote signing). #[derive(Clone, PartialEq, Serialize, Deserialize)] #[serde(tag = "type")] pub enum SigningDefinition { @@ -78,6 +75,12 @@ pub enum SigningDefinition { }, } +impl SigningDefinition { + pub fn is_local_keystore(&self) -> bool { + matches!(self, SigningDefinition::LocalKeystore { .. }) + } +} + /// A validator that may be initialized by this validator client. /// /// Presently there is only a single variant, however we expect more variants to arise (e.g., @@ -293,6 +296,11 @@ impl ValidatorDefinitions { Ok(()) } + /// Retain only the definitions matching the given predicate. + pub fn retain(&mut self, f: impl FnMut(&ValidatorDefinition) -> bool) { + self.0.retain(f); + } + /// Adds a new `ValidatorDefinition` to `self`. 
pub fn push(&mut self, def: ValidatorDefinition) { self.0.push(def) diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f1c9f5061e..d039a0c91a 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } +slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } [target.'cfg(target_os = "linux")'.dependencies] # TODO: update psutil once fix is merged: https://github.com/rust-psutil/rust-psutil/pull/93 @@ -35,4 +36,4 @@ procinfo = { version = "0.4.2", optional = true } [features] default = ["lighthouse"] -lighthouse = ["proto_array", "psutil", "procinfo", "store"] +lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 153667d7e9..608a2c9e22 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -9,6 +9,7 @@ #[cfg(feature = "lighthouse")] pub mod lighthouse; +#[cfg(feature = "lighthouse")] pub mod lighthouse_vc; pub mod mixin; pub mod types; @@ -27,6 +28,7 @@ use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; +use std::path::PathBuf; use std::time::Duration; pub const V1: EndpointVersion = EndpointVersion(1); @@ -58,6 +60,12 @@ pub enum Error { InvalidServerSentEvent(String), /// The server returned an invalid SSZ response. InvalidSsz(ssz::DecodeError), + /// An I/O error occurred while loading an API token from disk. + TokenReadError(PathBuf, std::io::Error), + /// The client has been configured without a server pubkey, but requires one for this request. + NoServerPubkey, + /// The client has been configured without an API token, but requires one for this request. 
+ NoToken, } impl From for Error { @@ -81,6 +89,8 @@ impl Error { Error::InvalidJson(_) => None, Error::InvalidServerSentEvent(_) => None, Error::InvalidSsz(_) => None, + Error::TokenReadError(..) => None, + Error::NoServerPubkey | Error::NoToken => None, } } } @@ -245,6 +255,7 @@ impl BeaconNodeHttpClient { } /// Perform a HTTP POST request, returning a JSON response. + #[cfg(feature = "lighthouse")] async fn post_with_response( &self, url: U, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index a8993a39c5..adf73d8b92 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,5 +1,8 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. +mod attestation_performance; +mod block_rewards; + use crate::{ ok_or_error, types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, @@ -12,6 +15,10 @@ use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use store::{AnchorInfo, Split}; +pub use attestation_performance::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; +pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs new file mode 100644 index 0000000000..5ce1d90a38 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_performance.rs @@ -0,0 +1,39 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::Epoch; + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceStatistics { + pub active: bool, + pub head: bool, + pub target: bool, + pub source: bool, + #[serde(skip_serializing_if = "Option::is_none")] + 
pub delay: Option, +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformance { + pub index: u64, + pub epochs: HashMap, +} + +impl AttestationPerformance { + pub fn initialize(indices: Vec) -> Vec { + let mut vec = Vec::with_capacity(indices.len()); + for index in indices { + vec.push(Self { + index, + ..Default::default() + }) + } + vec + } +} + +/// Query parameters for the `/lighthouse/analysis/attestation_performance` endpoint. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceQuery { + pub start_epoch: Epoch, + pub end_epoch: Epoch, +} diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs new file mode 100644 index 0000000000..186cbd888c --- /dev/null +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -0,0 +1,54 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::{Hash256, Slot}; + +/// Details about the rewards paid to a block proposer for proposing a block. +/// +/// All rewards in GWei. +/// +/// Presently this only counts attestation rewards, but in future should be expanded +/// to include information on slashings and sync committee aggregates too. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockReward { + /// Sum of all reward components. + pub total: u64, + /// Block root of the block that these rewards are for. + pub block_root: Hash256, + /// Metadata about the block, particularly reward-relevant metadata. + pub meta: BlockRewardMeta, + /// Rewards due to attestations. + pub attestation_rewards: AttestationRewards, + /// Sum of rewards due to sync committee signatures. 
+ pub sync_committee_rewards: u64, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardMeta { + pub slot: Slot, + pub parent_slot: Slot, + pub proposer_index: u64, + pub graffiti: String, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationRewards { + /// Total block reward from attestations included. + pub total: u64, + /// Total rewards from previous epoch attestations. + pub prev_epoch_total: u64, + /// Total rewards from current epoch attestations. + pub curr_epoch_total: u64, + /// Vec of attestation rewards for each attestation included. + /// + /// Each element of the vec is a map from validator index to reward. + pub per_attestation_rewards: Vec>, +} + +/// Query parameters for the `/lighthouse/block_rewards` endpoint. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardsQuery { + /// Lower slot limit for block rewards returned (inclusive). + pub start_slot: Slot, + /// Upper slot limit for block rewards returned (inclusive). 
+ pub end_slot: Slot, +} diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index cd640e6158..e7c74668e8 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -10,6 +10,9 @@ use reqwest::{ use ring::digest::{digest, SHA256}; use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; +use std::fmt::{self, Display}; +use std::fs; +use std::path::Path; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; @@ -20,18 +23,36 @@ pub use reqwest::{Response, StatusCode, Url}; pub struct ValidatorClientHttpClient { client: reqwest::Client, server: SensitiveUrl, - secret: ZeroizeString, - server_pubkey: PublicKey, - send_authorization_header: bool, + secret: Option, + server_pubkey: Option, + authorization_header: AuthorizationHeader, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum AuthorizationHeader { + /// Do not send any Authorization header. + Omit, + /// Send a `Basic` Authorization header (legacy). + Basic, + /// Send a `Bearer` Authorization header. + Bearer, +} + +impl Display for AuthorizationHeader { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // The `Omit` variant should never be `Display`ed, but would result in a harmless rejection. + write!(f, "{:?}", self) + } } /// Parse an API token and return a secp256k1 public key. -pub fn parse_pubkey(secret: &str) -> Result { +/// +/// If the token does not start with the Lighthouse token prefix then `Ok(None)` will be returned. +/// An error will be returned if the token looks like a Lighthouse token but doesn't correspond to a +/// valid public key. +pub fn parse_pubkey(secret: &str) -> Result, Error> { let secret = if !secret.starts_with(SECRET_PREFIX) { - return Err(Error::InvalidSecret(format!( - "secret does not start with {}", - SECRET_PREFIX - ))); + return Ok(None); } else { &secret[SECRET_PREFIX.len()..] 
}; @@ -52,16 +73,31 @@ pub fn parse_pubkey(secret: &str) -> Result { PublicKey::parse_compressed(&arr) .map_err(|e| Error::InvalidSecret(format!("invalid secp256k1 pubkey: {:?}", e))) }) + .map(Some) } impl ValidatorClientHttpClient { + /// Create a new client pre-initialised with an API token. pub fn new(server: SensitiveUrl, secret: String) -> Result { Ok(Self { client: reqwest::Client::new(), server, server_pubkey: parse_pubkey(&secret)?, - secret: secret.into(), - send_authorization_header: true, + secret: Some(secret.into()), + authorization_header: AuthorizationHeader::Bearer, + }) + } + + /// Create a client without an API token. + /// + /// A token can be fetched by using `self.get_auth`, and then reading the token from disk. + pub fn new_unauthenticated(server: SensitiveUrl) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + secret: None, + server_pubkey: None, + authorization_header: AuthorizationHeader::Omit, }) } @@ -74,8 +110,35 @@ impl ValidatorClientHttpClient { client, server, server_pubkey: parse_pubkey(&secret)?, - secret: secret.into(), - send_authorization_header: true, + secret: Some(secret.into()), + authorization_header: AuthorizationHeader::Bearer, + }) + } + + /// Get a reference to this client's API token, if any. + pub fn api_token(&self) -> Option<&ZeroizeString> { + self.secret.as_ref() + } + + /// Read an API token from the specified `path`, stripping any trailing whitespace. + pub fn load_api_token_from_file(path: &Path) -> Result { + let token = fs::read_to_string(path).map_err(|e| Error::TokenReadError(path.into(), e))?; + Ok(ZeroizeString::from(token.trim_end().to_string())) + } + + /// Add an authentication token to use when making requests. + /// + /// If the token is Lighthouse-like, a pubkey derivation will be attempted. In the case + /// of failure the token will still be stored, and the client can continue to be used to + /// communicate with non-Lighthouse nodes. 
+ pub fn add_auth_token(&mut self, token: ZeroizeString) -> Result<(), Error> { + let pubkey_res = parse_pubkey(token.as_str()); + + self.secret = Some(token); + self.authorization_header = AuthorizationHeader::Bearer; + + pubkey_res.map(|opt_pubkey| { + self.server_pubkey = opt_pubkey; }) } @@ -84,10 +147,20 @@ impl ValidatorClientHttpClient { /// Failing to send the `Authorization` header will cause the VC to reject requests with a 403. /// This function is intended only for testing purposes. pub fn send_authorization_header(&mut self, should_send: bool) { - self.send_authorization_header = should_send; + if should_send { + self.authorization_header = AuthorizationHeader::Bearer; + } else { + self.authorization_header = AuthorizationHeader::Omit; + } + } + + /// Use the legacy basic auth style (bearer auth preferred by default now). + pub fn use_basic_auth(&mut self) { + self.authorization_header = AuthorizationHeader::Basic; } async fn signed_body(&self, response: Response) -> Result { + let server_pubkey = self.server_pubkey.as_ref().ok_or(Error::NoServerPubkey)?; let sig = response .headers() .get("Signature") @@ -105,7 +178,7 @@ impl ValidatorClientHttpClient { .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; - Some(libsecp256k1::verify(&message, &sig, &self.server_pubkey)) + Some(libsecp256k1::verify(&message, &sig, server_pubkey)) }) .filter(|is_valid| *is_valid) .ok_or(Error::InvalidSignatureHeader)?; @@ -121,11 +194,18 @@ impl ValidatorClientHttpClient { fn headers(&self) -> Result { let mut headers = HeaderMap::new(); - if self.send_authorization_header { - let header_value = HeaderValue::from_str(&format!("Basic {}", self.secret.as_str())) - .map_err(|e| { - Error::InvalidSecret(format!("secret is invalid as a header value: {}", e)) - })?; + if self.authorization_header == AuthorizationHeader::Basic + || self.authorization_header == AuthorizationHeader::Bearer + { + let secret = self.secret.as_ref().ok_or(Error::NoToken)?; + let 
header_value = HeaderValue::from_str(&format!( + "{} {}", + self.authorization_header, + secret.as_str() + )) + .map_err(|e| { + Error::InvalidSecret(format!("secret is invalid as a header value: {}", e)) + })?; headers.insert("Authorization", header_value); } @@ -133,8 +213,8 @@ impl ValidatorClientHttpClient { Ok(headers) } - /// Perform a HTTP GET request. - async fn get(&self, url: U) -> Result { + /// Perform a HTTP GET request, returning the `Response` for further processing. + async fn get_response(&self, url: U) -> Result { let response = self .client .get(url) @@ -142,20 +222,25 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::Reqwest)?; - let response = ok_or_error(response).await?; + ok_or_error(response).await + } + + async fn get(&self, url: U) -> Result { + let response = self.get_response(url).await?; self.signed_json(response).await } + async fn get_unsigned(&self, url: U) -> Result { + self.get_response(url) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + /// Perform a HTTP GET request, returning `None` on a 404 error. async fn get_opt(&self, url: U) -> Result, Error> { - let response = self - .client - .get(url) - .headers(self.headers()?) - .send() - .await - .map_err(Error::Reqwest)?; - match ok_or_error(response).await { + match self.get_response(url).await { Ok(resp) => self.signed_json(resp).await.map(Option::Some), Err(err) => { if err.status() == Some(StatusCode::NOT_FOUND) { @@ -168,11 +253,11 @@ impl ValidatorClientHttpClient { } /// Perform a HTTP POST request. 
- async fn post( + async fn post_with_raw_response( &self, url: U, body: &T, - ) -> Result { + ) -> Result { let response = self .client .post(url) @@ -181,10 +266,27 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::Reqwest)?; - let response = ok_or_error(response).await?; + ok_or_error(response).await + } + + async fn post( + &self, + url: U, + body: &T, + ) -> Result { + let response = self.post_with_raw_response(url, body).await?; self.signed_json(response).await } + async fn post_with_unsigned_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self.post_with_raw_response(url, body).await?; + Ok(response.json().await?) + } + /// Perform a HTTP PATCH request. async fn patch(&self, url: U, body: &T) -> Result<(), Error> { let response = self @@ -200,6 +302,24 @@ impl ValidatorClientHttpClient { Ok(()) } + /// Perform a HTTP DELETE request. + async fn delete_with_unsigned_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self + .client + .delete(url) + .headers(self.headers()?) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + let response = ok_or_error(response).await?; + Ok(response.json().await?) + } + /// `GET lighthouse/version` pub async fn get_lighthouse_version(&self) -> Result, Error> { let mut path = self.server.full.clone(); @@ -317,7 +437,7 @@ impl ValidatorClientHttpClient { pub async fn post_lighthouse_validators_web3signer( &self, request: &[Web3SignerValidatorRequest], - ) -> Result, Error> { + ) -> Result<(), Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -345,6 +465,50 @@ impl ValidatorClientHttpClient { self.patch(path, &ValidatorPatchRequest { enabled }).await } + + fn make_keystores_url(&self) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("keystores"); + Ok(url) + } + + /// `GET lighthouse/auth` + pub async fn get_auth(&self) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("auth"); + self.get_unsigned(url).await + } + + /// `GET eth/v1/keystores` + pub async fn get_keystores(&self) -> Result { + let url = self.make_keystores_url()?; + self.get_unsigned(url).await + } + + /// `POST eth/v1/keystores` + pub async fn post_keystores( + &self, + req: &ImportKeystoresRequest, + ) -> Result { + let url = self.make_keystores_url()?; + self.post_with_unsigned_response(url, req).await + } + + /// `DELETE eth/v1/keystores` + pub async fn delete_keystores( + &self, + req: &DeleteKeystoresRequest, + ) -> Result { + let url = self.make_keystores_url()?; + self.delete_with_unsigned_response(url, req).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an diff --git a/common/eth2/src/lighthouse_vc/mod.rs b/common/eth2/src/lighthouse_vc/mod.rs index b7de7c7152..81b4fca283 100644 --- a/common/eth2/src/lighthouse_vc/mod.rs +++ b/common/eth2/src/lighthouse_vc/mod.rs @@ -1,4 +1,5 @@ pub mod http_client; +pub mod std_types; pub mod types; /// The number of bytes in the secp256k1 public key used as the authorization token for the VC API. 
diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs new file mode 100644 index 0000000000..ebcce3fab0 --- /dev/null +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -0,0 +1,104 @@ +use account_utils::ZeroizeString; +use eth2_keystore::Keystore; +use serde::{Deserialize, Serialize}; +use slashing_protection::interchange::Interchange; +use types::PublicKeyBytes; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct AuthResponse { + pub token_path: String, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct ListKeystoresResponse { + pub data: Vec, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct SingleKeystoreResponse { + pub validating_pubkey: PublicKeyBytes, + pub derivation_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub readonly: Option, +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ImportKeystoresRequest { + pub keystores: Vec, + pub passwords: Vec, + pub slashing_protection: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); + +impl std::ops::Deref for KeystoreJsonStr { + type Target = Keystore; + fn deref(&self) -> &Keystore { + &self.0 + } +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); + +#[derive(Debug, Deserialize, Serialize)] +pub struct ImportKeystoresResponse { + pub data: Vec>, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct Status { + pub status: T, + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, +} + +impl Status { + pub fn ok(status: T) -> Self { + Self { + status, + message: None, + } + } + + pub fn error(status: T, message: String) -> Self { + Self { + status, + message: 
Some(message), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum ImportKeystoreStatus { + Imported, + Duplicate, + Error, +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DeleteKeystoresRequest { + pub pubkeys: Vec, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct DeleteKeystoresResponse { + pub data: Vec>, + #[serde(with = "eth2_serde_utils::json_str")] + pub slashing_protection: Interchange, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum DeleteKeystoreStatus { + Deleted, + NotActive, + NotFound, + Error, +} diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 9e311c9d6b..25b3050538 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use std::path::PathBuf; pub use crate::lighthouse::Health; +pub use crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; pub use types::*; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a761b9ed12..78567ad83c 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -10,6 +10,9 @@ use std::str::{from_utf8, FromStr}; use std::time::Duration; pub use types::*; +#[cfg(feature = "lighthouse")] +use crate::lighthouse::BlockReward; + /// An API error serializable to JSON. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] @@ -839,6 +842,8 @@ pub enum EventKind { ChainReorg(SseChainReorg), ContributionAndProof(Box>), LateHead(SseLateHead), + #[cfg(feature = "lighthouse")] + BlockReward(BlockReward), } impl EventKind { @@ -852,6 +857,8 @@ impl EventKind { EventKind::ChainReorg(_) => "chain_reorg", EventKind::ContributionAndProof(_) => "contribution_and_proof", EventKind::LateHead(_) => "late_head", + #[cfg(feature = "lighthouse")] + EventKind::BlockReward(_) => "block_reward", } } @@ -904,6 +911,10 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Contribution and Proof: {:?}", e)) })?, ))), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), + )?)), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -929,6 +940,8 @@ pub enum EventTopic { ChainReorg, ContributionAndProof, LateHead, + #[cfg(feature = "lighthouse")] + BlockReward, } impl FromStr for EventTopic { @@ -944,6 +957,8 @@ impl FromStr for EventTopic { "chain_reorg" => Ok(EventTopic::ChainReorg), "contribution_and_proof" => Ok(EventTopic::ContributionAndProof), "late_head" => Ok(EventTopic::LateHead), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventTopic::BlockReward), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -960,6 +975,8 @@ impl fmt::Display for EventTopic { EventTopic::ChainReorg => write!(f, "chain_reorg"), EventTopic::ContributionAndProof => write!(f, "contribution_and_proof"), EventTopic::LateHead => write!(f, "late_head"), + #[cfg(feature = "lighthouse")] + EventTopic::BlockReward => write!(f, "block_reward"), } } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index b45ad9d1e2..fafa15ef8d 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -53,6 +53,13 @@ 
impl Eth2Config { spec: ChainSpec::minimal(), } } + + pub fn gnosis() -> Self { + Self { + eth_spec_id: EthSpecId::Gnosis, + spec: ChainSpec::gnosis(), + } + } } /// A directory that can be built by downloading files via HTTP. @@ -229,5 +236,6 @@ macro_rules! define_hardcoded_nets { define_hardcoded_nets!( (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), (pyrmont, "pyrmont", GENESIS_STATE_IS_KNOWN), - (prater, "prater", GENESIS_STATE_IS_KNOWN) + (prater, "prater", GENESIS_STATE_IS_KNOWN), + (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN) ); diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml new file mode 100644 index 0000000000..4b232d8b32 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -0,0 +1,5 @@ +# Gnosis Chain Team +- enr:-IS4QGmLwm7gFd0L0CEisllrb1op3v-wAGSc7_pwSMGgN3bOS9Fz7m1dWbwuuPHKqeETz9MbhjVuoWk0ohkyRv98kVoBgmlkgnY0gmlwhGjtlgaJc2VjcDI1NmsxoQLMdh0It9fJbuiLydZ9fpF6MRzgNle0vODaDiMqhbC7WIN1ZHCCIyg +- enr:-IS4QFUVG3dvLPCUEI7ycRvFm0Ieg_ITa5tALmJ9LI7dJ6ieT3J4fF9xLRjOoB4ApV-Rjp7HeLKzyTWG1xRdbFBNZPQBgmlkgnY0gmlwhErP5weJc2VjcDI1NmsxoQOBbaJBvx0-w_pyZUhQl9A510Ho2T0grE0K8JevzES99IN1ZHCCIyg +- enr:-Ku4QOQk8V-Hu2gxFzRXmLYIO4AvWDZhoMFwTf3n3DYm_mbsWv0ZitoqiN6JZUUj6Li6e1Jk1w2zFSVHKPMUP1g5tsgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC1PTpmJc2VjcDI1NmsxoQL1Ynt5PoA0UOcHa1Rfn98rmnRlLzNuWTePPP4m4qHVroN1ZHCCKvg +- enr:-Ku4QFaTwgoms-EiiRIfHUH3FXprWUFgjHg4UuWvilqoUQtDbmTszVIxUEOwQUmA2qkiP-T9wXjc_rVUuh9cU7WgwbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC0hBmCJc2VjcDI1NmsxoQOpsg1XCrXmCwZKcSTcycLwldoKUMHPUpMEVGeg_EEhuYN1ZHCCKvg diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml new file mode 100644 index 0000000000..c34ebed7d5 --- /dev/null +++ 
b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -0,0 +1,85 @@ +# Gnosis Beacon Chain config + +# Extends the gnosis preset +PRESET_BASE: 'gnosis' + +# Transition +# --------------------------------------------------------------- +# TBD, 2**256-2**10 is a placeholder +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Genesis +# --------------------------------------------------------------- +# `2**12` (= 4,096) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 4096 +# Dec 8, 2021, 13:00 UTC +MIN_GENESIS_TIME: 1638968400 +# Gnosis Beacon Chain initial fork version +GENESIS_FORK_VERSION: 0x00000064 +# 6000 seconds (100 minutes) +GENESIS_DELAY: 6000 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x01000064 +ALTAIR_FORK_EPOCH: 512 +# Merge +BELLATRIX_FORK_VERSION: 0x02000064 +BELLATRIX_FORK_EPOCH: 18446744073709551615 +# Sharding +SHARDING_FORK_VERSION: 0x03000064 +SHARDING_FORK_EPOCH: 18446744073709551615 + +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
+TRANSITION_TOTAL_DIFFICULTY: 4294967296 + + +# Time parameters +# --------------------------------------------------------------- +# 5 seconds +SECONDS_PER_SLOT: 5 +# 6 (estimate from Gnosis Chain) +SECONDS_PER_ETH1_BLOCK: 6 +# 2**8 (= 256) epochs ~8 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~8 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**10 (= 1024) ~1.4 hour +ETH1_FOLLOW_DISTANCE: 1024 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**12 (= 4096) +CHURN_LIMIT_QUOTIENT: 4096 + + +# Fork choice +# --------------------------------------------------------------- +# TODO: enable once proposer boosting is desired on mainnet +# 70% +# PROPOSER_SCORE_BOOST: 70 + +# Deposit contract +# --------------------------------------------------------------- +# Gnosis Chain +DEPOSIT_CHAIN_ID: 100 +DEPOSIT_NETWORK_ID: 100 +DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt new file mode 100644 index 0000000000..0071371e28 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt @@ -0,0 +1 @@ +19469077 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/gnosis/genesis.ssz.zip new file mode 100644 index 0000000000..3bfb326a24 Binary files /dev/null and b/common/eth2_network_config/built_in_network_configs/gnosis/genesis.ssz.zip differ diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 4085d392a6..044548bd61 100644 --- a/common/eth2_network_config/src/lib.rs 
+++ b/common/eth2_network_config/src/lib.rs @@ -226,7 +226,7 @@ mod tests { use super::*; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Config, Eth1Data, Hash256, MainnetEthSpec}; + use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec, GNOSIS}; type E = MainnetEthSpec; @@ -263,7 +263,11 @@ mod tests { .unwrap_or_else(|_| panic!("{:?}", net.name)); // Ensure we can parse the YAML config to a chain spec. - config.chain_spec::().unwrap(); + if net.name == GNOSIS { + config.chain_spec::().unwrap(); + } else { + config.chain_spec::().unwrap(); + } assert_eq!( config.genesis_state_bytes.is_some(), diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 6f2baf132c..3c6b2459ec 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.1.0-", + prefix = "Lighthouse/v2.1.2-", fallback = "unknown" ); diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 402cdc27aa..681849a78c 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -11,22 +11,20 @@ use std::env; use std::os::raw::c_int; use std::result::Result; -/// The value to be provided to `malloc_mmap_threshold`. +/// The optimal mmap threshold for Lighthouse seems to be around 128KB. /// -/// Value chosen so that values of the validators tree hash cache will *not* be allocated via -/// `mmap`. -/// -/// The size of a single chunk is: -/// -/// NODES_PER_VALIDATOR * VALIDATORS_PER_ARENA * 32 = 15 * 4096 * 32 = 1.875 MiB -const OPTIMAL_MMAP_THRESHOLD: c_int = 2 * 1_024 * 1_024; +/// By default GNU malloc will start with a threshold of 128KB and adjust it upwards, but we've +/// found that the upwards adjustments tend to result in heap fragmentation. 
Explicitly setting the +/// threshold to 128KB disables the dynamic adjustments and encourages `mmap` usage, which keeps the +/// heap size under control. +const OPTIMAL_MMAP_THRESHOLD: c_int = 128 * 1_024; /// Constants used to configure malloc internals. /// /// Source: /// /// https://github.com/lattera/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/malloc/malloc.h#L115-L123 -const M_MMAP_THRESHOLD: c_int = -4; +const M_MMAP_THRESHOLD: c_int = -3; /// Environment variables used to configure malloc. /// @@ -134,8 +132,8 @@ fn env_var_present(name: &str) -> bool { /// ## Resources /// /// - https://man7.org/linux/man-pages/man3/mallopt.3.html -fn malloc_mmap_threshold(num_arenas: c_int) -> Result<(), c_int> { - into_result(mallopt(M_MMAP_THRESHOLD, num_arenas)) +fn malloc_mmap_threshold(threshold: c_int) -> Result<(), c_int> { + into_result(mallopt(M_MMAP_THRESHOLD, threshold)) } fn mallopt(param: c_int, val: c_int) -> c_int { diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 16965f43cd..8699a8cf2c 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -67,11 +67,7 @@ const BEACON_PROCESS_METRICS: &[JsonMetric] = &[ "disk_beaconchain_bytes_total", JsonType::Integer, ), - JsonMetric::new( - "libp2p_peer_connected_peers_total", - "network_peers_connected", - JsonType::Integer, - ), + JsonMetric::new("libp2p_peers", "network_peers_connected", JsonType::Integer), JsonMetric::new( "libp2p_outbound_bytes", "network_libp2p_bytes_total_transmit", diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 4d6de05163..861a6afe96 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -134,15 +134,18 @@ impl<'a> Builder<'a> { self } + /// Return the path to the validator dir to be built, i.e. `base_dir/pubkey`. 
+ pub fn get_dir_path(base_validators_dir: &Path, voting_keystore: &Keystore) -> PathBuf { + base_validators_dir.join(format!("0x{}", voting_keystore.pubkey())) + } + /// Consumes `self`, returning a `ValidatorDir` if no error is encountered. pub fn build(self) -> Result { let (voting_keystore, voting_password) = self .voting_keystore .ok_or(Error::UninitializedVotingKeystore)?; - let dir = self - .base_validators_dir - .join(format!("0x{}", voting_keystore.pubkey())); + let dir = Self::get_dir_path(&self.base_validators_dir, &voting_keystore); if dir.exists() { return Err(Error::DirectoryAlreadyExists(dir)); diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 965a63c60d..54eb55b8fc 100644 --- a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -9,8 +9,6 @@ license = "Apache-2.0" [dependencies] serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" +serde_json = "1.0.58" hex = "0.4.2" ethereum-types = "0.12.1" - -[dev-dependencies] -serde_json = "1.0.58" diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs new file mode 100644 index 0000000000..b9a1813915 --- /dev/null +++ b/consensus/serde_utils/src/json_str.rs @@ -0,0 +1,25 @@ +//! Serialize a datatype as a JSON-blob within a single string. +use serde::{ + de::{DeserializeOwned, Error as _}, + ser::Error as _, + Deserialize, Deserializer, Serialize, Serializer, +}; + +/// Serialize as a JSON object within a string. +pub fn serialize(value: &T, serializer: S) -> Result +where + S: Serializer, + T: Serialize, +{ + serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?) +} + +/// Deserialize a JSON object embedded in a string. 
+pub fn deserialize<'de, T, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: DeserializeOwned, +{ + let json_str = String::deserialize(deserializer)?; + serde_json::from_str(&json_str).map_err(D::Error::custom) +} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 87179997e3..81e2bbe963 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -3,6 +3,7 @@ mod quoted_int; pub mod fixed_bytes_hex; pub mod hex; pub mod hex_vec; +pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u32_hex; diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 31386a8fb1..8358003e4b 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -42,19 +42,7 @@ pub fn process_sync_aggregate( } // Compute participant and proposer rewards - let total_active_balance = state.get_total_active_balance()?; - let total_active_increments = - total_active_balance.safe_div(spec.effective_balance_increment)?; - let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? - .safe_mul(total_active_increments)?; - let max_participant_rewards = total_base_rewards - .safe_mul(SYNC_REWARD_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - .safe_div(T::slots_per_epoch())?; - let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; - let proposer_reward = participant_reward - .safe_mul(PROPOSER_WEIGHT)? 
- .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + let (participant_reward, proposer_reward) = compute_sync_aggregate_rewards(state, spec)?; // Apply participant and proposer rewards let committee_indices = state.get_sync_committee_indices(¤t_sync_committee)?; @@ -73,3 +61,26 @@ pub fn process_sync_aggregate( Ok(()) } + +/// Compute the `(participant_reward, proposer_reward)` for a sync aggregate. +/// +/// The `state` should be the pre-state from the same slot as the block containing the aggregate. +pub fn compute_sync_aggregate_rewards( + state: &BeaconState, + spec: &ChainSpec, +) -> Result<(u64, u64), BlockProcessingError> { + let total_active_balance = state.get_total_active_balance()?; + let total_active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? + .safe_mul(total_active_increments)?; + let max_participant_rewards = total_base_rewards + .safe_mul(SYNC_REWARD_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)? + .safe_div(T::slots_per_epoch())?; + let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; + let proposer_reward = participant_reward + .safe_mul(PROPOSER_WEIGHT)? 
+ .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + Ok((participant_reward, proposer_reward)) +} diff --git a/consensus/types/presets/gnosis/altair.yaml b/consensus/types/presets/gnosis/altair.yaml new file mode 100644 index 0000000000..ff6bd98a00 --- /dev/null +++ b/consensus/types/presets/gnosis/altair.yaml @@ -0,0 +1,24 @@ +# Gnosis Beacon Chain preset - Altair + +# Updated penalty values +# --------------------------------------------------------------- +# 3 * 2**24 (= 50,331,648) +INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648 +# 2**6 (= 64) +MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64 +# 2 +PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2 + + +# Sync committee +# --------------------------------------------------------------- +# 2**9 (= 512) +SYNC_COMMITTEE_SIZE: 512 +# 2**9 (= 512) +EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512 + + +# Sync protocol +# --------------------------------------------------------------- +# 1 +MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 diff --git a/consensus/types/presets/gnosis/bellatrix.yaml b/consensus/types/presets/gnosis/bellatrix.yaml new file mode 100644 index 0000000000..e938af4792 --- /dev/null +++ b/consensus/types/presets/gnosis/bellatrix.yaml @@ -0,0 +1,21 @@ +# Gnosis Beacon Chain preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml new file mode 100644 index 0000000000..87c73e6fb7 --- /dev/null +++ 
b/consensus/types/presets/gnosis/phase0.yaml @@ -0,0 +1,94 @@ +# Gnosis Beacon Chain preset - Phase0 + +# Misc +# --------------------------------------------------------------- +# 2**6 (= 64) +MAX_COMMITTEES_PER_SLOT: 64 +# 2**7 (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 +# 4 +HYSTERESIS_QUOTIENT: 4 +# 1 (minus 0.25) +HYSTERESIS_DOWNWARD_MULTIPLIER: 1 +# 5 (plus 1.25) +HYSTERESIS_UPWARD_MULTIPLIER: 5 + + +# Fork Choice +# --------------------------------------------------------------- +# 2**3 (= 8) +SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 + + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE: 32000000000 +# 2**0 * 10**9 (= 1,000,000,000) Gwei +EFFECTIVE_BALANCE_INCREMENT: 1000000000 + + +# Time parameters +# --------------------------------------------------------------- +# 2**0 (= 1) slots 5 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 1 +# 2**4 (= 16) slots 1.87 minutes +SLOTS_PER_EPOCH: 16 +# 2**0 (= 1) epochs 1.87 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 (= 4) epochs 7.47 minutes +MAX_SEED_LOOKAHEAD: 4 +# 2**6 (= 64) epochs ~2 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 64 +# 2**13 (= 8,192) slots ~15.9 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**2 (= 4) epochs 7.47 minutes +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 + + +# State list lengths +# --------------------------------------------------------------- +# 2**16 (= 65,536) epochs ~85 days +EPOCHS_PER_HISTORICAL_VECTOR: 65536 +# 2**13 (= 8,192) epochs ~10.6 days +EPOCHS_PER_SLASHINGS_VECTOR: 8192 +# 2**24 (= 16,777,216) historical roots, ~15,243 years +HISTORICAL_ROOTS_LIMIT: 16777216 +# 2**40 (= 1,099,511,627,776) validator spots +VALIDATOR_REGISTRY_LIMIT: 1099511627776 + + +# Reward and penalty quotients +# --------------------------------------------------------------- +# 
25 +BASE_REWARD_FACTOR: 25 +# 2**9 (= 512) +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +# 2**3 (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**26 (= 67,108,864) +INACTIVITY_PENALTY_QUOTIENT: 67108864 +# 2**7 (= 128) (lower safety margin at Phase 0 genesis) +MIN_SLASHING_PENALTY_QUOTIENT: 128 +# 1 (lower safety margin at Phase 0 genesis) +PROPORTIONAL_SLASHING_MULTIPLIER: 1 + + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) +MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 (= 16) +MAX_DEPOSITS: 16 +# 2**4 (= 16) +MAX_VOLUNTARY_EXITS: 16 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f191eb8671..fa74f9d29c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -596,6 +596,161 @@ impl ChainSpec { ..ChainSpec::mainnet() } } + + /// Returns a `ChainSpec` compatible with the Gnosis Beacon Chain specification. + pub fn gnosis() -> Self { + Self { + /* + * Constants + */ + genesis_slot: Slot::new(0), + far_future_epoch: Epoch::new(u64::MAX), + base_rewards_per_epoch: 4, + deposit_contract_tree_depth: 32, + + /* + * Misc + */ + max_committees_per_slot: 64, + target_committee_size: 128, + min_per_epoch_churn_limit: 4, + churn_limit_quotient: 4_096, + shuffle_round_count: 90, + min_genesis_active_validator_count: 4_096, + min_genesis_time: 1638968400, // Dec 8, 2021 + hysteresis_quotient: 4, + hysteresis_downward_multiplier: 1, + hysteresis_upward_multiplier: 5, + + /* + * Gwei values + */ + min_deposit_amount: option_wrapper(|| { + u64::checked_pow(2, 0)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_effective_balance: option_wrapper(|| { + u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) 
+ }) + .expect("calculation does not overflow"), + ejection_balance: option_wrapper(|| { + u64::checked_pow(2, 4)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + effective_balance_increment: option_wrapper(|| { + u64::checked_pow(2, 0)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + + /* + * Initial Values + */ + genesis_fork_version: [0x00, 0x00, 0x00, 0x64], + bls_withdrawal_prefix_byte: 0, + + /* + * Time parameters + */ + genesis_delay: 6000, // 100 minutes + seconds_per_slot: 5, + min_attestation_inclusion_delay: 1, + min_seed_lookahead: Epoch::new(1), + max_seed_lookahead: Epoch::new(4), + min_epochs_to_inactivity_penalty: 4, + min_validator_withdrawability_delay: Epoch::new(256), + shard_committee_period: 256, + + /* + * Reward and penalty quotients + */ + base_reward_factor: 25, + whistleblower_reward_quotient: 512, + proposer_reward_quotient: 8, + inactivity_penalty_quotient: u64::checked_pow(2, 26).expect("pow does not overflow"), + min_slashing_penalty_quotient: 128, + proportional_slashing_multiplier: 1, + + /* + * Signature domains + */ + domain_beacon_proposer: 0, + domain_beacon_attester: 1, + domain_randao: 2, + domain_deposit: 3, + domain_voluntary_exit: 4, + domain_selection_proof: 5, + domain_aggregate_and_proof: 6, + + /* + * Fork choice + */ + safe_slots_to_update_justified: 8, + proposer_score_boost: None, + + /* + * Eth1 + */ + eth1_follow_distance: 1024, + seconds_per_eth1_block: 6, + deposit_chain_id: 100, + deposit_network_id: 100, + deposit_contract_address: "0B98057eA310F4d31F2a452B414647007d1645d9" + .parse() + .expect("chain spec deposit contract address"), + + /* + * Altair hard fork params + */ + inactivity_penalty_quotient_altair: option_wrapper(|| { + u64::checked_pow(2, 24)?.checked_mul(3) + }) + .expect("calculation does not overflow"), + min_slashing_penalty_quotient_altair: u64::checked_pow(2, 6) + .expect("pow does not overflow"), + 
proportional_slashing_multiplier_altair: 2, + inactivity_score_bias: 4, + inactivity_score_recovery_rate: 16, + min_sync_committee_participants: 1, + epochs_per_sync_committee_period: Epoch::new(512), + domain_sync_committee: 7, + domain_sync_committee_selection_proof: 8, + domain_contribution_and_proof: 9, + altair_fork_version: [0x01, 0x00, 0x00, 0x64], + altair_fork_epoch: Some(Epoch::new(512)), // matches ALTAIR_FORK_EPOCH: 512 in the gnosis config.yaml + + /* + * Merge hard fork params + */ + inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24) + .expect("pow does not overflow"), + min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5) + .expect("pow does not overflow"), + proportional_slashing_multiplier_bellatrix: 3, + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64], + bellatrix_fork_epoch: None, + terminal_total_difficulty: Uint256::MAX + .checked_sub(Uint256::from(2u64.pow(10))) + .expect("subtraction does not overflow") + // Add 1 since the spec declares `2**256 - 2**10` and we use + // `Uint256::MAX` which is `2**256 - 1`. + .checked_add(Uint256::one()) + .expect("addition does not overflow"), + terminal_block_hash: Hash256::zero(), + terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), + + /* + * Network specific + */ + boot_nodes: vec![], + network_id: 100, // Gnosis Chain network id + attestation_propagation_slot_range: 32, + attestation_subnet_count: 64, + random_subnets_per_validator: 1, + maximum_gossip_clock_disparity_millis: 500, + target_aggregators_per_committee: 16, + epochs_per_random_subnet_subscription: 256, + } + } } impl Default for ChainSpec { @@ -688,10 +843,16 @@ fn default_bellatrix_fork_epoch() -> Option> { None } -fn default_terminal_total_difficulty() -> Uint256 { - "115792089237316195423570985008687907853269984665640564039457584007913129638912" - .parse() - .unwrap() +/// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
+/// +/// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 +const fn default_terminal_total_difficulty() -> Uint256 { + ethereum_types::U256([ + 18446744073709550592, + 18446744073709551615, + 18446744073709551615, + 18446744073709551615, + ]) } fn default_terminal_block_hash() -> Hash256 { @@ -746,6 +907,7 @@ impl Config { match self.preset_base.as_str() { "minimal" => Some(EthSpecId::Minimal), "mainnet" => Some(EthSpecId::Mainnet), + "gnosis" => Some(EthSpecId::Gnosis), _ => None, } } @@ -1038,4 +1200,73 @@ mod yaml_tests { .expect("should have applied spec"); assert_eq!(new_spec, ChainSpec::minimal()); } + + #[test] + fn test_defaults() { + // Spec yaml string. Fields that serialize/deserialize with a default value are commented out. + let spec = r#" + PRESET_BASE: 'mainnet' + #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 + #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 + #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 + MIN_GENESIS_TIME: 1606824000 + GENESIS_FORK_VERSION: 0x00000000 + GENESIS_DELAY: 604800 + ALTAIR_FORK_VERSION: 0x01000000 + ALTAIR_FORK_EPOCH: 74240 + #BELLATRIX_FORK_VERSION: 0x02000000 + #BELLATRIX_FORK_EPOCH: 18446744073709551614 + SHARDING_FORK_VERSION: 0x03000000 + SHARDING_FORK_EPOCH: 18446744073709551615 + SECONDS_PER_SLOT: 12 + SECONDS_PER_ETH1_BLOCK: 14 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 + SHARD_COMMITTEE_PERIOD: 256 + ETH1_FOLLOW_DISTANCE: 2048 + INACTIVITY_SCORE_BIAS: 4 + INACTIVITY_SCORE_RECOVERY_RATE: 16 + EJECTION_BALANCE: 16000000000 + MIN_PER_EPOCH_CHURN_LIMIT: 4 + CHURN_LIMIT_QUOTIENT: 65536 + PROPOSER_SCORE_BOOST: 70 + DEPOSIT_CHAIN_ID: 1 + DEPOSIT_NETWORK_ID: 1 + DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + "#; + + let chain_spec: Config = 
serde_yaml::from_str(spec).unwrap(); + assert_eq!( + chain_spec.terminal_total_difficulty, + default_terminal_total_difficulty() + ); + assert_eq!( + chain_spec.terminal_block_hash, + default_terminal_block_hash() + ); + assert_eq!( + chain_spec.terminal_block_hash_activation_epoch, + default_terminal_block_hash_activation_epoch() + ); + + assert_eq!( + chain_spec.bellatrix_fork_epoch, + default_bellatrix_fork_epoch() + ); + + assert_eq!( + chain_spec.bellatrix_fork_version, + default_bellatrix_fork_version() + ); + } + + #[test] + fn test_total_terminal_difficulty() { + assert_eq!( + Ok(default_terminal_total_difficulty()), + Uint256::from_dec_str( + "115792089237316195423570985008687907853269984665640564039457584007913129638912" + ) + ); + } } diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index ae0cafe1ff..e616976026 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,17 +3,17 @@ use crate::*; use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ - Unsigned, U0, U1024, U1073741824, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, - U4096, U512, U64, U65536, U8, U8192, + bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U16, + U16777216, U2, U2048, U256, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; -use ssz_types::typenum::{bit::B0, UInt, U1048576, U256, U625}; pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; +pub const GNOSIS: &str = "gnosis"; /// Used to identify one of the `EthSpec` instances defined here. 
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -21,6 +21,7 @@ const MINIMAL: &str = "minimal"; pub enum EthSpecId { Mainnet, Minimal, + Gnosis, } impl FromStr for EthSpecId { @@ -30,6 +31,7 @@ impl FromStr for EthSpecId { match s { MAINNET => Ok(EthSpecId::Mainnet), MINIMAL => Ok(EthSpecId::Minimal), + GNOSIS => Ok(EthSpecId::Gnosis), _ => Err(format!("Unknown eth spec: {}", s)), } } @@ -40,6 +42,7 @@ impl fmt::Display for EthSpecId { let s = match self { EthSpecId::Mainnet => MAINNET, EthSpecId::Minimal => MINIMAL, + EthSpecId::Gnosis => GNOSIS, }; write!(f, "{}", s) } @@ -317,3 +320,46 @@ impl EthSpec for MinimalEthSpec { EthSpecId::Minimal } } + +/// Gnosis Beacon Chain specifications. +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] +pub struct GnosisEthSpec; + +impl EthSpec for GnosisEthSpec { + type JustificationBitsLength = U4; + type SubnetBitfieldLength = U64; + type MaxValidatorsPerCommittee = U2048; + type GenesisEpoch = U0; + type SlotsPerEpoch = U16; + type EpochsPerEth1VotingPeriod = U64; + type SlotsPerHistoricalRoot = U8192; + type EpochsPerHistoricalVector = U65536; + type EpochsPerSlashingsVector = U8192; + type HistoricalRootsLimit = U16777216; + type ValidatorRegistryLimit = U1099511627776; + type MaxProposerSlashings = U16; + type MaxAttesterSlashings = U2; + type MaxAttestations = U128; + type MaxDeposits = U16; + type MaxVoluntaryExits = U16; + type SyncCommitteeSize = U512; + type SyncCommitteeSubnetCount = U4; + type MaxBytesPerTransaction = U1073741824; // 1,073,741,824 + type MaxTransactionsPerPayload = U1048576; // 1,048,576 + type BytesPerLogsBloom = U256; + type GasLimitDenominator = U1024; + type MinGasLimit = U5000; + type MaxExtraDataBytes = U32; + type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count + type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch + 
type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch + + fn default_spec() -> ChainSpec { + ChainSpec::gnosis() + } + + fn spec_name() -> EthSpecId { + EthSpecId::Gnosis + } +} diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index ccda1a06a0..8ee38e46a6 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -187,7 +187,7 @@ impl BellatrixPreset { #[cfg(test)] mod test { use super::*; - use crate::{MainnetEthSpec, MinimalEthSpec}; + use crate::{GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; use serde::de::DeserializeOwned; use std::env; use std::fs::File; @@ -226,6 +226,11 @@ mod test { preset_test::(); } + #[test] + fn gnosis_presets_consistent() { + preset_test::(); + } + #[test] fn minimal_presets_consistent() { preset_test::(); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index a6062e5b8c..339dfee5b1 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.1.0" +version = "2.1.2" authors = ["Paul Hauner "] edition = "2018" diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 5a4177ead9..27ec8cc86c 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.56.1-bullseye AS builder +FROM rust:1.58.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake COPY . 
lighthouse ARG PORTABLE diff --git a/lcli/src/main.rs b/lcli/src/main.rs index a494cd3822..9af4b25548 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -38,7 +38,7 @@ fn main() { .value_name("STRING") .takes_value(true) .required(true) - .possible_values(&["minimal", "mainnet"]) + .possible_values(&["minimal", "mainnet", "gnosis"]) .default_value("mainnet") .global(true), ) @@ -665,6 +665,7 @@ fn main() { .and_then(|eth_spec_id| match eth_spec_id { EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches), EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches), + EthSpecId::Gnosis => run(EnvironmentBuilder::gnosis(), &matches), }); match result { diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 787b992a22..22b2a7645e 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.1.0" +version = "2.1.2" authors = ["Sigma Prime "] edition = "2018" autotests = false @@ -16,6 +16,8 @@ modern = ["bls/supranational-force-adx"] milagro = ["bls/milagro"] # Support minimal spec (used for testing only). spec-minimal = [] +# Support Gnosis spec and Gnosis Beacon Chain. +gnosis = [] [dependencies] beacon_node = { "path" = "../beacon_node" } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index e536d3c95b..448c84b54d 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -21,7 +21,7 @@ use std::path::PathBuf; use std::sync::Arc; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; -use types::{EthSpec, MainnetEthSpec, MinimalEthSpec}; +use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; #[cfg(target_family = "unix")] use { @@ -87,6 +87,19 @@ impl EnvironmentBuilder { } } +impl EnvironmentBuilder { + /// Creates a new builder using the `gnosis` eth2 specification. 
+ pub fn gnosis() -> Self { + Self { + runtime: None, + log: None, + eth_spec_instance: GnosisEthSpec, + eth2_config: Eth2Config::gnosis(), + eth2_network_config: None, + } + } +} + impl EnvironmentBuilder { /// Specifies that a multi-threaded tokio runtime should be used. Ideal for production uses. /// diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 693b3de821..51c1075cdb 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -52,11 +52,12 @@ fn main() { "{}\n\ BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ - Specs: mainnet (true), minimal ({})", + Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), cfg!(feature = "spec-minimal"), + cfg!(feature = "gnosis"), ).as_str() ) .arg( @@ -302,9 +303,11 @@ fn main() { match eth_spec_id { EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches, eth2_network_config), + #[cfg(feature = "gnosis")] + EthSpecId::Gnosis => run(EnvironmentBuilder::gnosis(), &matches, eth2_network_config), #[cfg(feature = "spec-minimal")] EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches, eth2_network_config), - #[cfg(not(feature = "spec-minimal"))] + #[cfg(not(all(feature = "spec-minimal", feature = "gnosis")))] other => { eprintln!( "Eth spec `{}` is not supported by this build of Lighthouse", diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index 883c666029..8151aac249 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -4,6 +4,8 @@ # Starts a beacon node based upon a genesis state created by `./setup.sh`. 
# +set -Eeuo pipefail + source ./vars.env SUBSCRIBE_ALL_SUBNETS= diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh index bef207a694..ca02a24140 100755 --- a/scripts/local_testnet/bootnode.sh +++ b/scripts/local_testnet/bootnode.sh @@ -5,6 +5,8 @@ # Starts a bootnode from the generated enr. # +set -Eeuo pipefail + source ./vars.env echo "Generating bootnode enr" diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh index bc4db74c61..b01b1a2dff 100755 --- a/scripts/local_testnet/clean.sh +++ b/scripts/local_testnet/clean.sh @@ -4,6 +4,8 @@ # Deletes all files associated with the local testnet. # +set -Eeuo pipefail + source ./vars.env if [ -d $DATADIR ]; then diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 762700dbd6..69edc1e770 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -Eeuo pipefail + source ./vars.env exec ganache-cli \ diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index c729a1645a..be6b7f3d66 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -1,11 +1,16 @@ #!/usr/bin/env bash # Kill processes +set -Eeuo pipefail + # First parameter is the file with # one pid per line. 
if [ -f "$1" ]; then while read pid do + # handle the case of blank lines + [[ -n "$pid" ]] || continue + echo killing $pid kill $pid done < $1 diff --git a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/print_logs.sh new file mode 100755 index 0000000000..2a9e7822a6 --- /dev/null +++ b/scripts/local_testnet/print_logs.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Print the tail of all the logs output from local testnet + +set -Eeuo pipefail + +source ./vars.env + +for f in "$TESTNET_DIR"/*.log +do + [[ -e "$f" ]] || break # handle the case of no *.log files + echo "=============================================================================" + echo "$f" + echo "=============================================================================" + tail "$f" + echo "" +done diff --git a/scripts/local_testnet/reset_genesis_time.sh b/scripts/local_testnet/reset_genesis_time.sh index c7332e327e..68c8fb6b4c 100755 --- a/scripts/local_testnet/reset_genesis_time.sh +++ b/scripts/local_testnet/reset_genesis_time.sh @@ -4,6 +4,8 @@ # Resets the beacon state genesis time to now. 
# +set -Eeuo pipefail + source ./vars.env NOW=$(date +%s) diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index cdae9b2ba2..69d55660fa 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Start all processes necessary to create a local testnet +set -Eeuo pipefail + source ./vars.env # VC_COUNT is defaulted in vars.env @@ -49,7 +51,7 @@ for (( bn=1; bn<=$BN_COUNT; bn++ )); do done for (( vc=1; vc<=$VC_COUNT; vc++ )); do touch $LOG_DIR/validator_node_$vc.log -done +done # Sleep with a message sleeping() { @@ -67,7 +69,7 @@ execute_command() { EX_NAME=$2 shift shift - CMD="$EX_NAME $@ &>> $LOG_DIR/$LOG_NAME" + CMD="$EX_NAME $@ >> $LOG_DIR/$LOG_NAME 2>&1" echo "executing: $CMD" echo "$CMD" > "$LOG_DIR/$LOG_NAME" eval "$CMD &" @@ -89,11 +91,11 @@ execute_command_add_PID() { # Delay to let ganache-cli to get started execute_command_add_PID ganache_test_node.log ./ganache_test_node.sh -sleeping 2 +sleeping 10 -# Delay to get data setup -execute_command setup.log ./setup.sh -sleeping 15 +# Setup data +echo "executing: ./setup.sh >> $LOG_DIR/setup.log" +./setup.sh >> $LOG_DIR/setup.log 2>&1 # Delay to let boot_enr.yaml to be created execute_command_add_PID bootnode.log ./bootnode.sh diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 47f390ba76..b1c3188ee3 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Stop all processes that were started with start_local_testnet.sh +set -Eeuo pipefail + source ./vars.env PID_FILE=$TESTNET_DIR/PIDS.pid diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 6755384be5..5aa75dfe2d 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -6,6 
+6,8 @@ # # Usage: ./validator_client.sh +set -Eeuo pipefail + source ./vars.env DEBUG_LEVEL=${3:-info} diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index f88e9eb716..208fbb6d85 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -43,3 +43,6 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 + +# Command line arguments for validator client +VC_ARGS="" diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index d9084af348..61b95397d7 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,4 +1,4 @@ -FROM rust:1.56.1-bullseye AS builder +FROM rust:1.58.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 4e8aa57a5b..08f5cec07c 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,6 +10,7 @@ path = "src/lib.rs" [dev-dependencies] tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } +logging = { path = "../common/logging" } [dependencies] tree_hash = "0.4.1" @@ -48,7 +49,7 @@ hyper = "0.14.4" eth2_serde_utils = "0.1.1" libsecp256k1 = "0.6.0" ring = "0.16.19" -rand = "0.7.3" +rand = { version = "0.7.3", features = ["small_rng"] } lighthouse_metrics = { path = "../common/lighthouse_metrics" } lazy_static = "1.4.0" itertools = "0.10.0" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 634e49feea..697bd602bf 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -3,6 +3,11 @@ name = "slashing_protection" version = "0.1.0" authors = ["Michael Sproul ", "pscott "] edition = "2018" +autotests = false + +[[test]] +name = "slashing_protection_tests" +path = "tests/main.rs" 
[dependencies] tempfile = "3.1.0" diff --git a/validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite b/validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite new file mode 100644 index 0000000000..5a95fe36e6 Binary files /dev/null and b/validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite differ diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 858acbfe9b..1610b52372 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -30,6 +30,7 @@ pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; #[derive(PartialEq, Debug)] pub enum NotSafe { UnregisteredValidator(PublicKeyBytes), + DisabledValidator(PublicKeyBytes), InvalidBlock(InvalidBlock), InvalidAttestation(InvalidAttestation), PermissionsError, diff --git a/validator_client/slashing_protection/src/registration_tests.rs b/validator_client/slashing_protection/src/registration_tests.rs index 40a3d6ee71..472f41577d 100644 --- a/validator_client/slashing_protection/src/registration_tests.rs +++ b/validator_client/slashing_protection/src/registration_tests.rs @@ -2,6 +2,7 @@ use crate::test_utils::*; use crate::*; +use std::iter; use tempfile::tempdir; #[test] @@ -30,3 +31,44 @@ fn double_register_validators() { assert_eq!(slashing_db.num_validator_rows().unwrap(), num_validators); assert_eq!(validator_ids, get_validator_ids()); } + +#[test] +fn reregister_validator() { + let dir = tempdir().unwrap(); + let slashing_db_file = dir.path().join("slashing_protection.sqlite"); + let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); + + let pk = pubkey(0); + + // Register validator. + slashing_db.register_validator(pk).unwrap(); + let id = slashing_db.get_validator_id(&pk).unwrap(); + + slashing_db + .with_transaction(|txn| { + // Disable. 
+ slashing_db.update_validator_status(txn, id, false)?; + + // Fetching the validator as "registered" should now fail. + assert_eq!( + slashing_db.get_validator_id_in_txn(txn, &pk).unwrap_err(), + NotSafe::DisabledValidator(pk) + ); + + // Fetching its status should return false. + let (fetched_id, enabled) = + slashing_db.get_validator_id_with_status(txn, &pk)?.unwrap(); + assert_eq!(fetched_id, id); + assert!(!enabled); + + // Re-registering the validator should preserve its ID while changing its status to + // enabled. + slashing_db.register_validators_in_txn(iter::once(&pk), txn)?; + + let re_reg_id = slashing_db.get_validator_id_in_txn(txn, &pk)?; + assert_eq!(re_reg_id, id); + + Ok::<_, NotSafe>(()) + }) + .unwrap(); +} diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 2b187f46ef..9f585c010a 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -28,6 +28,9 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(100); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; +/// Column ID of the `validators.enabled` column. +pub const VALIDATORS_ENABLED_CID: i64 = 2; + #[derive(Debug, Clone)] pub struct SlashingDatabase { conn_pool: Pool, @@ -55,7 +58,7 @@ impl SlashingDatabase { restrict_file_permissions(path).map_err(|_| NotSafe::PermissionsError)?; let conn_pool = Self::open_conn_pool(path)?; - let conn = conn_pool.get()?; + let mut conn = conn_pool.get()?; conn.execute( "CREATE TABLE validators ( @@ -88,13 +91,55 @@ impl SlashingDatabase { params![], )?; + // The tables created above are for the v0 schema. We immediately update them + // to the latest schema without dropping the connection. 
+ let txn = conn.transaction()?; + Self::apply_schema_migrations(&txn)?; + txn.commit()?; + Ok(Self { conn_pool }) } /// Open an existing `SlashingDatabase` from disk. + /// + /// This will automatically check for and apply the latest schema migrations. pub fn open(path: &Path) -> Result { let conn_pool = Self::open_conn_pool(path)?; - Ok(Self { conn_pool }) + let db = Self { conn_pool }; + db.with_transaction(Self::apply_schema_migrations)?; + Ok(db) + } + + fn apply_schema_migrations(txn: &Transaction) -> Result<(), NotSafe> { + // Add the `enabled` column to the `validators` table if it does not already exist. + let enabled_col_exists = txn + .query_row( + "SELECT cid, name FROM pragma_table_info('validators') WHERE name = 'enabled'", + params![], + |row| Ok((row.get(0)?, row.get(1)?)), + ) + .optional()? + .map(|(cid, name): (i64, String)| { + // Check that the enabled column is in the correct position with the right name. + // This is a defensive check that shouldn't do anything in practice unless the + // slashing DB has been manually edited. + if cid == VALIDATORS_ENABLED_CID && name == "enabled" { + Ok(()) + } else { + Err(NotSafe::ConsistencyError) + } + }) + .transpose()? + .is_some(); + + if !enabled_col_exists { + txn.execute( + "ALTER TABLE validators ADD COLUMN enabled BOOL NOT NULL DEFAULT TRUE", + params![], + )?; + } + + Ok(()) } /// Open a new connection pool with all of the necessary settings and tweaks. @@ -166,15 +211,37 @@ impl SlashingDatabase { public_keys: impl Iterator, txn: &Transaction, ) -> Result<(), NotSafe> { - let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?; + let mut stmt = + txn.prepare("INSERT INTO validators (public_key, enabled) VALUES (?1, TRUE)")?; for pubkey in public_keys { - if self.get_validator_id_opt(txn, pubkey)?.is_none() { - stmt.execute([pubkey.as_hex_string()])?; + match self.get_validator_id_with_status(txn, pubkey)? 
{ + None => { + stmt.execute([pubkey.as_hex_string()])?; + } + Some((validator_id, false)) => { + self.update_validator_status(txn, validator_id, true)?; + } + Some((_, true)) => { + // Validator already registered and enabled. + } } } Ok(()) } + pub fn update_validator_status( + &self, + txn: &Transaction, + validator_id: i64, + status: bool, + ) -> Result<(), NotSafe> { + txn.execute( + "UPDATE validators SET enabled = ? WHERE id = ?", + params![status, validator_id], + )?; + Ok(()) + } + /// Check that all of the given validators are registered. pub fn check_validator_registrations<'a>( &self, @@ -203,7 +270,7 @@ impl SlashingDatabase { .collect() } - /// Get the database-internal ID for a validator. + /// Get the database-internal ID for an enabled validator. /// /// This is NOT the same as a validator index, and depends on the ordering that validators /// are registered with the slashing protection database (and may vary between machines). @@ -213,26 +280,43 @@ impl SlashingDatabase { self.get_validator_id_in_txn(&txn, public_key) } - fn get_validator_id_in_txn( + pub fn get_validator_id_in_txn( &self, txn: &Transaction, public_key: &PublicKeyBytes, ) -> Result { - self.get_validator_id_opt(txn, public_key)? - .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key)) + let (validator_id, enabled) = self + .get_validator_id_with_status(txn, public_key)? + .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?; + if enabled { + Ok(validator_id) + } else { + Err(NotSafe::DisabledValidator(*public_key)) + } } - /// Optional version of `get_validator_id`. - fn get_validator_id_opt( + /// Get validator ID regardless of whether or not it is enabled. + pub fn get_validator_id_ignoring_status( &self, txn: &Transaction, public_key: &PublicKeyBytes, - ) -> Result, NotSafe> { + ) -> Result { + let (validator_id, _) = self + .get_validator_id_with_status(txn, public_key)? 
+ .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?; + Ok(validator_id) + } + + pub fn get_validator_id_with_status( + &self, + txn: &Transaction, + public_key: &PublicKeyBytes, + ) -> Result, NotSafe> { Ok(txn .query_row( - "SELECT id FROM validators WHERE public_key = ?1", + "SELECT id, enabled FROM validators WHERE public_key = ?1", params![&public_key.as_hex_string()], - |row| row.get(0), + |row| Ok((row.get(0)?, row.get(1)?)), ) .optional()?) } @@ -722,13 +806,21 @@ impl SlashingDatabase { ) -> Result { let mut conn = self.conn_pool.get()?; let txn = &conn.transaction()?; + self.export_interchange_info_in_txn(genesis_validators_root, selected_pubkeys, txn) + } + pub fn export_interchange_info_in_txn( + &self, + genesis_validators_root: Hash256, + selected_pubkeys: Option<&[PublicKeyBytes]>, + txn: &Transaction, + ) -> Result { // Determine the validator IDs and public keys to export data for. let to_export = if let Some(selected_pubkeys) = selected_pubkeys { selected_pubkeys .iter() .map(|pubkey| { - let id = self.get_validator_id_in_txn(txn, pubkey)?; + let id = self.get_validator_id_ignoring_status(txn, pubkey)?; Ok((id, *pubkey)) }) .collect::>()? @@ -1089,7 +1181,6 @@ impl From for InterchangeError { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::pubkey; use tempfile::tempdir; #[test] @@ -1106,8 +1197,7 @@ mod tests { let file = dir.path().join("db.sqlite"); let _db1 = SlashingDatabase::create(&file).unwrap(); - let db2 = SlashingDatabase::open(&file).unwrap(); - db2.register_validator(pubkey(0)).unwrap_err(); + SlashingDatabase::open(&file).unwrap_err(); } // Attempting to create the same database twice should error. 
@@ -1152,9 +1242,12 @@ mod tests { fn test_transaction_failure() { let dir = tempdir().unwrap(); let file = dir.path().join("db.sqlite"); - let _db1 = SlashingDatabase::create(&file).unwrap(); + let db = SlashingDatabase::create(&file).unwrap(); - let db2 = SlashingDatabase::open(&file).unwrap(); - db2.test_transaction().unwrap_err(); + db.with_transaction(|_| { + db.test_transaction().unwrap_err(); + Ok::<(), NotSafe>(()) + }) + .unwrap(); } } diff --git a/validator_client/slashing_protection/tests/main.rs b/validator_client/slashing_protection/tests/main.rs new file mode 100644 index 0000000000..5b66bd87e6 --- /dev/null +++ b/validator_client/slashing_protection/tests/main.rs @@ -0,0 +1,2 @@ +mod interop; +mod migration; diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs new file mode 100644 index 0000000000..cd3561f211 --- /dev/null +++ b/validator_client/slashing_protection/tests/migration.rs @@ -0,0 +1,68 @@ +//! Tests for upgrading a previous version of the database to the latest schema. +use slashing_protection::{NotSafe, SlashingDatabase}; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; +use tempfile::tempdir; +use types::Hash256; + +fn test_data_dir() -> PathBuf { + Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests") +} + +/// Copy `filename` from the test data dir to the temporary `dest` for testing. +fn make_copy(filename: &str, dest: &Path) -> PathBuf { + let source_file = test_data_dir().join(filename); + let dest_file = dest.join(filename); + fs::copy(source_file, &dest_file).unwrap(); + dest_file +} + +#[test] +fn add_enabled_column() { + let tmp = tempdir().unwrap(); + + let path = make_copy("v0_no_enabled_column.sqlite", tmp.path()); + let num_expected_validators = 5; + + // Database should open without errors, indicating successfull application of migrations. 
+ // The input file has no `enabled` column, which should get added when opening it here. + let db = SlashingDatabase::open(&path).unwrap(); + + // Check that exporting an interchange file lists all the validators. + let interchange = db.export_all_interchange_info(Hash256::zero()).unwrap(); + assert_eq!(interchange.data.len(), num_expected_validators); + + db.with_transaction(|txn| { + // Check that all the validators are enabled and unique. + let uniq_validator_ids = interchange + .data + .iter() + .map(|data| { + let (validator_id, enabled) = db + .get_validator_id_with_status(txn, &data.pubkey) + .unwrap() + .unwrap(); + assert!(enabled); + (validator_id, data.pubkey) + }) + .collect::>(); + + assert_eq!(uniq_validator_ids.len(), num_expected_validators); + + // Check that we can disable them all. + for (&validator_id, pubkey) in &uniq_validator_ids { + db.update_validator_status(txn, validator_id, false) + .unwrap(); + let (loaded_id, enabled) = db + .get_validator_id_with_status(txn, pubkey) + .unwrap() + .unwrap(); + assert_eq!(validator_id, loaded_id); + assert!(!enabled); + } + + Ok::<_, NotSafe>(()) + }) + .unwrap(); +} diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 487b5744d0..18780c3092 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -253,22 +253,19 @@ impl CandidateBeaconNode { "our_genesis_fork" => ?spec.genesis_fork_version, ); return Err(CandidateError::Incompatible); - } else if *spec != beacon_node_spec { + } else if beacon_node_spec.altair_fork_epoch != spec.altair_fork_epoch { warn!( log, - "Beacon node config does not match exactly"; + "Beacon node has mismatched Altair fork epoch"; "endpoint" => %self.beacon_node, - "advice" => "check that the BN is updated and configured for any upcoming forks", + "endpoint_altair_fork_epoch" => ?beacon_node_spec.altair_fork_epoch, ); - debug!( + } else if 
beacon_node_spec.bellatrix_fork_epoch != spec.bellatrix_fork_epoch { + warn!( log, - "Beacon node config"; - "config" => ?beacon_node_spec, - ); - debug!( - log, - "Our config"; - "config" => ?spec, + "Beacon node has mismatched Bellatrix fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_bellatrix_fork_epoch" => ?beacon_node_spec.bellatrix_fork_epoch, ); } diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index 531180cbad..484ac50bd3 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -162,25 +162,32 @@ impl ApiSecret { } /// Returns the path for the API token file - pub fn api_token_path(&self) -> &PathBuf { - &self.pk_path + pub fn api_token_path(&self) -> PathBuf { + self.pk_path.clone() } - /// Returns the value of the `Authorization` header which is used for verifying incoming HTTP - /// requests. - fn auth_header_value(&self) -> String { - format!("Basic {}", self.api_token()) + /// Returns the values of the `Authorization` header which indicate a valid incoming HTTP + /// request. + /// + /// For backwards-compatibility we accept the token in a basic authentication style, but this is + /// technically invalid according to RFC 7617 because the token is not a base64-encoded username + /// and password. As such, bearer authentication should be preferred. + fn auth_header_values(&self) -> Vec { + vec![ + format!("Basic {}", self.api_token()), + format!("Bearer {}", self.api_token()), + ] } /// Returns a `warp` header which filters out request that have a missing or inaccurate /// `Authorization` header. 
pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> { - let expected = self.auth_header_value(); + let expected = self.auth_header_values(); warp::any() .map(move || expected.clone()) .and(warp::filters::header::header("Authorization")) - .and_then(move |expected: String, header: String| async move { - if header == expected { + .and_then(move |expected: Vec, header: String| async move { + if expected.contains(&header) { Ok(()) } else { Err(warp_utils::reject::invalid_auth(header)) diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs new file mode 100644 index 0000000000..ce4035581c --- /dev/null +++ b/validator_client/src/http_api/keystores.rs @@ -0,0 +1,290 @@ +//! Implementation of the standard keystore management API. +use crate::{signing_method::SigningMethod, InitializedValidators, ValidatorStore}; +use account_utils::ZeroizeString; +use eth2::lighthouse_vc::std_types::{ + DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus, + ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, KeystoreJsonStr, + ListKeystoresResponse, SingleKeystoreResponse, Status, +}; +use eth2_keystore::Keystore; +use slog::{info, warn, Logger}; +use slot_clock::SlotClock; +use std::path::PathBuf; +use std::sync::Arc; +use std::sync::Weak; +use tokio::runtime::Runtime; +use types::{EthSpec, PublicKeyBytes}; +use validator_dir::Builder as ValidatorDirBuilder; +use warp::Rejection; +use warp_utils::reject::{custom_bad_request, custom_server_error}; + +pub fn list( + validator_store: Arc>, +) -> ListKeystoresResponse { + let initialized_validators_rwlock = validator_store.initialized_validators(); + let initialized_validators = initialized_validators_rwlock.read(); + + let keystores = initialized_validators + .validator_definitions() + .iter() + .filter(|def| def.enabled) + .map(|def| { + let validating_pubkey = def.voting_public_key.compress(); + + let 
(derivation_path, readonly) = initialized_validators + .signing_method(&validating_pubkey) + .map_or((None, None), |signing_method| match *signing_method { + SigningMethod::LocalKeystore { + ref voting_keystore, + .. + } => (voting_keystore.path(), None), + SigningMethod::Web3Signer { .. } => (None, Some(true)), + }); + + SingleKeystoreResponse { + validating_pubkey, + derivation_path, + readonly, + } + }) + .collect::>(); + + ListKeystoresResponse { data: keystores } +} + +pub fn import( + request: ImportKeystoresRequest, + validator_dir: PathBuf, + validator_store: Arc>, + runtime: Weak, + log: Logger, +) -> Result { + // Check request validity. This is the only cases in which we should return a 4xx code. + if request.keystores.len() != request.passwords.len() { + return Err(custom_bad_request(format!( + "mismatched numbers of keystores ({}) and passwords ({})", + request.keystores.len(), + request.passwords.len(), + ))); + } + + info!( + log, + "Importing keystores via standard HTTP API"; + "count" => request.keystores.len(), + ); + + // Import slashing protection data before keystores, so that new keystores don't start signing + // without it. Do not return early on failure, propagate the failure to each key. + let slashing_protection_status = + if let Some(InterchangeJsonStr(slashing_protection)) = request.slashing_protection { + // Warn for missing slashing protection. + for KeystoreJsonStr(ref keystore) in &request.keystores { + if let Some(public_key) = keystore.public_key() { + let pubkey_bytes = public_key.compress(); + if !slashing_protection + .data + .iter() + .any(|data| data.pubkey == pubkey_bytes) + { + warn!( + log, + "Slashing protection data not provided"; + "public_key" => ?public_key, + ); + } + } + } + + validator_store.import_slashing_protection(slashing_protection) + } else { + warn!(log, "No slashing protection data provided with keystores"); + Ok(()) + }; + + // Import each keystore. 
Some keystores may fail to be imported, so we record a status for each. + let mut statuses = Vec::with_capacity(request.keystores.len()); + + for (KeystoreJsonStr(keystore), password) in request + .keystores + .into_iter() + .zip(request.passwords.into_iter()) + { + let pubkey_str = keystore.pubkey().to_string(); + + let status = if let Err(e) = &slashing_protection_status { + // Slashing protection import failed, do not attempt to import the key. Record an + // error status. + Status::error( + ImportKeystoreStatus::Error, + format!("slashing protection import failed: {:?}", e), + ) + } else if let Some(runtime) = runtime.upgrade() { + // Import the keystore. + match import_single_keystore( + keystore, + password, + validator_dir.clone(), + &validator_store, + runtime, + ) { + Ok(status) => Status::ok(status), + Err(e) => { + warn!( + log, + "Error importing keystore, skipped"; + "pubkey" => pubkey_str, + "error" => ?e, + ); + Status::error(ImportKeystoreStatus::Error, e) + } + } + } else { + Status::error( + ImportKeystoreStatus::Error, + "validator client shutdown".into(), + ) + }; + statuses.push(status); + } + + Ok(ImportKeystoresResponse { data: statuses }) +} + +fn import_single_keystore( + keystore: Keystore, + password: ZeroizeString, + validator_dir_path: PathBuf, + validator_store: &ValidatorStore, + runtime: Arc, +) -> Result { + // Check if the validator key already exists, erroring if it is a remote signer validator. + let pubkey = keystore + .public_key() + .ok_or_else(|| format!("invalid pubkey: {}", keystore.pubkey()))?; + if let Some(def) = validator_store + .initialized_validators() + .read() + .validator_definitions() + .iter() + .find(|def| def.voting_public_key == pubkey) + { + if !def.signing_definition.is_local_keystore() { + return Err("cannot import duplicate of existing remote signer validator".into()); + } else if def.enabled { + return Ok(ImportKeystoreStatus::Duplicate); + } + } + + // Check that the password is correct. 
+ // In future we should re-structure to avoid the double decryption here. It's not as simple + // as removing this check because `add_validator_keystore` will break if provided with an + // invalid validator definition (`update_validators` will get stuck trying to decrypt with the + // wrong password indefinitely). + keystore + .decrypt_keypair(password.as_ref()) + .map_err(|e| format!("incorrect password: {:?}", e))?; + + let validator_dir = ValidatorDirBuilder::new(validator_dir_path) + .voting_keystore(keystore, password.as_ref()) + .store_withdrawal_keystore(false) + .build() + .map_err(|e| format!("failed to build validator directory: {:?}", e))?; + + // Drop validator dir so that `add_validator_keystore` can re-lock the keystore. + let voting_keystore_path = validator_dir.voting_keystore_path(); + drop(validator_dir); + + runtime + .block_on(validator_store.add_validator_keystore( + voting_keystore_path, + password, + true, + None, + )) + .map_err(|e| format!("failed to initialize validator: {:?}", e))?; + + Ok(ImportKeystoreStatus::Imported) +} + +pub fn delete( + request: DeleteKeystoresRequest, + validator_store: Arc>, + runtime: Weak, + log: Logger, +) -> Result { + // Remove from initialized validators. + let initialized_validators_rwlock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rwlock.write(); + + let mut statuses = request + .pubkeys + .iter() + .map(|pubkey_bytes| { + match delete_single_keystore(pubkey_bytes, &mut initialized_validators, runtime.clone()) + { + Ok(status) => Status::ok(status), + Err(error) => { + warn!( + log, + "Error deleting keystore"; + "pubkey" => ?pubkey_bytes, + "error" => ?error, + ); + Status::error(DeleteKeystoreStatus::Error, error) + } + } + }) + .collect::>(); + + // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out + // of date as it resets when it can't be decrypted. 
We update it just a single time to avoid + continually resetting it after each key deletion. + if let Some(runtime) = runtime.upgrade() { + runtime + .block_on(initialized_validators.update_validators()) + .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?; + } + + // Export the slashing protection data. + let slashing_protection = validator_store + .export_slashing_protection_for_keys(&request.pubkeys) + .map_err(|e| { + custom_server_error(format!("error exporting slashing protection: {:?}", e)) + })?; + + // Update statuses based on availability of slashing protection data. + for (pubkey, status) in request.pubkeys.iter().zip(statuses.iter_mut()) { + if status.status == DeleteKeystoreStatus::NotFound + && slashing_protection + .data + .iter() + .any(|interchange_data| interchange_data.pubkey == *pubkey) + { + status.status = DeleteKeystoreStatus::NotActive; + } + } + + Ok(DeleteKeystoresResponse { + data: statuses, + slashing_protection, + }) +} + +fn delete_single_keystore( + pubkey_bytes: &PublicKeyBytes, + initialized_validators: &mut InitializedValidators, + runtime: Weak, +) -> Result { + if let Some(runtime) = runtime.upgrade() { + let pubkey = pubkey_bytes + .decompress() + .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?; + + runtime + .block_on(initialized_validators.delete_definition_and_keystore(&pubkey)) + .map_err(|e| format!("unable to disable and delete: {:?}", e)) + } else { + Err("validator client shutdown".into()) + } +} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 5e0f3443a2..8a5b24f87b 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,14 +1,18 @@ mod api_secret; mod create_validator; +mod keystores; mod tests; use crate::ValidatorStore; use account_utils::mnemonic_from_phrase; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; -use 
eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes}; +use eth2::lighthouse_vc::{ + std_types::AuthResponse, + types::{self as api_types, PublicKey, PublicKeyBytes}, +}; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; -use slog::{crit, info, Logger}; +use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; use std::future::Future; use std::marker::PhantomData; @@ -106,7 +110,7 @@ pub fn serve( // Configure CORS. let cors_builder = { let builder = warp::cors() - .allow_methods(vec!["GET", "POST", "PATCH"]) + .allow_methods(vec!["GET", "POST", "PATCH", "DELETE"]) .allow_headers(vec!["Content-Type", "Authorization"]); warp_utils::cors::set_builder_origins( @@ -125,7 +129,20 @@ pub fn serve( } let authorization_header_filter = ctx.api_secret.authorization_header_filter(); - let api_token_path = ctx.api_secret.api_token_path(); + let mut api_token_path = ctx.api_secret.api_token_path(); + + // Attempt to convert the path to an absolute path, but don't error if it fails. 
+ match api_token_path.canonicalize() { + Ok(abs_path) => api_token_path = abs_path, + Err(e) => { + warn!( + log, + "Error canonicalizing token path"; + "error" => ?e, + ); + } + }; + let signer = ctx.api_secret.signer(); let signer = warp::any().map(move || signer.clone()); @@ -154,9 +171,15 @@ pub fn serve( }) }); + let inner_ctx = ctx.clone(); + let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_spec = Arc::new(ctx.spec.clone()); let spec_filter = warp::any().map(move || inner_spec.clone()); + let api_token_path_inner = api_token_path.clone(); + let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone()); + // GET lighthouse/version let get_node_version = warp::path("lighthouse") .and(warp::path("version")) @@ -348,7 +371,7 @@ pub fn serve( .and(warp::path("keystore")) .and(warp::path::end()) .and(warp::body::json()) - .and(validator_dir_filter) + .and(validator_dir_filter.clone()) .and(validator_store_filter.clone()) .and(signer.clone()) .and(runtime_filter.clone()) @@ -451,9 +474,9 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(warp::body::json()) - .and(validator_store_filter) - .and(signer) - .and(runtime_filter) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and(runtime_filter.clone()) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, @@ -495,6 +518,60 @@ pub fn serve( }, ); + // GET /lighthouse/auth + let get_auth = warp::path("lighthouse").and(warp::path("auth").and(warp::path::end())); + let get_auth = get_auth + .and(signer.clone()) + .and(api_token_path_filter) + .and_then(|signer, token_path: PathBuf| { + blocking_signed_json_task(signer, move || { + Ok(AuthResponse { + token_path: token_path.display().to_string(), + }) + }) + }); + + // Standard key-manager endpoints. 
+ let eth_v1 = warp::path("eth").and(warp::path("v1")); + let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); + + // GET /eth/v1/keystores + let get_std_keystores = std_keystores + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and_then(|signer, validator_store: Arc>| { + blocking_signed_json_task(signer, move || Ok(keystores::list(validator_store))) + }); + + // POST /eth/v1/keystores + let post_std_keystores = std_keystores + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_dir_filter) + .and(validator_store_filter.clone()) + .and(runtime_filter.clone()) + .and(log_filter.clone()) + .and_then( + |request, signer, validator_dir, validator_store, runtime, log| { + blocking_signed_json_task(signer, move || { + keystores::import(request, validator_dir, validator_store, runtime, log) + }) + }, + ); + + // DELETE /eth/v1/keystores + let delete_std_keystores = std_keystores + .and(warp::body::json()) + .and(signer) + .and(validator_store_filter) + .and(runtime_filter) + .and(log_filter) + .and_then(|request, signer, validator_store, runtime, log| { + blocking_signed_json_task(signer, move || { + keystores::delete(request, validator_store, runtime, log) + }) + }); + let routes = warp::any() .and(authorization_header_filter) // Note: it is critical that the `authorization_header_filter` is applied to all routes. 
@@ -508,16 +585,21 @@ pub fn serve( .or(get_lighthouse_health) .or(get_lighthouse_spec) .or(get_lighthouse_validators) - .or(get_lighthouse_validators_pubkey), + .or(get_lighthouse_validators_pubkey) + .or(get_std_keystores), ) .or(warp::post().and( post_validators .or(post_validators_keystore) .or(post_validators_mnemonic) - .or(post_validators_web3signer), + .or(post_validators_web3signer) + .or(post_std_keystores), )) - .or(warp::patch().and(patch_validators)), + .or(warp::patch().and(patch_validators)) + .or(warp::delete().and(delete_std_keystores)), ) + // The auth route is the only route that is allowed to be accessed without the API token. + .or(warp::get().and(get_auth)) // Maps errors into HTTP responses. .recover(warp_utils::reject::handle_rejection) // Add a `Server` header. @@ -550,7 +632,7 @@ pub async fn blocking_signed_json_task( ) -> Result where S: Fn(&[u8]) -> String, - F: Fn() -> Result + Send + 'static, + F: FnOnce() -> Result + Send + 'static, T: Serialize + Send + 'static, { warp_utils::task::blocking_task(func) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index c9ef869be5..fda622901b 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -1,6 +1,8 @@ #![cfg(test)] #![cfg(not(debug_assertions))] +mod keystores; + use crate::doppelganger_service::DoppelgangerService; use crate::{ http_api::{ApiSecret, Config as HttpConfig, Context}, @@ -9,16 +11,16 @@ use crate::{ }; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, - ZeroizeString, + random_password_string, ZeroizeString, }; use deposit_contract::decode_eth1_tx_data; -use environment::null_logger; use eth2::{ lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, types::ErrorMessage as ApiErrorMessage, Error as ApiError, }; use eth2_keystore::KeystoreBuilder; +use logging::test_logger; use parking_lot::RwLock; use 
sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; @@ -40,6 +42,7 @@ type E = MainnetEthSpec; struct ApiTester { client: ValidatorClientHttpClient, initialized_validators: Arc>, + validator_store: Arc>, url: SensitiveUrl, _server_shutdown: oneshot::Sender<()>, _validator_dir: TempDir, @@ -58,7 +61,7 @@ fn build_runtime() -> Arc { impl ApiTester { pub async fn new(runtime: std::sync::Weak) -> Self { - let log = null_logger().unwrap(); + let log = test_logger(); let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); @@ -92,7 +95,7 @@ impl ApiTester { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(runtime.clone(), exit, log.clone(), shutdown_tx); - let validator_store = ValidatorStore::<_, E>::new( + let validator_store = Arc::new(ValidatorStore::<_, E>::new( initialized_validators, slashing_protection, Hash256::repeat_byte(42), @@ -101,7 +104,7 @@ impl ApiTester { slot_clock, executor, log.clone(), - ); + )); validator_store .register_all_in_doppelganger_protection_if_enabled() @@ -113,7 +116,7 @@ impl ApiTester { runtime, api_secret, validator_dir: Some(validator_dir.path().into()), - validator_store: Some(Arc::new(validator_store)), + validator_store: Some(validator_store.clone()), spec: E::default_spec(), config: HttpConfig { enabled: true, @@ -144,11 +147,12 @@ impl ApiTester { let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap(); Self { - initialized_validators, - _validator_dir: validator_dir, client, + initialized_validators, + validator_store, url, _server_shutdown: shutdown_tx, + _validator_dir: validator_dir, _runtime_shutdown: runtime_shutdown, } } @@ -456,7 +460,7 @@ impl ApiTester { self.client .post_lighthouse_validators_web3signer(&request) .await - .unwrap_err(); + .unwrap(); assert_eq!(self.vals_total(), initial_vals + s.count); if s.enabled { @@ -608,6 +612,34 @@ fn routes_with_invalid_auth() { .await }) 
.await + .test_with_invalid_auth(|client| async move { client.get_keystores().await }) + .await + .test_with_invalid_auth(|client| async move { + let password = random_password_string(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) + .unwrap() + .build() + .map(KeystoreJsonStr) + .unwrap(); + client + .post_keystores(&ImportKeystoresRequest { + keystores: vec![keystore], + passwords: vec![password], + slashing_protection: None, + }) + .await + }) + .await + .test_with_invalid_auth(|client| async move { + let keypair = Keypair::random(); + client + .delete_keystores(&DeleteKeystoresRequest { + pubkeys: vec![keypair.pk.compress()], + }) + .await + }) + .await }); } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs new file mode 100644 index 0000000000..1b35a0b57b --- /dev/null +++ b/validator_client/src/http_api/tests/keystores.rs @@ -0,0 +1,977 @@ +use super::*; +use account_utils::random_password_string; +use eth2::lighthouse_vc::{ + http_client::ValidatorClientHttpClient as HttpClient, + std_types::{KeystoreJsonStr as Keystore, *}, + types::Web3SignerValidatorRequest, +}; +// use eth2_keystore::Keystore; +use itertools::Itertools; +use rand::{rngs::SmallRng, Rng, SeedableRng}; +use slashing_protection::interchange::{Interchange, InterchangeMetadata}; +use std::collections::HashMap; +use std::path::Path; + +fn new_keystore(password: ZeroizeString) -> Keystore { + let keypair = Keypair::random(); + Keystore( + KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) + .unwrap() + .build() + .unwrap(), + ) +} + +fn web3_signer_url() -> String { + "http://localhost:1/this-url-hopefully-doesnt-exist".into() +} + +fn new_web3signer_validator() -> (Keypair, Web3SignerValidatorRequest) { + let keypair = Keypair::random(); + let pk = keypair.pk.clone(); + (keypair, web3signer_validator_with_pubkey(pk)) +} + +fn 
web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorRequest { + Web3SignerValidatorRequest { + enable: true, + description: "".into(), + graffiti: None, + voting_public_key: pubkey, + url: web3_signer_url(), + root_certificate_path: None, + request_timeout_ms: None, + } +} + +fn run_test(f: F) +where + F: FnOnce(ApiTester) -> V, + V: Future, +{ + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + let tester = ApiTester::new(weak_runtime).await; + f(tester).await + }); +} + +fn run_dual_vc_test(f: F) +where + F: FnOnce(ApiTester, ApiTester) -> V, + V: Future, +{ + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + let tester1 = ApiTester::new(weak_runtime.clone()).await; + let tester2 = ApiTester::new(weak_runtime).await; + f(tester1, tester2).await + }); +} + +fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { + keystore.0.public_key().unwrap().compress() +} + +fn all_with_status(count: usize, status: T) -> impl Iterator { + std::iter::repeat(status).take(count) +} + +fn all_imported(count: usize) -> impl Iterator { + all_with_status(count, ImportKeystoreStatus::Imported) +} + +fn all_duplicate(count: usize) -> impl Iterator { + all_with_status(count, ImportKeystoreStatus::Duplicate) +} + +fn all_import_error(count: usize) -> impl Iterator { + all_with_status(count, ImportKeystoreStatus::Error) +} + +fn all_deleted(count: usize) -> impl Iterator { + all_with_status(count, DeleteKeystoreStatus::Deleted) +} + +fn all_not_active(count: usize) -> impl Iterator { + all_with_status(count, DeleteKeystoreStatus::NotActive) +} + +fn all_not_found(count: usize) -> impl Iterator { + all_with_status(count, DeleteKeystoreStatus::NotFound) +} + +fn all_delete_error(count: usize) -> impl Iterator { + all_with_status(count, DeleteKeystoreStatus::Error) +} + +fn check_get_response<'a>( + response: &ListKeystoresResponse, + 
expected_keystores: impl IntoIterator, +) { + for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) { + assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2)); + assert_eq!(ks1.derivation_path, ks2.path()); + assert!(ks1.readonly == None || ks1.readonly == Some(false)); + } +} + +fn check_import_response( + response: &ImportKeystoresResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + expected_status, status.status, + "message: {:?}", + status.message + ); + } +} + +fn check_delete_response<'a>( + response: &DeleteKeystoresResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + status.status, expected_status, + "message: {:?}", + status.message + ); + } +} + +#[test] +fn get_auth_no_token() { + run_test(|mut tester| async move { + tester.client.send_authorization_header(false); + let auth_response = tester.client.get_auth().await.unwrap(); + + // Load the file from the returned path. + let token_path = Path::new(&auth_response.token_path); + let token = HttpClient::load_api_token_from_file(token_path).unwrap(); + + // The token should match the one that the client was originally initialised with. 
+ assert!(tester.client.api_token() == Some(&token)); + }) +} + +#[test] +fn get_empty_keystores() { + run_test(|tester| async move { + let res = tester.client.get_keystores().await.unwrap(); + assert_eq!(res, ListKeystoresResponse { data: vec![] }); + }) +} + +#[test] +fn import_new_keystores() { + run_test(|tester| async move { + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. + let get_res = tester.client.get_keystores().await.unwrap(); + check_get_response(&get_res, &keystores); + }) +} + +#[test] +fn import_only_duplicate_keystores() { + run_test(|tester| async move { + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + let req = ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }; + + // All keystores should be imported on first import. + let import_res = tester.client.post_keystores(&req).await.unwrap(); + check_import_response(&import_res, all_imported(keystores.len())); + + // No keystores should be imported on repeat import. + let import_res = tester.client.post_keystores(&req).await.unwrap(); + check_import_response(&import_res, all_duplicate(keystores.len())); + + // Check that GET lists all the imported keystores. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + check_get_response(&get_res, &keystores); + }) +} + +#[test] +fn import_some_duplicate_keystores() { + run_test(|tester| async move { + let password = random_password_string(); + let num_keystores = 5; + let keystores_all = (0..num_keystores) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Import even numbered keystores first. + let keystores1 = keystores_all + .iter() + .enumerate() + .filter_map(|(i, keystore)| { + if i % 2 == 0 { + Some(keystore.clone()) + } else { + None + } + }) + .collect::>(); + + let req1 = ImportKeystoresRequest { + keystores: keystores1.clone(), + passwords: vec![password.clone(); keystores1.len()], + slashing_protection: None, + }; + + let req2 = ImportKeystoresRequest { + keystores: keystores_all.clone(), + passwords: vec![password.clone(); keystores_all.len()], + slashing_protection: None, + }; + + let import_res = tester.client.post_keystores(&req1).await.unwrap(); + check_import_response(&import_res, all_imported(keystores1.len())); + + // Check partial import. + let expected = (0..num_keystores).map(|i| { + if i % 2 == 0 { + ImportKeystoreStatus::Duplicate + } else { + ImportKeystoreStatus::Imported + } + }); + let import_res = tester.client.post_keystores(&req2).await.unwrap(); + check_import_response(&import_res, expected); + }) +} + +#[test] +fn import_wrong_number_of_passwords() { + run_test(|tester| async move { + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + let err = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone()], + slashing_protection: None, + }) + .await + .unwrap_err(); + assert_eq!(err.status().unwrap(), 400); + }) +} + +#[test] +fn get_web3_signer_keystores() { + run_test(|tester| async move { + let num_local = 3; + let num_remote = 2; + + // Add some local validators. 
+ let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_import_response(&import_res, all_imported(keystores.len())); + + // Add some web3signer validators. + let remote_vals = (0..num_remote) + .map(|_| new_web3signer_validator().1) + .collect::>(); + + tester + .client + .post_lighthouse_validators_web3signer(&remote_vals) + .await + .unwrap(); + + // Check that both local and remote validators are returned. + let get_res = tester.client.get_keystores().await.unwrap(); + + let expected_responses = keystores + .iter() + .map(|local_keystore| SingleKeystoreResponse { + validating_pubkey: keystore_pubkey(local_keystore), + derivation_path: local_keystore.path(), + readonly: None, + }) + .chain(remote_vals.iter().map(|remote_val| SingleKeystoreResponse { + validating_pubkey: remote_val.voting_public_key.compress(), + derivation_path: None, + readonly: Some(true), + })) + .collect::>(); + + for response in expected_responses { + assert!(get_res.data.contains(&response), "{:?}", response); + } + }) +} + +#[test] +fn import_and_delete_conflicting_web3_signer_keystores() { + run_test(|tester| async move { + let num_keystores = 3; + + // Create some keystores to be used as both web3signer keystores and local keystores. + let password = random_password_string(); + let keystores = (0..num_keystores) + .map(|_| new_keystore(password.clone())) + .collect::>(); + let pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); + + // Add the validators as web3signer validators. 
+ let remote_vals = pubkeys + .iter() + .map(|pubkey| web3signer_validator_with_pubkey(pubkey.decompress().unwrap())) + .collect::>(); + + tester + .client + .post_lighthouse_validators_web3signer(&remote_vals) + .await + .unwrap(); + + // Attempt to import the same validators as local validators, which should error. + let import_req = ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }; + let import_res = tester.client.post_keystores(&import_req).await.unwrap(); + check_import_response(&import_res, all_import_error(keystores.len())); + + // Attempt to delete the web3signer validators, which should fail. + let delete_req = DeleteKeystoresRequest { + pubkeys: pubkeys.clone(), + }; + let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); + check_delete_response(&delete_res, all_delete_error(keystores.len())); + + // Get should still list all the validators as `readonly`. + let get_res = tester.client.get_keystores().await.unwrap(); + for (ks, pubkey) in get_res.data.iter().zip_eq(&pubkeys) { + assert_eq!(ks.validating_pubkey, *pubkey); + assert_eq!(ks.derivation_path, None); + assert_eq!(ks.readonly, Some(true)); + } + + // Disabling the web3signer validators should *still* prevent them from being + // overwritten. 
+ for pubkey in &pubkeys { + tester + .client + .patch_lighthouse_validators(pubkey, false) + .await + .unwrap(); + } + let import_res = tester.client.post_keystores(&import_req).await.unwrap(); + check_import_response(&import_res, all_import_error(keystores.len())); + let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); + check_delete_response(&delete_res, all_delete_error(keystores.len())); + }) +} + +#[test] +fn import_keystores_wrong_password() { + run_test(|tester| async move { + let num_keystores = 4; + let (keystores, correct_passwords): (Vec<_>, Vec<_>) = (0..num_keystores) + .map(|_| { + let password = random_password_string(); + (new_keystore(password.clone()), password) + }) + .unzip(); + + // First import with some incorrect passwords. + let incorrect_passwords = (0..num_keystores) + .map(|i| { + if i % 2 == 0 { + random_password_string() + } else { + correct_passwords[i].clone() + } + }) + .collect::>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: incorrect_passwords.clone(), + slashing_protection: None, + }) + .await + .unwrap(); + + let expected_statuses = (0..num_keystores).map(|i| { + if i % 2 == 0 { + ImportKeystoreStatus::Error + } else { + ImportKeystoreStatus::Imported + } + }); + check_import_response(&import_res, expected_statuses); + + // Import again with the correct passwords and check that the statuses are as expected. 
+ let correct_import_req = ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: correct_passwords.clone(), + slashing_protection: None, + }; + let import_res = tester + .client + .post_keystores(&correct_import_req) + .await + .unwrap(); + let expected_statuses = (0..num_keystores).map(|i| { + if i % 2 == 0 { + ImportKeystoreStatus::Imported + } else { + ImportKeystoreStatus::Duplicate + } + }); + check_import_response(&import_res, expected_statuses); + + // Import one final time, at which point all keys should be duplicates. + let import_res = tester + .client + .post_keystores(&correct_import_req) + .await + .unwrap(); + check_import_response( + &import_res, + (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate), + ); + }); +} + +#[test] +fn import_invalid_slashing_protection() { + run_test(|tester| async move { + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Invalid slashing protection data with mismatched version and mismatched GVR. + let slashing_protection = Interchange { + metadata: InterchangeMetadata { + interchange_format_version: 0, + genesis_validators_root: Hash256::zero(), + }, + data: vec![], + }; + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: Some(InterchangeJsonStr(slashing_protection)), + }) + .await + .unwrap(); + + // All keystores should fail to import. + check_import_response(&import_res, all_import_error(keystores.len())); + + // Check that GET lists none of the failed keystores. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + check_get_response(&get_res, &[]); + }) +} + +fn all_indices(count: usize) -> Vec { + (0..count).collect() +} + +#[test] +fn migrate_all_with_slashing_protection() { + let n = 3; + generic_migration_test( + n, + vec![ + (0, make_attestation(1, 2)), + (1, make_attestation(2, 3)), + (2, make_attestation(1, 2)), + ], + all_indices(n), + all_indices(n), + all_indices(n), + vec![ + (0, make_attestation(1, 2), false), + (1, make_attestation(2, 3), false), + (2, make_attestation(1, 2), false), + ], + ); +} + +#[test] +fn migrate_some_with_slashing_protection() { + let n = 3; + generic_migration_test( + n, + vec![ + (0, make_attestation(1, 2)), + (1, make_attestation(2, 3)), + (2, make_attestation(1, 2)), + ], + vec![0, 1], + vec![0, 1], + vec![0, 1], + vec![ + (0, make_attestation(1, 2), false), + (1, make_attestation(2, 3), false), + (0, make_attestation(2, 3), true), + (1, make_attestation(3, 4), true), + ], + ); +} + +#[test] +fn migrate_some_missing_slashing_protection() { + let n = 3; + generic_migration_test( + n, + vec![ + (0, make_attestation(1, 2)), + (1, make_attestation(2, 3)), + (2, make_attestation(1, 2)), + ], + vec![0, 1], + vec![0], + vec![0, 1], + vec![ + (0, make_attestation(1, 2), false), + (1, make_attestation(2, 3), true), + (0, make_attestation(2, 3), true), + ], + ); +} + +#[test] +fn migrate_some_extra_slashing_protection() { + let n = 3; + generic_migration_test( + n, + vec![ + (0, make_attestation(1, 2)), + (1, make_attestation(2, 3)), + (2, make_attestation(1, 2)), + ], + all_indices(n), + all_indices(n), + vec![0, 1], + vec![ + (0, make_attestation(1, 2), false), + (1, make_attestation(2, 3), false), + (0, make_attestation(2, 3), true), + (1, make_attestation(3, 4), true), + (2, make_attestation(2, 3), false), + ], + ); +} + +/// Run a test that creates some validators on one VC, and then migrates them to a second VC. 
+/// +/// All indices given are in the range 0..`num_validators`. They are *not* validator indices in the +/// ordinary sense. +/// +/// Parameters: +/// +/// - `num_validators`: the total number of validators to create +/// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)` +/// - `delete_indices`: validators to delete from the first VC +/// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should +/// be a subset of `delete_indices` or the test will panic. +/// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. +/// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool +/// indicates whether the signing should be successful. +fn generic_migration_test( + num_validators: usize, + first_vc_attestations: Vec<(usize, Attestation)>, + delete_indices: Vec, + slashing_protection_indices: Vec, + import_indices: Vec, + second_vc_attestations: Vec<(usize, Attestation, bool)>, +) { + run_dual_vc_test(move |tester1, tester2| async move { + // Create the validators on VC1. + let (keystores, passwords): (Vec<_>, Vec<_>) = (0..num_validators) + .map(|_| { + let password = random_password_string(); + (new_keystore(password.clone()), password) + }) + .unzip(); + + let import_res = tester1 + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: passwords.clone(), + slashing_protection: None, + }) + .await + .unwrap(); + check_import_response(&import_res, all_imported(keystores.len())); + + // Sign attestations on VC1. + for (validator_index, mut attestation) in first_vc_attestations { + let public_key = keystore_pubkey(&keystores[validator_index]); + let current_epoch = attestation.data.target.epoch; + tester1 + .validator_store + .sign_attestation(public_key, 0, &mut attestation, current_epoch) + .await + .unwrap(); + } + + // Delete the selected keys from VC1. 
+ let delete_res = tester1 + .client + .delete_keystores(&DeleteKeystoresRequest { + pubkeys: delete_indices + .iter() + .copied() + .map(|i| keystore_pubkey(&keystores[i])) + .collect(), + }) + .await + .unwrap(); + check_delete_response(&delete_res, all_deleted(delete_indices.len())); + + // Check that slashing protection data was returned for all selected validators. + assert_eq!( + delete_res.slashing_protection.data.len(), + delete_indices.len() + ); + for &i in &delete_indices { + assert!(delete_res + .slashing_protection + .data + .iter() + .any(|interchange_data| interchange_data.pubkey == keystore_pubkey(&keystores[i]))); + } + + // Filter slashing protection according to `slashing_protection_indices`. + let mut slashing_protection = delete_res.slashing_protection; + let data = std::mem::take(&mut slashing_protection.data); + + for &i in &slashing_protection_indices { + let pubkey = keystore_pubkey(&keystores[i]); + slashing_protection.data.push( + data.iter() + .find(|interchange_data| interchange_data.pubkey == pubkey) + .expect("slashing protection indices should be subset of deleted") + .clone(), + ); + } + assert_eq!( + slashing_protection.data.len(), + slashing_protection_indices.len() + ); + + // Import into the 2nd VC using the slashing protection data. + let import_res = tester2 + .client + .post_keystores(&ImportKeystoresRequest { + keystores: import_indices + .iter() + .copied() + .map(|i| keystores[i].clone()) + .collect(), + passwords: import_indices + .iter() + .copied() + .map(|i| passwords[i].clone()) + .collect(), + slashing_protection: Some(InterchangeJsonStr(slashing_protection)), + }) + .await + .unwrap(); + check_import_response(&import_res, all_imported(import_indices.len())); + + // Sign attestations on the second VC. 
+        for (validator_index, mut attestation, should_succeed) in second_vc_attestations {
+            let public_key = keystore_pubkey(&keystores[validator_index]);
+            let current_epoch = attestation.data.target.epoch;
+            match tester2
+                .validator_store
+                .sign_attestation(public_key, 0, &mut attestation, current_epoch)
+                .await
+            {
+                Ok(()) => assert!(should_succeed),
+                Err(e) => assert!(!should_succeed, "{:?}", e),
+            }
+        }
+    });
+}
+
+#[test]
+fn delete_keystores_twice() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // 1. Import all keystores.
+        let import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // 2. Delete all.
+        let delete_req = DeleteKeystoresRequest {
+            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+        };
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_deleted(keystores.len()));
+
+        // 3. Delete again.
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_not_active(keystores.len()));
+    })
+}
+
+#[test]
+fn delete_nonexistent_keystores() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // Delete all.
+        let delete_req = DeleteKeystoresRequest {
+            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+        };
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_not_found(keystores.len()));
+    })
+}
+
+fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation<E> {
+    Attestation {
+        aggregation_bits: BitList::with_capacity(
+            <E as EthSpec>::MaxValidatorsPerCommittee::to_usize(),
+        )
+        .unwrap(),
+        data: AttestationData {
+            source: Checkpoint {
+                epoch: Epoch::new(source_epoch),
+                root: Hash256::from_low_u64_le(source_epoch),
+            },
+            target: Checkpoint {
+                epoch: Epoch::new(target_epoch),
+                root: Hash256::from_low_u64_le(target_epoch),
+            },
+            ..AttestationData::default()
+        },
+        signature: AggregateSignature::empty(),
+    }
+}
+
+#[test]
+fn delete_concurrent_with_signing() {
+    let runtime = build_runtime();
+    let num_keys = 8;
+    let num_signing_threads = 8;
+    let num_attestations = 100;
+    let num_delete_threads = 8;
+    let num_delete_attempts = 100;
+    let delete_prob = 0.01;
+
+    assert!(
+        num_keys % num_signing_threads == 0,
+        "num_keys should be divisible by num threads for simplicity"
+    );
+
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(async {
+        let tester = ApiTester::new(weak_runtime).await;
+
+        // Generate a lot of keys and import them.
+        let password = random_password_string();
+        let keystores = (0..num_keys)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+        let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>();
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone(); keystores.len()],
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // Start several threads signing attestations at sequential epochs.
+        let mut join_handles = vec![];
+
+        for thread_index in 0..num_signing_threads {
+            let keys_per_thread = num_keys / num_signing_threads;
+            let validator_store = tester.validator_store.clone();
+            let thread_pubkeys = all_pubkeys
+                [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread]
+                .to_vec();
+
+            let handle = runtime.spawn(async move {
+                for j in 0..num_attestations {
+                    let mut att = make_attestation(j, j + 1);
+                    for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() {
+                        let _ = validator_store
+                            .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1))
+                            .await;
+                    }
+                }
+            });
+            join_handles.push(handle);
+        }
+
+        // Concurrently, delete each validator one at a time. Store the slashing protection
+        // data so we can ensure it doesn't change after a key is exported.
+        let mut delete_handles = vec![];
+        for _ in 0..num_delete_threads {
+            let client = tester.client.clone();
+            let all_pubkeys = all_pubkeys.clone();
+
+            let handle = runtime.spawn(async move {
+                let mut rng = SmallRng::from_entropy();
+
+                let mut slashing_protection = vec![];
+                for _ in 0..num_delete_attempts {
+                    let to_delete = all_pubkeys
+                        .iter()
+                        .filter(|_| rng.gen_bool(delete_prob))
+                        .copied()
+                        .collect::<Vec<_>>();
+
+                    if !to_delete.is_empty() {
+                        let delete_res = client
+                            .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete })
+                            .await
+                            .unwrap();
+
+                        for status in delete_res.data.iter() {
+                            assert_ne!(status.status, DeleteKeystoreStatus::Error);
+                        }
+
+                        slashing_protection.push(delete_res.slashing_protection);
+                    }
+                }
+                slashing_protection
+            });
+
+            delete_handles.push(handle);
+        }
+
+        // Collect slashing protection.
+        let mut slashing_protection_map = HashMap::new();
+        let collected_slashing_protection = futures::future::join_all(delete_handles).await;
+
+        for interchange in collected_slashing_protection
+            .into_iter()
+            .map(Result::unwrap)
+            .flatten()
+        {
+            for validator_data in interchange.data {
+                slashing_protection_map
+                    .entry(validator_data.pubkey)
+                    .and_modify(|existing| {
+                        assert_eq!(
+                            *existing, validator_data,
+                            "slashing protection data changed after first export"
+                        )
+                    })
+                    .or_insert(validator_data);
+            }
+        }
+
+        futures::future::join_all(join_handles).await
+    });
+}
+
+#[test]
+fn delete_then_reimport() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // 1. Import all keystores.
+        let import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // 2. Delete all.
+        let delete_res = tester
+            .client
+            .delete_keystores(&DeleteKeystoresRequest {
+                pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+            })
+            .await
+            .unwrap();
+        check_delete_response(&delete_res, all_deleted(keystores.len()));
+
+        // 3.
Re-import + let import_res = tester.client.post_keystores(&import_req).await.unwrap(); + check_import_response(&import_res, all_imported(keystores.len())); + }) +} diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 72e651f7d1..5900c8e56b 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -14,19 +14,22 @@ use account_utils::{ }, ZeroizeString, }; +use eth2::lighthouse_vc::std_types::DeleteKeystoreStatus; use eth2_keystore::Keystore; use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; +use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError}; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; -use std::fs::File; +use std::fs::{self, File}; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use types::{Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; +use validator_dir::Builder as ValidatorDirBuilder; use crate::key_cache; use crate::key_cache::KeyCache; @@ -67,6 +70,10 @@ pub enum Error { UnableToSaveDefinitions(validator_definitions::Error), /// It is not legal to try and initialize a disabled validator definition. UnableToInitializeDisabledValidator, + /// There was an error while deleting a keystore file. + UnableToDeleteKeystore(PathBuf, io::Error), + /// There was an error while deleting a validator dir. + UnableToDeleteValidatorDir(PathBuf, io::Error), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), /// There was an error running a tokio async task. @@ -83,6 +90,8 @@ pub enum Error { InvalidWeb3SignerRootCertificateFile(io::Error), InvalidWeb3SignerRootCertificate(ReqwestError), UnableToBuildWeb3SignerClient(ReqwestError), + /// Unable to apply an action to a validator because it is using a remote signer. 
+    InvalidActionOnRemoteValidator,
 }
 
 impl From<LockfileError> for Error {
@@ -101,12 +110,15 @@ pub struct InitializedValidator {
 
 impl InitializedValidator {
     /// Return a reference to this validator's lockfile if it has one.
-    pub fn keystore_lockfile(&self) -> Option<&Lockfile> {
+    pub fn keystore_lockfile(&self) -> Option<MappedMutexGuard<Lockfile>> {
         match self.signing_method.as_ref() {
             SigningMethod::LocalKeystore {
                 ref voting_keystore_lockfile,
                 ..
-            } => Some(voting_keystore_lockfile),
+            } => MutexGuard::try_map(voting_keystore_lockfile.lock(), |option_lockfile| {
+                option_lockfile.as_mut()
+            })
+            .ok(),
             // Web3Signer validators do not have any lockfiles.
             SigningMethod::Web3Signer { .. } => None,
         }
@@ -213,7 +225,7 @@ impl InitializedValidator {
         let lockfile_path = get_lockfile_path(&voting_keystore_path)
             .ok_or_else(|| Error::BadVotingKeystorePath(voting_keystore_path.clone()))?;
 
-        let voting_keystore_lockfile = Lockfile::new(lockfile_path)?;
+        let voting_keystore_lockfile = Mutex::new(Some(Lockfile::new(lockfile_path)?));
 
         SigningMethod::LocalKeystore {
             voting_keystore_path,
@@ -381,6 +393,25 @@ impl InitializedValidators {
             .map(|v| v.signing_method.clone())
     }
 
+    /// Add a validator definition to `self`, replacing any disabled definition with the same
+    /// voting public key.
+    ///
+    /// The on-disk representation of the validator definitions & the key cache will both be
+    /// updated.
+    pub async fn add_definition_replace_disabled(
+        &mut self,
+        def: ValidatorDefinition,
+    ) -> Result<(), Error> {
+        // Drop any disabled definitions with the same public key.
+        let delete_def = |existing_def: &ValidatorDefinition| {
+            !existing_def.enabled && existing_def.voting_public_key == def.voting_public_key
+        };
+        self.definitions.retain(|def| !delete_def(def));
+
+        // Add the definition.
+        self.add_definition(def).await
+    }
+
     /// Add a validator definition to `self`, overwriting the on-disk representation of `self`.
     pub async fn add_definition(&mut self, def: ValidatorDefinition) -> Result<(), Error> {
         if self
@@ -403,6 +434,91 @@
         Ok(())
     }
 
+    /// Delete the validator definition and keystore for `pubkey`.
+    ///
+    /// The delete is carried out in stages so that the filesystem is never left in an inconsistent
+    /// state, even in case of errors or crashes.
+    pub async fn delete_definition_and_keystore(
+        &mut self,
+        pubkey: &PublicKey,
+    ) -> Result<DeleteKeystoreStatus, Error> {
+        // 1. Disable the validator definition.
+        //
+        // We disable before removing so that in case of a crash the auto-discovery mechanism
+        // won't re-activate the keystore.
+        if let Some(def) = self
+            .definitions
+            .as_mut_slice()
+            .iter_mut()
+            .find(|def| &def.voting_public_key == pubkey)
+        {
+            if def.signing_definition.is_local_keystore() {
+                def.enabled = false;
+                self.definitions
+                    .save(&self.validators_dir)
+                    .map_err(Error::UnableToSaveDefinitions)?;
+            } else {
+                return Err(Error::InvalidActionOnRemoteValidator);
+            }
+        } else {
+            return Ok(DeleteKeystoreStatus::NotFound);
+        }
+
+        // 2. Delete from `self.validators`, which holds the signing method.
+        //    Delete the keystore files.
+        if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) {
+            if let SigningMethod::LocalKeystore {
+                ref voting_keystore_path,
+                ref voting_keystore_lockfile,
+                ref voting_keystore,
+                ..
+            } = *initialized_validator.signing_method
+            {
+                // Drop the lock file so that it may be deleted. This is particularly important on
+                // Windows where the lockfile will fail to be deleted if it is still open.
+                drop(voting_keystore_lockfile.lock().take());
+
+                self.delete_keystore_or_validator_dir(voting_keystore_path, voting_keystore)?;
+            }
+        }
+
+        // 3. Delete from validator definitions entirely.
+ self.definitions + .retain(|def| &def.voting_public_key != pubkey); + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(DeleteKeystoreStatus::Deleted) + } + + /// Attempt to delete the voting keystore file, or its entire validator directory. + /// + /// Some parts of the VC assume the existence of a validator based on the existence of a + /// directory in the validators dir named like a public key. + fn delete_keystore_or_validator_dir( + &self, + voting_keystore_path: &Path, + voting_keystore: &Keystore, + ) -> Result<(), Error> { + // If the parent directory is a `ValidatorDir` within `self.validators_dir`, then + // delete the entire directory so that it may be recreated if the keystore is + // re-imported. + if let Some(validator_dir) = voting_keystore_path.parent() { + if validator_dir + == ValidatorDirBuilder::get_dir_path(&self.validators_dir, voting_keystore) + { + fs::remove_dir_all(validator_dir) + .map_err(|e| Error::UnableToDeleteValidatorDir(validator_dir.into(), e))?; + return Ok(()); + } + } + // Otherwise just delete the keystore file. + fs::remove_file(voting_keystore_path) + .map_err(|e| Error::UnableToDeleteKeystore(voting_keystore_path.into(), e))?; + Ok(()) + } + /// Returns a slice of all defined validators (regardless of their enabled state). pub fn validator_definitions(&self) -> &[ValidatorDefinition] { self.definitions.as_slice() @@ -456,17 +572,24 @@ impl InitializedValidators { /// Tries to decrypt the key cache. /// - /// Returns `Ok(true)` if decryption was successful, `Ok(false)` if it couldn't get decrypted - /// and an error if a needed password couldn't get extracted. + /// Returns the decrypted cache if decryption was successful, or an error if a required password + /// wasn't provided and couldn't be read interactively. /// + /// In the case that the cache contains UUIDs for unknown validator definitions then it cannot + /// be decrypted and will be replaced by a new empty cache. 
+    ///
+    /// The mutable `key_stores` argument will be used to accelerate decryption by bypassing
+    /// filesystem accesses for keystores that are already known. In the case that a keystore
+    /// from the validator definitions is not yet in this map, it will be loaded from disk and
+    /// inserted into the map.
     async fn decrypt_key_cache(
         &self,
         mut cache: KeyCache,
         key_stores: &mut HashMap<PathBuf, Keystore>,
     ) -> Result<KeyCache, Error> {
-        //read relevant key_stores
+        // Read relevant key stores from the filesystem.
         let mut definitions_map = HashMap::new();
-        for def in self.definitions.as_slice() {
+        for def in self.definitions.as_slice().iter().filter(|def| def.enabled) {
             match &def.signing_definition {
                 SigningDefinition::LocalKeystore {
                     voting_keystore_path,
@@ -487,10 +610,11 @@ impl InitializedValidators {
         //check if all paths are in the definitions_map
         for uuid in cache.uuids() {
             if !definitions_map.contains_key(uuid) {
-                warn!(
+                debug!(
                     self.log,
-                    "Unknown uuid in cache";
-                    "uuid" => format!("{}", uuid)
+                    "Resetting the key cache";
+                    "keystore_uuid" => %uuid,
+                    "reason" => "impossible to decrypt due to missing keystore",
                 );
                 return Ok(KeyCache::new());
             }
@@ -547,7 +671,7 @@ impl InitializedValidators {
     /// A validator is considered "already known" and skipped if the public key is already known.
     /// I.e., if there are two different definitions with the same public key then the second will
     /// be ignored.
-    async fn update_validators(&mut self) -> Result<(), Error> {
+    pub(crate) async fn update_validators(&mut self) -> Result<(), Error> {
         //use key cache if available
         let mut key_stores = HashMap::new();
 
diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs
index 7f28700a20..3c12ac1e62 100644
--- a/validator_client/src/signing_method.rs
+++ b/validator_client/src/signing_method.rs
@@ -6,6 +6,7 @@ use crate::http_metrics::metrics;
 use eth2_keystore::Keystore;
 use lockfile::Lockfile;
+use parking_lot::Mutex;
 use reqwest::Client;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -75,7 +76,7 @@ pub enum SigningMethod {
     /// A validator that is defined by an EIP-2335 keystore on the local filesystem.
     LocalKeystore {
         voting_keystore_path: PathBuf,
-        voting_keystore_lockfile: Lockfile,
+        voting_keystore_lockfile: Mutex<Option<Lockfile>>,
         voting_keystore: Keystore,
         voting_keypair: Arc<Keypair>,
     },
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index d7efa806ae..884b97694e 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -6,7 +6,9 @@ use crate::{
 };
 use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
 use parking_lot::{Mutex, RwLock};
-use slashing_protection::{NotSafe, Safe, SlashingDatabase};
+use slashing_protection::{
+    interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase,
+};
 use slog::{crit, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::iter::FromIterator;
@@ -183,7 +185,7 @@ impl ValidatorStore {
 
         self.validators
             .write()
-            .add_definition(validator_def.clone())
+            .add_definition_replace_disabled(validator_def.clone())
             .await
             .map_err(|e| format!("Unable to add definition: {:?}", e))?;
 
@@ -693,6 +695,48 @@ impl ValidatorStore {
         Ok(SignedContributionAndProof { message, signature })
     }
 
+    pub fn import_slashing_protection(
+        &self,
+        interchange: Interchange,
+    ) -> Result<(), InterchangeError> {
+
        self.slashing_protection
+            .import_interchange_info(interchange, self.genesis_validators_root)?;
+        Ok(())
+    }
+
+    /// Export slashing protection data while also disabling the given keys in the database.
+    ///
+    /// If any key is unknown to the slashing protection database it will be silently omitted
+    /// from the result. It is the caller's responsibility to check whether all keys provided
+    /// had data returned for them.
+    pub fn export_slashing_protection_for_keys(
+        &self,
+        pubkeys: &[PublicKeyBytes],
+    ) -> Result<Interchange, InterchangeError> {
+        self.slashing_protection.with_transaction(|txn| {
+            let known_pubkeys = pubkeys
+                .iter()
+                .filter_map(|pubkey| {
+                    let validator_id = self
+                        .slashing_protection
+                        .get_validator_id_ignoring_status(txn, pubkey)
+                        .ok()?;
+
+                    Some(
+                        self.slashing_protection
+                            .update_validator_status(txn, validator_id, false)
+                            .map(|()| *pubkey),
+                    )
+                })
+                .collect::<Result<Vec<PublicKeyBytes>, _>>()?;
+            self.slashing_protection.export_interchange_info_in_txn(
+                self.genesis_validators_root,
+                Some(&known_pubkeys),
+                txn,
+            )
+        })
+    }
+
     /// Prune the slashing protection database so that it remains performant.
     ///
     /// This function will only do actual pruning periodically, so it should usually be