From d60c24ef1cc0b5dfa930e1dd4fc85abc29e5fc4c Mon Sep 17 00:00:00 2001 From: ThreeHrSleep <151536303+ThreeHrSleep@users.noreply.github.com> Date: Thu, 13 Mar 2025 04:01:05 +0530 Subject: [PATCH] Integrate tracing (#6339) Tracing Integration - [reference](https://github.com/eth-protocol-fellows/cohort-five/blob/5bbf1859e921065bd69f8671038ed16643465b86/projects/project-ideas.md?plain=1#L297) - [x] replace slog & log with tracing throughout the codebase - [x] implement custom crit log - [x] make relevant changes in the formatter - [x] replace sloggers - [x] re-write SSE logging components cc: @macladson @eserilev --- Cargo.lock | 486 +++------- Cargo.toml | 12 +- beacon_node/Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 5 +- .../beacon_chain/src/attestation_rewards.rs | 15 +- .../beacon_chain/src/attestation_simulator.rs | 21 +- .../src/attestation_verification.rs | 14 +- .../beacon_chain/src/beacon_block_reward.rs | 22 +- .../beacon_chain/src/beacon_block_streamer.rs | 98 +- beacon_node/beacon_chain/src/beacon_chain.rs | 731 ++++++-------- .../beacon_chain/src/blob_verification.rs | 9 +- .../beacon_chain/src/block_verification.rs | 76 +- beacon_node/beacon_chain/src/builder.rs | 117 +-- .../beacon_chain/src/canonical_head.rs | 193 ++-- .../src/data_availability_checker.rs | 73 +- .../overflow_lru_cache.rs | 75 +- .../src/data_column_verification.rs | 9 +- beacon_node/beacon_chain/src/eth1_chain.rs | 70 +- .../src/eth1_finalization_cache.rs | 48 +- beacon_node/beacon_chain/src/events.rs | 22 +- .../beacon_chain/src/execution_payload.rs | 54 +- beacon_node/beacon_chain/src/fetch_blobs.rs | 67 +- beacon_node/beacon_chain/src/fork_revert.rs | 17 +- .../beacon_chain/src/graffiti_calculator.rs | 80 +- .../beacon_chain/src/historical_blocks.rs | 11 +- .../src/light_client_server_cache.rs | 8 +- beacon_node/beacon_chain/src/migrate.rs | 156 ++- .../src/otb_verification_service.rs | 369 ++++++++ .../src/pre_finalization_cache.rs | 7 +- 
.../beacon_chain/src/proposer_prep_service.rs | 16 +- beacon_node/beacon_chain/src/schema_change.rs | 20 +- .../src/schema_change/migration_schema_v20.rs | 22 +- .../src/schema_change/migration_schema_v21.rs | 23 +- .../src/schema_change/migration_schema_v22.rs | 33 +- .../beacon_chain/src/shuffling_cache.rs | 22 +- .../beacon_chain/src/state_advance_timer.rs | 120 +-- .../src/sync_committee_rewards.rs | 6 +- beacon_node/beacon_chain/src/test_utils.rs | 105 +-- .../beacon_chain/src/validator_monitor.rs | 653 ++++++++----- .../src/validator_pubkey_cache.rs | 8 +- beacon_node/beacon_chain/tests/bellatrix.rs | 2 - .../beacon_chain/tests/block_verification.rs | 10 +- beacon_node/beacon_chain/tests/capella.rs | 1 - .../beacon_chain/tests/op_verification.rs | 3 - .../tests/payload_invalidation.rs | 2 - beacon_node/beacon_chain/tests/store_tests.rs | 18 +- .../beacon_chain/tests/validator_monitor.rs | 2 - beacon_node/beacon_processor/Cargo.toml | 2 +- beacon_node/beacon_processor/src/lib.rs | 614 ++++++------ .../src/work_reprocessing_queue.rs | 175 ++-- beacon_node/client/Cargo.toml | 4 +- beacon_node/client/src/builder.rs | 178 ++-- .../src/compute_light_client_updates.rs | 7 +- beacon_node/client/src/notifier.rs | 389 ++++---- beacon_node/eth1/Cargo.toml | 3 +- beacon_node/eth1/src/service.rs | 176 ++-- beacon_node/eth1/tests/test.rs | 52 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/execution_layer/src/engines.rs | 72 +- beacon_node/execution_layer/src/lib.rs | 349 +++---- .../execution_layer/src/payload_status.rs | 30 +- .../src/test_utils/mock_builder.rs | 72 +- .../src/test_utils/mock_execution_layer.rs | 3 +- .../execution_layer/src/test_utils/mod.rs | 15 +- beacon_node/genesis/Cargo.toml | 3 +- .../genesis/src/eth1_genesis_service.rs | 106 +-- beacon_node/genesis/tests/tests.rs | 6 +- beacon_node/http_api/Cargo.toml | 3 +- beacon_node/http_api/src/block_rewards.rs | 32 +- beacon_node/http_api/src/lib.rs | 309 ++---- 
beacon_node/http_api/src/proposer_duties.rs | 9 +- .../http_api/src/publish_attestations.rs | 68 +- beacon_node/http_api/src/publish_blocks.rs | 213 ++--- .../http_api/src/sync_committee_rewards.rs | 5 +- beacon_node/http_api/src/sync_committees.rs | 62 +- beacon_node/http_api/src/test_utils.rs | 23 +- .../tests/broadcast_validation_tests.rs | 8 - beacon_node/http_api/tests/tests.rs | 9 +- beacon_node/http_metrics/Cargo.toml | 3 +- beacon_node/http_metrics/src/lib.rs | 12 +- beacon_node/http_metrics/tests/tests.rs | 6 +- beacon_node/lighthouse_network/Cargo.toml | 8 +- .../lighthouse_network/src/discovery/enr.rs | 25 +- .../lighthouse_network/src/discovery/mod.rs | 228 +++-- .../src/discovery/subnet_predicate.rs | 10 +- .../lighthouse_network/src/listen_addr.rs | 22 - .../src/peer_manager/mod.rs | 125 ++- .../src/peer_manager/network_behaviour.rs | 34 +- .../src/peer_manager/peerdb.rs | 149 ++- .../lighthouse_network/src/rpc/handler.rs | 110 ++- .../lighthouse_network/src/rpc/methods.rs | 16 - beacon_node/lighthouse_network/src/rpc/mod.rs | 124 +-- .../src/rpc/self_limiter.rs | 27 +- .../src/service/api_types.rs | 16 - .../lighthouse_network/src/service/mod.rs | 494 +++++++--- .../lighthouse_network/src/service/utils.rs | 45 +- .../lighthouse_network/src/types/globals.rs | 23 +- .../lighthouse_network/tests/common.rs | 62 +- .../lighthouse_network/tests/rpc_tests.rs | 160 ++-- beacon_node/network/Cargo.toml | 8 +- beacon_node/network/src/nat.rs | 10 +- .../gossip_methods.rs | 891 ++++++++---------- .../src/network_beacon_processor/mod.rs | 152 ++- .../network_beacon_processor/rpc_methods.rs | 368 ++++---- .../network_beacon_processor/sync_methods.rs | 263 +++--- .../src/network_beacon_processor/tests.rs | 5 - beacon_node/network/src/persisted_dht.rs | 5 +- beacon_node/network/src/router.rs | 137 ++- beacon_node/network/src/service.rs | 121 +-- beacon_node/network/src/service/tests.rs | 32 +- .../src/subnet_service/attestation_subnets.rs | 681 +++++++++++++ 
beacon_node/network/src/subnet_service/mod.rs | 152 ++- .../src/subnet_service/sync_subnets.rs | 345 +++++++ .../network/src/subnet_service/tests/mod.rs | 21 +- .../network/src/sync/backfill_sync/mod.rs | 257 ++++- .../network/src/sync/block_lookups/mod.rs | 322 +++++-- beacon_node/network/src/sync/manager.rs | 107 +-- .../network/src/sync/network_context.rs | 304 ++++-- .../src/sync/network_context/custody.rs | 59 +- beacon_node/network/src/sync/peer_sampling.rs | 167 ++-- .../network/src/sync/range_sync/batch.rs | 56 +- .../network/src/sync/range_sync/chain.rs | 216 +++-- .../src/sync/range_sync/chain_collection.rs | 40 +- .../network/src/sync/range_sync/range.rs | 94 +- beacon_node/network/src/sync/tests/lookups.rs | 32 +- beacon_node/network/src/sync/tests/mod.rs | 2 - beacon_node/src/config.rs | 84 +- beacon_node/src/lib.rs | 63 +- beacon_node/store/Cargo.toml | 4 +- beacon_node/store/src/chunked_iter.rs | 19 +- beacon_node/store/src/garbage_collection.rs | 8 +- beacon_node/store/src/hot_cold_store.rs | 323 +++---- beacon_node/store/src/iter.rs | 9 +- beacon_node/store/src/reconstruct.rs | 21 +- beacon_node/tests/test.rs | 2 - beacon_node/timer/Cargo.toml | 2 +- beacon_node/timer/src/lib.rs | 7 +- book/src/help_bn.md | 17 +- book/src/help_general.md | 17 +- book/src/help_vc.md | 17 +- book/src/help_vm.md | 17 +- book/src/help_vm_create.md | 17 +- book/src/help_vm_import.md | 17 +- book/src/help_vm_move.md | 17 +- boot_node/Cargo.toml | 6 +- boot_node/src/config.rs | 17 +- boot_node/src/lib.rs | 48 +- boot_node/src/server.rs | 54 +- common/account_utils/Cargo.toml | 2 +- .../src/validator_definitions.rs | 15 +- common/eth2_network_config/Cargo.toml | 3 +- common/eth2_network_config/src/lib.rs | 26 +- common/lighthouse_version/Cargo.toml | 2 +- common/logging/Cargo.toml | 6 +- common/logging/src/async_record.rs | 307 ------ common/logging/src/lib.rs | 378 +++----- common/logging/src/macros.rs | 6 + common/logging/src/sse_logging_components.rs | 98 +- 
common/logging/src/tracing_logging_layer.rs | 521 +++++++++- common/logging/tests/test.rs | 51 - common/monitoring_api/Cargo.toml | 2 +- common/monitoring_api/src/lib.rs | 29 +- common/task_executor/Cargo.toml | 10 +- common/task_executor/src/lib.rs | 104 +- common/task_executor/src/test_utils.rs | 14 +- consensus/fork_choice/Cargo.toml | 3 +- consensus/fork_choice/src/fork_choice.rs | 32 +- consensus/types/Cargo.toml | 3 +- consensus/types/src/slot_epoch_macros.rs | 11 - .../generate_deterministic_keypairs.rs | 2 +- database_manager/Cargo.toml | 2 +- database_manager/src/lib.rs | 68 +- lcli/Cargo.toml | 3 +- lcli/src/block_root.rs | 4 +- lcli/src/main.rs | 42 +- lcli/src/parse_ssz.rs | 4 +- lcli/src/skip_slots.rs | 2 +- lcli/src/state_root.rs | 2 +- lcli/src/transition_blocks.rs | 13 +- lighthouse/Cargo.toml | 3 +- lighthouse/environment/Cargo.toml | 11 +- lighthouse/environment/src/lib.rs | 352 +++---- lighthouse/environment/src/tracing_common.rs | 78 ++ .../environment/tests/environment_builder.rs | 2 - lighthouse/src/main.rs | 198 ++-- lighthouse/src/metrics.rs | 9 +- lighthouse/tests/beacon_node.rs | 10 +- scripts/tests/doppelganger_protection.sh | 2 +- slasher/Cargo.toml | 4 +- slasher/service/Cargo.toml | 2 +- slasher/service/src/service.rs | 82 +- slasher/src/database.rs | 6 +- slasher/src/slasher.rs | 72 +- slasher/tests/attester_slashings.rs | 5 +- slasher/tests/proposer_slashings.rs | 5 +- slasher/tests/random.rs | 6 +- slasher/tests/wrap_around.rs | 3 +- testing/ef_tests/src/cases/fork_choice.rs | 1 - .../src/test_rig.rs | 14 +- testing/simulator/Cargo.toml | 3 + testing/simulator/src/basic_sim.rs | 46 +- testing/simulator/src/fallback_sim.rs | 47 +- testing/test-test_logger/Cargo.toml | 9 - testing/test-test_logger/src/lib.rs | 22 - testing/validator_test_rig/Cargo.toml | 2 +- .../src/mock_beacon_node.rs | 8 +- testing/web3signer_tests/src/lib.rs | 12 +- validator_client/Cargo.toml | 2 +- .../beacon_node_fallback/Cargo.toml | 2 +- 
.../src/beacon_node_health.rs | 8 +- .../beacon_node_fallback/src/lib.rs | 131 +-- .../doppelganger_service/Cargo.toml | 3 +- .../doppelganger_service/src/lib.rs | 118 +-- validator_client/graffiti_file/Cargo.toml | 2 +- validator_client/graffiti_file/src/lib.rs | 8 +- validator_client/http_api/Cargo.toml | 3 +- .../src/create_signed_voluntary_exit.rs | 10 +- validator_client/http_api/src/keystores.rs | 39 +- validator_client/http_api/src/lib.rs | 64 +- validator_client/http_api/src/remotekeys.rs | 28 +- validator_client/http_api/src/test_utils.rs | 8 +- validator_client/http_api/src/tests.rs | 8 +- validator_client/http_metrics/Cargo.toml | 3 +- validator_client/http_metrics/src/lib.rs | 12 +- .../initialized_validators/Cargo.toml | 2 +- .../initialized_validators/src/lib.rs | 70 +- .../slashing_protection/Cargo.toml | 1 + validator_client/src/check_synced.rs | 25 + validator_client/src/config.rs | 15 +- validator_client/src/latency.rs | 11 +- validator_client/src/lib.rs | 173 ++-- validator_client/src/notifier.rs | 106 +-- .../validator_services/Cargo.toml | 3 +- .../src/attestation_service.rs | 176 ++-- .../validator_services/src/block_service.rs | 132 +-- .../validator_services/src/duties_service.rs | 180 ++-- .../src/preparation_service.rs | 72 +- .../validator_services/src/sync.rs | 108 +-- .../src/sync_committee_service.rs | 173 ++-- validator_client/validator_store/Cargo.toml | 3 +- validator_client/validator_store/src/lib.rs | 71 +- 241 files changed, 9485 insertions(+), 9328 deletions(-) create mode 100644 beacon_node/beacon_chain/src/otb_verification_service.rs create mode 100644 beacon_node/network/src/subnet_service/attestation_subnets.rs create mode 100644 beacon_node/network/src/subnet_service/sync_subnets.rs delete mode 100644 common/logging/src/async_record.rs create mode 100644 common/logging/src/macros.rs delete mode 100644 common/logging/tests/test.rs create mode 100644 lighthouse/environment/src/tracing_common.rs delete mode 100644 
testing/test-test_logger/Cargo.toml delete mode 100644 testing/test-test_logger/src/lib.rs create mode 100644 validator_client/src/check_synced.rs diff --git a/Cargo.lock b/Cargo.lock index f85cf18784..d559c2f1fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -51,7 +51,7 @@ dependencies = [ "rpassword", "serde", "serde_yaml", - "slog", + "tracing", "types", "validator_dir", "zeroize", @@ -72,12 +72,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" -[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - [[package]] name = "aead" version = "0.5.2" @@ -218,7 +212,7 @@ dependencies = [ "foldhash", "getrandom 0.2.15", "hashbrown 0.15.2", - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "k256 0.13.4", "keccak-asm", @@ -750,9 +744,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "bb97d56060ee67d285efb8001fec9d2a4c710c32efd2e14b5cbb5ba71930fc2d" [[package]] name = "beacon_chain" @@ -795,10 +789,6 @@ dependencies = [ "serde", "serde_json", "slasher", - "slog", - "slog-async", - "slog-term", - "sloggers", "slot_clock", "smallvec", "ssz_types", @@ -810,6 +800,7 @@ dependencies = [ "tempfile", "tokio", "tokio-stream", + "tracing", "tree_hash", "tree_hash_derive", "types", @@ -839,10 +830,10 @@ dependencies = [ "sensitive_url", "serde_json", "slasher", - "slog", "store", "strum", "task_executor", + "tracing", "types", "unused_port", ] @@ -858,10 +849,10 @@ dependencies = [ "itertools 0.10.5", "logging", "serde", - "slog", "slot_clock", "strum", "tokio", + "tracing", "types", 
"validator_metrics", "validator_test_rig", @@ -880,12 +871,12 @@ dependencies = [ "num_cpus", "parking_lot 0.12.3", "serde", - "slog", "slot_clock", "strum", "task_executor", "tokio", "tokio-util", + "tracing", "types", ] @@ -1068,11 +1059,9 @@ dependencies = [ "log", "logging", "serde", - "slog", - "slog-async", - "slog-scope", - "slog-term", "tokio", + "tracing", + "tracing-subscriber", "types", ] @@ -1268,7 +1257,9 @@ checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", + "wasm-bindgen", "windows-link", ] @@ -1332,9 +1323,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" dependencies = [ "clap_builder", "clap_derive", @@ -1342,9 +1333,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" dependencies = [ "anstream", "anstyle", @@ -1355,9 +1346,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1407,6 +1398,7 @@ dependencies = [ "http_metrics", "kzg", "lighthouse_network", + "logging", "metrics", "monitoring_api", "network", @@ -1417,7 +1409,6 @@ dependencies = [ "serde_yaml", "slasher", "slasher_service", - "slog", "slot_clock", 
"state_processing", "store", @@ -1425,6 +1416,8 @@ dependencies = [ "time", "timer", "tokio", + "tracing", + "tracing-subscriber", "types", ] @@ -1899,12 +1892,6 @@ dependencies = [ "libc", ] -[[package]] -name = "dary_heap" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" - [[package]] name = "data-encoding" version = "2.8.0" @@ -1942,9 +1929,9 @@ dependencies = [ "environment", "hex", "serde", - "slog", "store", "strum", + "tracing", "types", ] @@ -2138,16 +2125,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -2159,17 +2136,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - [[package]] name = "discv5" version = "0.9.1" @@ -2224,10 +2190,10 @@ dependencies = [ "futures", "logging", "parking_lot 0.12.3", - "slog", "slot_clock", "task_executor", "tokio", + "tracing", "types", ] @@ -2471,19 +2437,20 @@ name = "environment" version = "0.1.2" dependencies = [ "async-channel 1.9.0", + "clap", "ctrlc", "eth2_config", "eth2_network_config", "futures", "logging", + "logroller", "serde", - "slog", - "slog-async", - "slog-json", - "slog-term", - "sloggers", "task_executor", "tokio", + "tracing", + "tracing-appender", + "tracing-log", + "tracing-subscriber", "types", ] @@ -2493,15 +2460,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" -[[package]] -name = "erased-serde" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" -dependencies = [ - "serde", -] - [[package]] name = "errno" version = "0.3.10" @@ -2530,12 +2488,11 @@ dependencies = [ "sensitive_url", "serde", "serde_yaml", - "slog", - "sloggers", "state_processing", "superstruct", "task_executor", "tokio", + "tracing", "tree_hash", "types", ] @@ -2648,15 +2605,14 @@ dependencies = [ "eth2_config", "ethereum_ssz", "kzg", - "logging", "pretty_reqwest_error", "reqwest", "sensitive_url", "serde_yaml", "sha2 0.9.9", - "slog", "tempfile", "tokio", + "tracing", "types", "url", "zip", @@ -2784,7 +2740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", - "ring 0.17.13", + "ring 0.17.14", "sha2 0.10.8", ] @@ -3048,7 +3004,6 @@ dependencies = [ "serde", "serde_json", "sha2 0.9.9", - "slog", "slot_clock", "ssz_types", "state_processing", @@ -3058,6 +3013,7 @@ dependencies = [ "tempfile", "tokio", "tokio-stream", + "tracing", "tree_hash", "tree_hash_derive", "triehash", @@ -3254,12 +3210,13 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "logging", "metrics", "proto_array", - "slog", "state_processing", "store", "tokio", + "tracing", "types", ] @@ -3465,12 +3422,13 @@ dependencies = [ "ethereum_ssz", "futures", "int_to_bytes", + "logging", "merkle_proof", "rayon", "sensitive_url", - "slog", "state_processing", "tokio", + "tracing", "tree_hash", "types", ] @@ -3549,8 +3507,8 @@ dependencies = [ "bls", "hex", "serde", - "slog", "tempfile", + "tracing", "types", ] @@ -3590,7 +3548,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ 
-3608,8 +3566,8 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.2.0", - "indexmap 2.7.1", + "http 1.3.0", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -3913,9 +3871,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "0a761d192fbf18bdef69f5ceedd0d1333afcbda0ee23840373b8317570d23c65" dependencies = [ "bytes", "fnv", @@ -3940,18 +3898,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http 1.3.0", ] [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", - "http 1.2.0", + "futures-core", + "http 1.3.0", "http-body 1.0.1", "pin-project-lite", ] @@ -3989,7 +3947,6 @@ dependencies = [ "sensitive_url", "serde", "serde_json", - "slog", "slot_clock", "state_processing", "store", @@ -3998,6 +3955,7 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", + "tracing", "tree_hash", "types", "warp", @@ -4017,10 +3975,10 @@ dependencies = [ "metrics", "reqwest", "serde", - "slog", "slot_clock", "store", "tokio", + "tracing", "types", "warp", "warp_utils", @@ -4078,7 +4036,7 @@ dependencies = [ "futures-channel", "futures-util", "h2 0.4.8", - "http 1.2.0", + "http 1.3.0", "http-body 1.0.1", "httparse", "httpdate", @@ -4125,7 +4083,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.2.0", + "http 1.3.0", "http-body 1.0.1", "hyper 1.6.0", "pin-project-lite", @@ -4346,7 +4304,7 @@ dependencies = [ "attohttpc", 
"bytes", "futures", - "http 1.2.0", + "http 1.3.0", "http-body-util", "hyper 1.6.0", "hyper-util", @@ -4367,7 +4325,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.2.0", + "http 1.3.0", "http-body-util", "hyper 1.6.0", "hyper-util", @@ -4452,9 +4410,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "arbitrary", "equivalent", @@ -4479,8 +4437,8 @@ dependencies = [ "serde", "serde_json", "signing_method", - "slog", "tokio", + "tracing", "types", "url", "validator_dir", @@ -4634,7 +4592,7 @@ dependencies = [ "base64 0.22.1", "js-sys", "pem", - "ring 0.17.13", + "ring 0.17.14", "serde", "serde_json", "simple_asn1", @@ -4757,10 +4715,11 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "sloggers", "snap", "state_processing", "store", + "tracing", + "tracing-subscriber", "tree_hash", "types", "validator_dir", @@ -4791,33 +4750,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.170" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" - -[[package]] -name = "libflate" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e" -dependencies = [ - "adler32", - "core2", - "crc32fast", - "dary_heap", - "libflate_lz77", -] - -[[package]] -name = "libflate_lz77" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d" -dependencies = [ - "core2", - "hashbrown 0.14.5", - "rle-decode-fast", -] +checksum = 
"c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libloading" @@ -4949,7 +4884,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.48.1" -source = "git+https://github.com/sigp/rust-libp2p.git?tag=sigp-gossipsub-0.1#3e24b1bbec5fae182595aee0958f823be87afaad" +source = "git+https://github.com/sigp/rust-libp2p.git?branch=sigp-gossipsub#3e24b1bbec5fae182595aee0958f823be87afaad" dependencies = [ "async-channel 2.3.1", "asynchronous-codec", @@ -5127,7 +5062,7 @@ dependencies = [ "libp2p-tls", "quinn", "rand 0.8.5", - "ring 0.17.13", + "ring 0.17.14", "rustls 0.23.23", "socket2", "thiserror 2.0.12", @@ -5188,16 +5123,16 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcaebc1069dea12c5b86a597eaaddae0317c2c2cb9ec99dc94f82fd340f5c78b" +checksum = "42bbf5084fb44133267ad4caaa72a253d68d709edd2ed1cf9b42431a8ead8fd5" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", "rcgen", - "ring 0.17.13", + "ring 0.17.14", "rustls 0.23.23", "rustls-webpki 0.101.7", "thiserror 2.0.12", @@ -5348,10 +5283,11 @@ dependencies = [ "serde_yaml", "slasher", "slashing_protection", - "slog", "store", "task_executor", "tempfile", + "tracing", + "tracing-subscriber", "types", "unused_port", "validator_client", @@ -5396,9 +5332,6 @@ dependencies = [ "regex", "serde", "sha2 0.9.9", - "slog", - "slog-async", - "slog-term", "smallvec", "snap", "ssz_types", @@ -5410,6 +5343,8 @@ dependencies = [ "tokio", "tokio-io-timeout", "tokio-util", + "tracing", + "tracing-subscriber", "types", "unsigned-varint 0.8.0", "unused_port", @@ -5510,14 +5445,12 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", + "logroller", "metrics", + "once_cell", "parking_lot 0.12.3", "serde", "serde_json", - "slog", - "slog-term", - "sloggers", - "take_mut", "tokio", "tracing", "tracing-appender", @@ -5526,6 +5459,18 @@ 
dependencies = [ "tracing-subscriber", ] +[[package]] +name = "logroller" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e8dd932139da44917b3cd5812ed9536d985aa67203778e0507347579499f49c" +dependencies = [ + "chrono", + "flate2", + "regex", + "thiserror 1.0.69", +] + [[package]] name = "loom" version = "0.7.2" @@ -5758,7 +5703,7 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.2.0", + "http 1.3.0", "http-body 1.0.1", "http-body-util", "hyper 1.6.0", @@ -5804,10 +5749,10 @@ dependencies = [ "sensitive_url", "serde", "serde_json", - "slog", "store", "task_executor", "tokio", + "tracing", ] [[package]] @@ -6012,10 +5957,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "serde_json", - "slog", - "slog-async", - "slog-term", - "sloggers", "slot_clock", "smallvec", "ssz_types", @@ -6024,6 +5965,8 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", + "tracing", + "tracing-subscriber", "types", ] @@ -6207,9 +6150,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" [[package]] name = "oneshot_broadcast" @@ -6982,7 +6925,7 @@ dependencies = [ "bytes", "getrandom 0.2.15", "rand 0.8.5", - "ring 0.17.13", + "ring 0.17.14", "rustc-hash 2.1.1", "rustls 0.23.23", "rustls-pki-types", @@ -7141,12 +7084,13 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.11.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" dependencies = [ "pem", - "ring 0.16.20", + "ring 0.17.14", + "rustls-pki-types", "time", "yasna", ] @@ -7343,9 +7287,9 @@ 
dependencies = [ [[package]] name = "ring" -version = "0.17.13" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -7355,12 +7299,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rle-decode-fast" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" - [[package]] name = "rlp" version = "0.5.2" @@ -7560,9 +7498,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dade4812df5c384711475be5fcd8c162555352945401aed22a35bffeab61f657" +checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" dependencies = [ "bitflags 2.9.0", "errno", @@ -7578,7 +7516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.13", + "ring 0.17.14", "rustls-webpki 0.101.7", "sct", ] @@ -7590,7 +7528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.13", + "ring 0.17.14", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7604,7 +7542,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "once_cell", - "ring 0.17.13", + "ring 0.17.14", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7644,7 +7582,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.13", + "ring 0.17.14", "untrusted 0.9.0", ] @@ -7654,7 +7592,7 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring 0.17.13", + "ring 0.17.14", "rustls-pki-types", "untrusted 0.9.0", ] @@ -7788,7 +7726,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.13", + "ring 0.17.14", "untrusted 0.9.0", ] @@ -7955,7 +7893,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -8113,16 +8051,19 @@ version = "0.2.0" dependencies = [ "clap", "env_logger 0.9.3", + "environment", "eth2_network_config", "execution_layer", "futures", "kzg", + "logging", "node_test_rig", "parking_lot 0.12.3", "rayon", "sensitive_url", "serde_json", "tokio", + "tracing-subscriber", "types", ] @@ -8149,7 +8090,6 @@ dependencies = [ "libmdbx", "lmdb-rkv", "lmdb-rkv-sys", - "logging", "lru", "maplit", "metrics", @@ -8159,10 +8099,10 @@ dependencies = [ "redb", "safe_arith", "serde", - "slog", "ssz_types", "strum", "tempfile", + "tracing", "tree_hash", "tree_hash_derive", "types", @@ -8177,11 +8117,11 @@ dependencies = [ "lighthouse_network", "network", "slasher", - "slog", "slot_clock", "state_processing", "task_executor", "tokio", + "tracing", "types", ] @@ -8199,111 +8139,10 @@ dependencies = [ "serde", "serde_json", "tempfile", + "tracing", "types", ] -[[package]] -name = "slog" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" -dependencies = [ - "erased-serde", -] - -[[package]] -name = "slog-async" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c8038f898a2c79507940990f05386455b3a317d8f18d4caea7cbc3d5096b84" -dependencies = [ - "crossbeam-channel", - "slog", - "take_mut", - "thread_local", -] - -[[package]] -name = "slog-json" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e1e53f61af1e3c8b852eef0a9dee29008f55d6dd63794f3f12cef786cf0f219" -dependencies = [ - "serde", - "serde_json", - "slog", - "time", -] - -[[package]] -name = "slog-kvfilter" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae939ed7d169eed9699f4f5cd440f046f5dc5dfc27c19e3cd311619594c175e0" -dependencies = [ - "regex", - "slog", -] - -[[package]] -name = "slog-scope" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95a4b4c3274cd2869549da82b57ccc930859bdbf5bcea0424bc5f140b3c786" -dependencies = [ - "arc-swap", - "lazy_static", - "slog", -] - -[[package]] -name = "slog-stdlog" -version = "4.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6706b2ace5bbae7291d3f8d2473e2bfab073ccd7d03670946197aec98471fa3e" -dependencies = [ - "log", - "slog", - "slog-scope", -] - -[[package]] -name = "slog-term" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" -dependencies = [ - "is-terminal", - "slog", - "term", - "thread_local", - "time", -] - -[[package]] -name = "sloggers" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75062c2738b82cd45ae633623caae3393f43eb00aada1dc2d3ebe88db6b0db9b" -dependencies = [ - "chrono", - "libc", - "libflate", - "once_cell", - "regex", - "serde", - 
"slog", - "slog-async", - "slog-json", - "slog-kvfilter", - "slog-scope", - "slog-stdlog", - "slog-term", - "trackable", - "winapi", - "windows-acl", -] - [[package]] name = "slot_clock" version = "0.2.0" @@ -8339,7 +8178,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", - "ring 0.17.13", + "ring 0.17.14", "rustc_version 0.4.1", "sha2 0.10.8", "subtle", @@ -8389,9 +8228,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22bc24c8a61256950632fb6b68ea09f6b5c988070924c6292eb5933635202e00" +checksum = "dad0fa7e9a85c06d0a6ba5100d733fff72e231eb6db2d86078225cf716fd2d95" dependencies = [ "arbitrary", "ethereum_serde_utils", @@ -8476,13 +8315,13 @@ dependencies = [ "redb", "safe_arith", "serde", - "slog", - "sloggers", "smallvec", "state_processing", "strum", "superstruct", "tempfile", + "tracing", + "tracing-subscriber", "types", "xdelta3", "zstd 0.13.3", @@ -8665,12 +8504,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" -[[package]] -name = "take_mut" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" - [[package]] name = "tap" version = "1.0.1" @@ -8696,10 +8529,7 @@ version = "0.1.0" dependencies = [ "async-channel 1.9.0", "futures", - "logging", "metrics", - "slog", - "sloggers", "tokio", "tracing", ] @@ -8714,21 +8544,10 @@ dependencies = [ "fastrand", "getrandom 0.3.1", "once_cell", - "rustix 1.0.1", + "rustix 1.0.2", "windows-sys 0.59.0", ] -[[package]] -name = "term" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" -dependencies = [ - "dirs-next", - "rustversion", - "winapi", -] 
- [[package]] name = "termcolor" version = "1.4.1" @@ -8744,18 +8563,10 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.1", + "rustix 1.0.2", "windows-sys 0.59.0", ] -[[package]] -name = "test-test_logger" -version = "0.1.0" -dependencies = [ - "logging", - "slog", -] - [[package]] name = "test_random_derive" version = "0.2.0" @@ -8890,10 +8701,10 @@ name = "timer" version = "0.2.0" dependencies = [ "beacon_chain", - "slog", "slot_clock", "task_executor", "tokio", + "tracing", ] [[package]] @@ -9077,7 +8888,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "toml_datetime", "winnow 0.5.40", ] @@ -9088,7 +8899,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "toml_datetime", "winnow 0.7.3", ] @@ -9165,6 +8976,16 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + [[package]] name = "tracing-subscriber" version = "0.3.19" @@ -9175,31 +8996,15 @@ dependencies = [ "nu-ansi-term", "once_cell", "regex", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", -] - -[[package]] -name = "trackable" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" -dependencies = [ - 
"trackable_derive", -] - -[[package]] -name = "trackable_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" -dependencies = [ - "quote", - "syn 1.0.109", + "tracing-serde", ] [[package]] @@ -9282,7 +9087,6 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "log", "maplit", "merkle_proof", "metastruct", @@ -9299,7 +9103,6 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "slog", "smallvec", "ssz_types", "state_processing", @@ -9308,6 +9111,7 @@ dependencies = [ "tempfile", "test_random_derive", "tokio", + "tracing", "tree_hash", "tree_hash_derive", ] @@ -9506,9 +9310,9 @@ dependencies = [ "sensitive_url", "serde", "slashing_protection", - "slog", "slot_clock", "tokio", + "tracing", "types", "validator_http_api", "validator_http_metrics", @@ -9560,9 +9364,9 @@ dependencies = [ "rand 0.8.5", "sensitive_url", "serde", + "serde_json", "signing_method", "slashing_protection", - "slog", "slot_clock", "sysinfo", "system_health", @@ -9570,6 +9374,7 @@ dependencies = [ "tempfile", "tokio", "tokio-stream", + "tracing", "types", "url", "validator_dir", @@ -9586,12 +9391,13 @@ version = "0.1.0" dependencies = [ "health_metrics", "lighthouse_version", + "logging", "malloc_utils", "metrics", "parking_lot 0.12.3", "serde", - "slog", "slot_clock", + "tracing", "types", "validator_metrics", "validator_services", @@ -9644,11 +9450,12 @@ dependencies = [ "eth2", "futures", "graffiti_file", + "logging", "parking_lot 0.12.3", "safe_arith", - "slog", "slot_clock", "tokio", + "tracing", "tree_hash", "types", "validator_metrics", @@ -9662,13 +9469,14 @@ dependencies = [ "account_utils", "doppelganger_service", "initialized_validators", + "logging", "parking_lot 0.12.3", "serde", "signing_method", "slashing_protection", - "slog", "slot_clock", "task_executor", + "tracing", "types", "validator_metrics", ] @@ -9683,7 +9491,7 @@ dependencies = [ 
"regex", "sensitive_url", "serde_json", - "slog", + "tracing", "types", ] diff --git a/Cargo.toml b/Cargo.toml index 8183c08555..bad374201d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,7 +84,6 @@ members = [ "testing/node_test_rig", "testing/simulator", "testing/state_transition_vectors", - "testing/test-test_logger", "testing/validator_test_rig", "testing/web3signer_tests", @@ -151,6 +150,7 @@ hyper = "1" itertools = "0.10" libsecp256k1 = "0.7" log = "0.4" +logroller = "0.1.4" lru = "0.12" maplit = "1" milhouse = "0.5" @@ -181,14 +181,6 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = [ - "max_level_debug", - "release_max_level_debug", - "nested-values", -] } -slog-async = "2" -slog-term = "2" -sloggers = { version = "2", features = ["json"] } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" ssz_types = "0.10" @@ -209,7 +201,7 @@ tracing = "0.1.40" tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tree_hash = "0.9" tree_hash_derive = "0.9" url = "2" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f6948e8743..e30705719e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -39,9 +39,9 @@ monitoring_api = { workspace = true } sensitive_url = { workspace = true } serde_json = { workspace = true } slasher = { workspace = true } -slog = { workspace = true } store = { workspace = true } strum = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } types = { workspace = true } unused_port = { workspace = true } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 7b725d3519..0cf9ae1a10 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -58,10 +58,6 @@ sensitive_url = { workspace = 
true } serde = { workspace = true } serde_json = { workspace = true } slasher = { workspace = true } -slog = { workspace = true } -slog-async = { workspace = true } -slog-term = { workspace = true } -sloggers = { workspace = true } slot_clock = { workspace = true } smallvec = { workspace = true } ssz_types = { workspace = true } @@ -73,6 +69,7 @@ task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 4f7c480c8c..97fe8dccd4 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -4,7 +4,6 @@ use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; use safe_arith::SafeArith; use serde_utils::quoted_u64::Quoted; -use slog::debug; use state_processing::common::base::{self, SqrtTotalActiveBalance}; use state_processing::per_epoch_processing::altair::{ process_inactivity_updates_slow, process_justification_and_finalization, @@ -29,6 +28,7 @@ use store::consts::altair::{ PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }; +use tracing::debug; use types::consts::altair::WEIGHT_DENOMINATOR; use types::{BeaconState, Epoch, EthSpec, RelativeEpoch}; @@ -38,7 +38,11 @@ impl BeaconChain { epoch: Epoch, validators: Vec, ) -> Result { - debug!(self.log, "computing attestation rewards"; "epoch" => epoch, "validator_count" => validators.len()); + debug!( + %epoch, + validator_count = validators.len(), + "computing attestation rewards" + ); // Get state let state_slot = (epoch + 1).end_slot(T::EthSpec::slots_per_epoch()); @@ -214,10 +218,9 @@ impl BeaconChain { // Return 0s for unknown/inactive validator 
indices. let Ok(validator) = state.get_validator(validator_index) else { debug!( - self.log, - "No rewards for inactive/unknown validator"; - "index" => validator_index, - "epoch" => previous_epoch + index = validator_index, + epoch = %previous_epoch, + "No rewards for inactive/unknown validator" ); total_rewards.push(TotalAttestationRewards { validator_index: validator_index as u64, diff --git a/beacon_node/beacon_chain/src/attestation_simulator.rs b/beacon_node/beacon_chain/src/attestation_simulator.rs index c97c4490af..59d316578b 100644 --- a/beacon_node/beacon_chain/src/attestation_simulator.rs +++ b/beacon_node/beacon_chain/src/attestation_simulator.rs @@ -1,9 +1,9 @@ use crate::{BeaconChain, BeaconChainTypes}; -use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::sleep; +use tracing::{debug, error}; use types::{EthSpec, Slot}; /// Don't run the attestation simulator if the head slot is this many epochs @@ -36,10 +36,7 @@ async fn attestation_simulator_service( Some(duration) => { sleep(duration + additional_delay).await; - debug!( - chain.log, - "Simulating unagg. attestation production"; - ); + debug!("Simulating unagg. attestation production"); // Run the task in the executor let inner_chain = chain.clone(); @@ -53,7 +50,7 @@ async fn attestation_simulator_service( ); } None => { - error!(chain.log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; } @@ -85,10 +82,9 @@ pub fn produce_unaggregated_attestation( let data = unaggregated_attestation.data(); debug!( - chain.log, - "Produce unagg. attestation"; - "attestation_source" => data.source.root.to_string(), - "attestation_target" => data.target.root.to_string(), + attestation_source = data.source.root.to_string(), + attestation_target = data.target.root.to_string(), + "Produce unagg. 
attestation" ); chain @@ -98,9 +94,8 @@ pub fn produce_unaggregated_attestation( } Err(e) => { debug!( - chain.log, - "Failed to simulate attestation"; - "error" => ?e + error = ?e, + "Failed to simulate attestation" ); } } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 00e8615487..baacd93c45 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -43,7 +43,6 @@ use crate::{ use bls::verify_signature_sets; use itertools::Itertools; use proto_array::Block as ProtoBlock; -use slog::debug; use slot_clock::SlotClock; use state_processing::{ common::{ @@ -58,6 +57,7 @@ use state_processing::{ }; use std::borrow::Cow; use strum::AsRefStr; +use tracing::debug; use tree_hash::TreeHash; use types::{ Attestation, AttestationData, AttestationRef, BeaconCommittee, @@ -430,10 +430,9 @@ fn process_slash_info( Ok((indexed, _)) => (indexed, true, err), Err(e) => { debug!( - chain.log, - "Unable to obtain indexed form of attestation for slasher"; - "attestation_root" => format!("{:?}", attestation.tree_hash_root()), - "error" => format!("{:?}", e) + attestation_root = ?attestation.tree_hash_root(), + error = ?e, + "Unable to obtain indexed form of attestation for slasher" ); return err; } @@ -447,9 +446,8 @@ fn process_slash_info( if check_signature { if let Err(e) = verify_attestation_signature(chain, &indexed_attestation) { debug!( - chain.log, - "Signature verification for slasher failed"; - "error" => format!("{:?}", e), + error = ?e, + "Signature verification for slasher failed" ); return err; } diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index e0bb79bf38..591102126f 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -2,7 +2,6 @@ use crate::{BeaconChain, 
BeaconChainError, BeaconChainTypes, StateSkipConfig}; use attesting_indices_base::get_attesting_indices; use eth2::lighthouse::StandardBlockReward; use safe_arith::SafeArith; -use slog::error; use state_processing::common::attesting_indices_base; use state_processing::{ common::{ @@ -19,6 +18,7 @@ use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, }; +use tracing::error; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, EthSpec}; type BeaconBlockSubRewardValue = u64; @@ -56,9 +56,8 @@ impl BeaconChain { .compute_beacon_block_proposer_slashing_reward(block, state) .map_err(|e| { error!( - self.log, - "Error calculating proposer slashing reward"; - "error" => ?e + error = ?e, + "Error calculating proposer slashing reward" ); BeaconChainError::BlockRewardError })?; @@ -67,9 +66,8 @@ impl BeaconChain { .compute_beacon_block_attester_slashing_reward(block, state) .map_err(|e| { error!( - self.log, - "Error calculating attester slashing reward"; - "error" => ?e + error = ?e, + "Error calculating attester slashing reward" ); BeaconChainError::BlockRewardError })?; @@ -78,9 +76,8 @@ impl BeaconChain { self.compute_beacon_block_attestation_reward_base(block, state) .map_err(|e| { error!( - self.log, - "Error calculating base block attestation reward"; - "error" => ?e + error = ?e, + "Error calculating base block attestation reward" ); BeaconChainError::BlockRewardAttestationError })? @@ -88,9 +85,8 @@ impl BeaconChain { self.compute_beacon_block_attestation_reward_altair_deneb(block, state) .map_err(|e| { error!( - self.log, - "Error calculating altair block attestation reward"; - "error" => ?e + error = ?e, + "Error calculating altair block attestation reward" ); BeaconChainError::BlockRewardAttestationError })? 
diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 32ec776868..e37a69040d 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,6 +1,6 @@ use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; -use slog::{crit, debug, error, Logger}; +use logging::crit; use std::collections::HashMap; use std::sync::Arc; use store::{DatabaseBlock, ExecutionPayloadDeneb}; @@ -9,6 +9,7 @@ use tokio::sync::{ RwLock, }; use tokio_stream::{wrappers::UnboundedReceiverStream, Stream}; +use tracing::{debug, error}; use types::{ ChainSpec, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, @@ -129,7 +130,6 @@ fn reconstruct_default_header_block( fn reconstruct_blocks( block_map: &mut HashMap>>, block_parts_with_bodies: HashMap>, - log: &Logger, ) { for (root, block_parts) in block_parts_with_bodies { if let Some(payload_body) = block_parts.body { @@ -156,7 +156,7 @@ fn reconstruct_blocks( reconstructed_transactions_root: header_from_payload .transactions_root(), }; - debug!(log, "Failed to reconstruct block"; "root" => ?root, "error" => ?error); + debug!(?root, ?error, "Failed to reconstruct block"); block_map.insert(root, Arc::new(Err(error))); } } @@ -232,7 +232,7 @@ impl BodiesByRange { } } - async fn execute(&mut self, execution_layer: &ExecutionLayer, log: &Logger) { + async fn execute(&mut self, execution_layer: &ExecutionLayer) { if let RequestState::UnSent(blocks_parts_ref) = &mut self.state { let block_parts_vec = std::mem::take(blocks_parts_ref); @@ -261,12 +261,12 @@ impl BodiesByRange { }); } - reconstruct_blocks(&mut block_map, with_bodies, log); + reconstruct_blocks(&mut block_map, with_bodies); } Err(e) => { let block_result = 
Arc::new(Err(Error::BlocksByRangeFailure(Box::new(e)).into())); - debug!(log, "Payload bodies by range failure"; "error" => ?block_result); + debug!(error = ?block_result, "Payload bodies by range failure"); for block_parts in block_parts_vec { block_map.insert(block_parts.root(), block_result.clone()); } @@ -280,9 +280,8 @@ impl BodiesByRange { &mut self, root: &Hash256, execution_layer: &ExecutionLayer, - log: &Logger, ) -> Option>> { - self.execute(execution_layer, log).await; + self.execute(execution_layer).await; if let RequestState::Sent(map) = &self.state { return map.get(root).cloned(); } @@ -313,7 +312,7 @@ impl EngineRequest { } } - pub async fn push_block_parts(&mut self, block_parts: BlockParts, log: &Logger) { + pub async fn push_block_parts(&mut self, block_parts: BlockParts) { match self { Self::ByRange(bodies_by_range) => { let mut request = bodies_by_range.write().await; @@ -327,28 +326,21 @@ impl EngineRequest { Self::NoRequest(_) => { // this should _never_ happen crit!( - log, - "Please notify the devs"; - "beacon_block_streamer" => "push_block_parts called on NoRequest Variant", + beacon_block_streamer = "push_block_parts called on NoRequest Variant", + "Please notify the devs" ); } } } - pub async fn push_block_result( - &mut self, - root: Hash256, - block_result: BlockResult, - log: &Logger, - ) { + pub async fn push_block_result(&mut self, root: Hash256, block_result: BlockResult) { // this function will only fail if something is seriously wrong match self { Self::ByRange(_) => { // this should _never_ happen crit!( - log, - "Please notify the devs"; - "beacon_block_streamer" => "push_block_result called on ByRange", + beacon_block_streamer = "push_block_result called on ByRange", + "Please notify the devs" ); } Self::NoRequest(results) => { @@ -361,24 +353,22 @@ impl EngineRequest { &self, root: &Hash256, execution_layer: &ExecutionLayer, - log: &Logger, ) -> Arc> { match self { Self::ByRange(by_range) => { by_range .write() .await - 
.get_block_result(root, execution_layer, log) + .get_block_result(root, execution_layer) .await } Self::NoRequest(map) => map.read().await.get(root).cloned(), } .unwrap_or_else(|| { crit!( - log, - "Please notify the devs"; - "beacon_block_streamer" => "block_result not found in request", - "root" => ?root, + beacon_block_streamer = "block_result not found in request", + ?root, + "Please notify the devs" ); Arc::new(Err(Error::BlockResultNotFound.into())) }) @@ -518,9 +508,7 @@ impl BeaconBlockStreamer { } }; - no_request - .push_block_result(root, block_result, &self.beacon_chain.log) - .await; + no_request.push_block_result(root, block_result).await; requests.insert(root, no_request.clone()); } @@ -529,9 +517,7 @@ impl BeaconBlockStreamer { by_range_blocks.sort_by_key(|block_parts| block_parts.slot()); for block_parts in by_range_blocks { let root = block_parts.root(); - by_range - .push_block_parts(block_parts, &self.beacon_chain.log) - .await; + by_range.push_block_parts(block_parts).await; requests.insert(root, by_range.clone()); } @@ -541,17 +527,12 @@ impl BeaconBlockStreamer { result.push((root, request.clone())) } else { crit!( - self.beacon_chain.log, - "Please notify the devs"; - "beacon_block_streamer" => "request not found", - "root" => ?root, + beacon_block_streamer = "request not found", + ?root, + "Please notify the devs" ); no_request - .push_block_result( - root, - Err(Error::RequestNotFound.into()), - &self.beacon_chain.log, - ) + .push_block_result(root, Err(Error::RequestNotFound.into())) .await; result.push((root, no_request.clone())); } @@ -566,10 +547,7 @@ impl BeaconBlockStreamer { block_roots: Vec, sender: UnboundedSender<(Hash256, Arc>)>, ) { - debug!( - self.beacon_chain.log, - "Using slower fallback method of eth_getBlockByHash()" - ); + debug!("Using slower fallback method of eth_getBlockByHash()"); for root in block_roots { let cached_block = self.check_caches(root); let block_result = if cached_block.is_some() { @@ -601,9 +579,8 @@ 
impl BeaconBlockStreamer { Ok(payloads) => payloads, Err(e) => { error!( - self.beacon_chain.log, - "BeaconBlockStreamer: Failed to load payloads"; - "error" => ?e + error = ?e, + "BeaconBlockStreamer: Failed to load payloads" ); return; } @@ -615,9 +592,7 @@ impl BeaconBlockStreamer { engine_requests += 1; } - let result = request - .get_block_result(&root, &self.execution_layer, &self.beacon_chain.log) - .await; + let result = request.get_block_result(&root, &self.execution_layer).await; let successful = result .as_ref() @@ -636,13 +611,12 @@ impl BeaconBlockStreamer { } debug!( - self.beacon_chain.log, - "BeaconBlockStreamer finished"; - "requested blocks" => n_roots, - "sent" => n_sent, - "succeeded" => n_success, - "failed" => (n_sent - n_success), - "engine requests" => engine_requests, + requested_blocks = n_roots, + sent = n_sent, + succeeded = n_success, + failed = (n_sent - n_success), + engine_requests, + "BeaconBlockStreamer finished" ); } @@ -678,9 +652,8 @@ impl BeaconBlockStreamer { ) -> impl Stream>)> { let (block_tx, block_rx) = mpsc::unbounded_channel(); debug!( - self.beacon_chain.log, - "Launching a BeaconBlockStreamer"; - "blocks" => block_roots.len(), + blocks = block_roots.len(), + "Launching a BeaconBlockStreamer" ); let executor = self.beacon_chain.task_executor.clone(); executor.spawn(self.stream(block_roots, block_tx), "get_blocks_sender"); @@ -732,7 +705,6 @@ mod tests { let harness = BeaconChainHarness::builder(MinimalEthSpec) .spec(spec) .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .logger(logging::test_logger()) .fresh_ephemeral_store() .mock_execution_layer() .build(); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 884a5d770f..0defbecf35 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -86,6 +86,7 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use 
kzg::Kzg; +use logging::crit; use operation_pool::{ CompactAttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella, }; @@ -93,7 +94,6 @@ use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; use slasher::Slasher; -use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ @@ -124,6 +124,7 @@ use store::{ use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::oneshot; use tokio_stream::Stream; +use tracing::{debug, error, info, trace, warn}; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; @@ -480,8 +481,6 @@ pub struct BeaconChain { /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, - /// Logging to CLI, etc. - pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. pub(crate) graffiti_calculator: GraffitiCalculator, /// Optional slasher. @@ -666,7 +665,6 @@ impl BeaconChain { store: BeaconStore, reset_payload_statuses: ResetPayloadStatuses, spec: &ChainSpec, - log: &Logger, ) -> Result>, Error> { let Some(persisted_fork_choice) = store.get_item::(&FORK_CHOICE_DB_KEY)? 
@@ -682,7 +680,6 @@ impl BeaconChain { reset_payload_statuses, fc_store, spec, - log, )?)) } @@ -1213,9 +1210,8 @@ impl BeaconChain { if header_from_payload != execution_payload_header { for txn in execution_payload.transactions() { debug!( - self.log, - "Reconstructed txn"; - "bytes" => format!("0x{}", hex::encode(&**txn)), + bytes = format!("0x{}", hex::encode(&**txn)), + "Reconstructed txn" ); } @@ -1430,7 +1426,6 @@ impl BeaconChain { slot, &parent_root, &sync_aggregate, - &self.log, &self.spec, ) } @@ -1476,10 +1471,9 @@ impl BeaconChain { Ordering::Greater => { if slot > head_state.slot() + T::EthSpec::slots_per_epoch() { warn!( - self.log, - "Skipping more than an epoch"; - "head_slot" => head_state.slot(), - "request_slot" => slot + head_slot = %head_state.slot(), + request_slot = %slot, + "Skipping more than an epoch" ) } @@ -1498,11 +1492,10 @@ impl BeaconChain { Ok(_) => (), Err(e) => { warn!( - self.log, - "Unable to load state at slot"; - "error" => ?e, - "head_slot" => head_state_slot, - "requested_slot" => slot + error = ?e, + head_slot= %head_state_slot, + requested_slot = %slot, + "Unable to load state at slot" ); return Err(Error::NoStateForSlot(slot)); } @@ -1866,9 +1859,8 @@ impl BeaconChain { // The cache returned an error. Log the error and proceed with the rest of this // function. 
Err(e) => warn!( - self.log, - "Early attester cache failed"; - "error" => ?e + error = ?e, + "Early attester cache failed" ), } @@ -2013,11 +2005,10 @@ impl BeaconChain { cached_values } else { debug!( - self.log, - "Attester cache miss"; - "beacon_block_root" => ?beacon_block_root, - "head_state_slot" => %head_state_slot, - "request_slot" => %request_slot, + ?beacon_block_root, + %head_state_slot, + %request_slot, + "Attester cache miss" ); // Neither the head state, nor the attester cache was able to produce the required @@ -2277,30 +2268,27 @@ impl BeaconChain { match self.naive_aggregation_pool.write().insert(attestation) { Ok(outcome) => trace!( - self.log, - "Stored unaggregated attestation"; - "outcome" => ?outcome, - "index" => attestation.committee_index(), - "slot" => attestation.data().slot.as_u64(), + ?outcome, + index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "Stored unaggregated attestation" ), Err(NaiveAggregationError::SlotTooLow { slot, lowest_permissible_slot, }) => { trace!( - self.log, - "Refused to store unaggregated attestation"; - "lowest_permissible_slot" => lowest_permissible_slot.as_u64(), - "slot" => slot.as_u64(), + lowest_permissible_slot = lowest_permissible_slot.as_u64(), + slot = slot.as_u64(), + "Refused to store unaggregated attestation" ); } Err(e) => { error!( - self.log, - "Failed to store unaggregated attestation"; - "error" => ?e, - "index" => attestation.committee_index(), - "slot" => attestation.data().slot.as_u64(), + error = ?e, + index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "Failed to store unaggregated attestation" ); return Err(Error::from(e).into()); } @@ -2340,30 +2328,27 @@ impl BeaconChain { .insert(&contribution) { Ok(outcome) => trace!( - self.log, - "Stored unaggregated sync committee message"; - "outcome" => ?outcome, - "index" => sync_message.validator_index, - "slot" => sync_message.slot.as_u64(), + ?outcome, + index = 
sync_message.validator_index, + slot = sync_message.slot.as_u64(), + "Stored unaggregated sync committee message" ), Err(NaiveAggregationError::SlotTooLow { slot, lowest_permissible_slot, }) => { trace!( - self.log, - "Refused to store unaggregated sync committee message"; - "lowest_permissible_slot" => lowest_permissible_slot.as_u64(), - "slot" => slot.as_u64(), + lowest_permissible_slot = lowest_permissible_slot.as_u64(), + slot = slot.as_u64(), + "Refused to store unaggregated sync committee message" ); } Err(e) => { error!( - self.log, - "Failed to store unaggregated sync committee message"; - "error" => ?e, - "index" => sync_message.validator_index, - "slot" => sync_message.slot.as_u64(), + error = ?e, + index = sync_message.validator_index, + slot = sync_message.slot.as_u64(), + "Failed to store unaggregated sync committee message" ); return Err(Error::from(e).into()); } @@ -2456,11 +2441,10 @@ impl BeaconChain { self.shuffling_is_compatible_result(block_root, target_epoch, state) .unwrap_or_else(|e| { debug!( - self.log, - "Skipping attestation with incompatible shuffling"; - "block_root" => ?block_root, - "target_epoch" => target_epoch, - "reason" => ?e, + ?block_root, + %target_epoch, + reason = ?e, + "Skipping attestation with incompatible shuffling" ); false }) @@ -2501,11 +2485,10 @@ impl BeaconChain { } } else { debug!( - self.log, - "Skipping attestation with incompatible shuffling"; - "block_root" => ?block_root, - "target_epoch" => target_epoch, - "reason" => "target epoch less than block epoch" + ?block_root, + %target_epoch, + reason = "target epoch less than block epoch", + "Skipping attestation with incompatible shuffling" ); return Ok(false); }; @@ -2514,12 +2497,11 @@ impl BeaconChain { Ok(true) } else { debug!( - self.log, - "Skipping attestation with incompatible shuffling"; - "block_root" => ?block_root, - "target_epoch" => target_epoch, - "head_shuffling_id" => ?head_shuffling_id, - "block_shuffling_id" => ?block_shuffling_id, + 
?block_root, + %target_epoch, + ?head_shuffling_id, + ?block_shuffling_id, + "Skipping attestation with incompatible shuffling" ); Ok(false) } @@ -2935,8 +2917,11 @@ impl BeaconChain { imported_blocks.push((block_root, block_slot)); } AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { - warn!(self.log, "Blobs missing in response to range request"; - "block_root" => ?block_root, "slot" => slot); + warn!( + ?block_root, + %slot, + "Blobs missing in response to range request" + ); return ChainSegmentResult::Failed { imported_blocks, error: BlockError::AvailabilityCheck( @@ -2947,9 +2932,10 @@ impl BeaconChain { } } Err(BlockError::DuplicateFullyImported(block_root)) => { - debug!(self.log, - "Ignoring already known blocks while processing chain segment"; - "block_root" => ?block_root); + debug!( + ?block_root, + "Ignoring already known blocks while processing chain segment" + ); continue; } Err(error) => { @@ -2972,7 +2958,7 @@ impl BeaconChain { // TODO(das): update fork-choice, act on sampling result, adjust log level // NOTE: It is possible that sampling complets before block is imported into fork choice, // in that case we may need to update availability cache. 
- info!(self.log, "Sampling completed"; "block_root" => %block_root); + info!(%block_root, "Sampling completed"); } /// Returns `Ok(GossipVerifiedBlock)` if the supplied `block` should be forwarded onto the @@ -3001,23 +2987,21 @@ impl BeaconChain { Ok(verified) => { let commitments_formatted = verified.block.commitments_formatted(); debug!( - chain.log, - "Successfully verified gossip block"; - "graffiti" => graffiti_string, - "slot" => slot, - "root" => ?verified.block_root(), - "commitments" => commitments_formatted, + graffiti = graffiti_string, + %slot, + root = ?verified.block_root(), + commitments = commitments_formatted, + "Successfully verified gossip block" ); Ok(verified) } Err(e) => { debug!( - chain.log, - "Rejected gossip block"; - "error" => e.to_string(), - "graffiti" => graffiti_string, - "slot" => slot, + error = e.to_string(), + graffiti = graffiti_string, + %slot, + "Rejected gossip block" ); Err(e) @@ -3428,11 +3412,10 @@ impl BeaconChain { // The block was successfully verified and imported. Yay. Ok(status @ AvailabilityProcessingStatus::Imported(block_root)) => { debug!( - self.log, - "Beacon block imported"; - "block_root" => ?block_root, - "block_slot" => block_slot, - "source" => %block_source, + ?block_root, + %block_slot, + source = %block_source, + "Beacon block imported" ); // Increment the Prometheus counter for block processing successes. 
@@ -3441,20 +3424,14 @@ impl BeaconChain { Ok(status) } Ok(status @ AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { - debug!( - self.log, - "Beacon block awaiting blobs"; - "block_root" => ?block_root, - "block_slot" => slot, - ); + debug!(?block_root, %slot, "Beacon block awaiting blobs"); Ok(status) } Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { debug!( - self.log, - "Beacon block processing cancelled"; - "error" => ?e, + error = ?e, + "Beacon block processing cancelled" ); Err(e) } @@ -3462,19 +3439,14 @@ impl BeaconChain { // be partially verified or partially imported. Err(BlockError::BeaconChainError(e)) => { crit!( - self.log, - "Beacon block processing error"; - "error" => ?e, + error = ?e, + "Beacon block processing error" ); Err(BlockError::BeaconChainError(e)) } // The block failed verification. Err(other) => { - debug!( - self.log, - "Beacon block rejected"; - "reason" => other.to_string(), - ); + debug!(reason = other.to_string(), "Beacon block rejected"); Err(other) } } @@ -3501,31 +3473,24 @@ impl BeaconChain { // Log the PoS pandas if a merge transition just occurred. if payload_verification_outcome.is_valid_merge_transition_block { - info!(self.log, "{}", POS_PANDA_BANNER); + info!("{}", POS_PANDA_BANNER); + info!(slot = %block.slot(), "Proof of Stake Activated"); info!( - self.log, - "Proof of Stake Activated"; - "slot" => block.slot() + terminal_pow_block_hash = ?block + .message() + .execution_payload()? + .parent_hash() + .into_root(), ); info!( - self.log, ""; - "Terminal POW Block Hash" => ?block - .message() - .execution_payload()? - .parent_hash() - .into_root() + merge_transition_block_root = ?block.message().tree_hash_root(), ); info!( - self.log, ""; - "Merge Transition Block Root" => ?block.message().tree_hash_root() - ); - info!( - self.log, ""; - "Merge Transition Execution Hash" => ?block - .message() - .execution_payload()? 
- .block_hash() - .into_root() + merge_transition_execution_hash = ?block + .message() + .execution_payload()? + .block_hash() + .into_root(), ); } Ok(ExecutedBlock::new( @@ -3900,9 +3865,8 @@ impl BeaconChain { &self.spec, ) { warn!( - self.log, - "Early attester cache insert failed"; - "error" => ?e + error = ?e, + "Early attester cache insert failed" ); } else { let attestable_timestamp = @@ -3914,19 +3878,14 @@ impl BeaconChain { ) } } else { - warn!( - self.log, - "Early attester block missing"; - "block_root" => ?block_root - ); + warn!(?block_root, "Early attester block missing"); } } // This block did not become the head, nothing to do. Ok(_) => (), Err(e) => error!( - self.log, - "Failed to compute head during block import"; - "error" => ?e + error = ?e, + "Failed to compute head during block import" ), } drop(fork_choice_timer); @@ -3972,11 +3931,10 @@ impl BeaconChain { Ok(None) => {} Err(e) => { error!( - self.log, - "Failed to store data columns into the database"; - "msg" => "Restoring fork choice from disk", - "error" => &e, - "block_root" => ?block_root + msg = "Restoring fork choice from disk", + error = &e, + ?block_root, + "Failed to store data columns into the database" ); return Err(self .handle_import_block_db_write_error(fork_choice) @@ -3999,10 +3957,9 @@ impl BeaconChain { if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { error!( - self.log, - "Database write failed!"; - "msg" => "Restoring fork choice from disk", - "error" => ?e, + msg = "Restoring fork choice from disk", + error = ?e, + "Database write failed!" 
); return Err(self .handle_import_block_db_write_error(fork_choice) @@ -4038,7 +3995,7 @@ impl BeaconChain { &mut state, ) .unwrap_or_else(|e| { - error!(self.log, "error caching light_client data {:?}", e); + error!("error caching light_client data {:?}", e); }); } @@ -4096,13 +4053,11 @@ impl BeaconChain { ), &self.store, &self.spec, - &self.log, ) { crit!( - self.log, - "No stored fork choice found to restore from"; - "error" => ?e, - "warning" => "The database is likely corrupt now, consider --purge-db" + error = ?e, + warning = "The database is likely corrupt now, consider --purge-db", + "No stored fork choice found to restore from" ); Err(BlockError::BeaconChainError(e)) } else { @@ -4141,17 +4096,15 @@ impl BeaconChain { { let mut shutdown_sender = self.shutdown_sender(); crit!( - self.log, - "Weak subjectivity checkpoint verification failed while importing block!"; - "block_root" => ?block_root, - "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, - "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, - "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, - "error" => ?e + ?block_root, + parent_root = ?block.parent_root(), + old_finalized_epoch = ?current_head_finalized_checkpoint.epoch, + new_finalized_epoch = ?new_finalized_checkpoint.epoch, + weak_subjectivity_epoch = ?wss_checkpoint.epoch, + error = ?e, + "Weak subjectivity checkpoint verification failed while importing block!" ); crit!( - self.log, "You must use the `--purge-db` flag to clear the database and restart sync. \ You may be on a hostile network." 
); @@ -4220,11 +4173,10 @@ impl BeaconChain { } Err(e) => { warn!( - self.log, - "Unable to fetch sync committee"; - "epoch" => duty_epoch, - "purpose" => "validator monitor", - "error" => ?e, + epoch = %duty_epoch, + purpose = "validator monitor", + error = ?e, + "Unable to fetch sync committee" ); } } @@ -4236,11 +4188,10 @@ impl BeaconChain { Ok(indexed) => indexed, Err(e) => { debug!( - self.log, - "Failed to get indexed attestation"; - "purpose" => "validator monitor", - "attestation_slot" => attestation.data().slot, - "error" => ?e, + purpose = "validator monitor", + attestation_slot = %attestation.data().slot, + error = ?e, + "Failed to get indexed attestation" ); continue; } @@ -4292,10 +4243,9 @@ impl BeaconChain { Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {} Err(e) => { debug!( - self.log, - "Failed to register observed attestation"; - "error" => ?e, - "epoch" => a.data().target.epoch + error = ?e, + epoch = %a.data().target.epoch, + "Failed to register observed attestation" ); } } @@ -4304,11 +4254,10 @@ impl BeaconChain { Ok(indexed) => indexed, Err(e) => { debug!( - self.log, - "Failed to get indexed attestation"; - "purpose" => "observation", - "attestation_slot" => a.data().slot, - "error" => ?e, + purpose = "observation", + attestation_slot = %a.data().slot, + error = ?e, + "Failed to get indexed attestation" ); continue; } @@ -4321,11 +4270,10 @@ impl BeaconChain { .observe_validator(a.data().target.epoch, validator_index as usize) { debug!( - self.log, - "Failed to register observed block attester"; - "error" => ?e, - "epoch" => a.data().target.epoch, - "validator_index" => validator_index, + error = ?e, + epoch = %a.data().target.epoch, + validator_index, + "Failed to register observed block attester" ) } } @@ -4345,11 +4293,10 @@ impl BeaconChain { Ok(indexed) => indexed, Err(e) => { debug!( - self.log, - "Failed to get indexed attestation"; - "purpose" => "slasher", - "attestation_slot" => attestation.data().slot, - "error" 
=> ?e, + purpose = "slasher", + attestation_slot = %attestation.data().slot, + error = ?e, + "Failed to get indexed attestation" ); continue; } @@ -4421,9 +4368,8 @@ impl BeaconChain { sync_aggregate.clone(), )) { warn!( - self.log, - "Failed to send light_client server event"; - "error" => ?e + error = ?e, + "Failed to send light_client server event" ); } } @@ -4440,9 +4386,8 @@ impl BeaconChain { ) { if let Err(e) = self.import_block_update_shuffling_cache_fallible(block_root, state) { warn!( - self.log, - "Failed to prime shuffling cache"; - "error" => ?e + error = ?e, + "Failed to prime shuffling cache" ); } } @@ -4518,10 +4463,9 @@ impl BeaconChain { let finalized_deposit_count = finalized_eth1_data.deposit_count; eth1_chain.finalize_eth1_data(finalized_eth1_data); debug!( - self.log, - "called eth1_chain.finalize_eth1_data()"; - "epoch" => current_finalized_checkpoint.epoch, - "deposit count" => finalized_deposit_count, + epoch = %current_finalized_checkpoint.epoch, + deposit_count = %finalized_deposit_count, + "called eth1_chain.finalize_eth1_data()" ); } } @@ -4544,36 +4488,32 @@ impl BeaconChain { match rx.wait_for_fork_choice(slot, timeout) { ForkChoiceWaitResult::Success(fc_slot) => { debug!( - self.log, - "Fork choice successfully updated before block production"; - "slot" => slot, - "fork_choice_slot" => fc_slot, + %slot, + fork_choice_slot = %fc_slot, + "Fork choice successfully updated before block production" ); } ForkChoiceWaitResult::Behind(fc_slot) => { warn!( - self.log, - "Fork choice notifier out of sync with block production"; - "fork_choice_slot" => fc_slot, - "slot" => slot, - "message" => "this block may be orphaned", + fork_choice_slot = %fc_slot, + %slot, + message = "this block may be orphaned", + "Fork choice notifier out of sync with block production" ); } ForkChoiceWaitResult::TimeOut => { warn!( - self.log, - "Timed out waiting for fork choice before proposal"; - "message" => "this block may be orphaned", + message = "this block may 
be orphaned", + "Timed out waiting for fork choice before proposal" ); } } } else { error!( - self.log, - "Producing block at incorrect slot"; - "block_slot" => slot, - "current_slot" => current_slot, - "message" => "check clock sync, this block may be orphaned", + %slot, + %current_slot, + message = "check clock sync, this block may be orphaned", + "Producing block at incorrect slot" ); } } @@ -4649,10 +4589,9 @@ impl BeaconChain { self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( - self.log, - "Proposing block to re-org current head"; - "slot" => slot, - "head_to_reorg" => %head_block_root, + %slot, + head_to_reorg = %head_block_root, + "Proposing block to re-org current head" ); (re_org_state, Some(re_org_state_root)) } else { @@ -4667,10 +4606,9 @@ impl BeaconChain { } } else { warn!( - self.log, - "Producing block that conflicts with head"; - "message" => "this block is more likely to be orphaned", - "slot" => slot, + message = "this block is more likely to be orphaned", + %slot, + "Producing block that conflicts with head" ); let state = self .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) @@ -4698,9 +4636,8 @@ impl BeaconChain { if self.spec.proposer_score_boost.is_none() { warn!( - self.log, - "Ignoring proposer re-org configuration"; - "reason" => "this network does not have proposer boost enabled" + reason = "this network does not have proposer boost enabled", + "Ignoring proposer re-org configuration" ); return None; } @@ -4709,11 +4646,7 @@ impl BeaconChain { .slot_clock .seconds_from_current_slot_start() .or_else(|| { - warn!( - self.log, - "Not attempting re-org"; - "error" => "unable to read slot clock" - ); + warn!(error = "unable to read slot clock", "Not attempting re-org"); None })?; @@ -4724,21 +4657,13 @@ impl BeaconChain { // 3. The `get_proposer_head` conditions from fork choice pass. 
let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); if !proposing_on_time { - debug!( - self.log, - "Not attempting re-org"; - "reason" => "not proposing on time", - ); + debug!(reason = "not proposing on time", "Not attempting re-org"); return None; } let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot); if !head_late { - debug!( - self.log, - "Not attempting re-org"; - "reason" => "head not late" - ); + debug!(reason = "head not late", "Not attempting re-org"); return None; } @@ -4759,16 +4684,14 @@ impl BeaconChain { .map_err(|e| match e { ProposerHeadError::DoNotReOrg(reason) => { debug!( - self.log, - "Not attempting re-org"; - "reason" => %reason, + %reason, + "Not attempting re-org" ); } ProposerHeadError::Error(e) => { warn!( - self.log, - "Not attempting re-org"; - "error" => ?e, + error = ?e, + "Not attempting re-org" ); } }) @@ -4780,21 +4703,16 @@ impl BeaconChain { .store .get_advanced_hot_state_from_cache(re_org_parent_block, slot) .or_else(|| { - warn!( - self.log, - "Not attempting re-org"; - "reason" => "no state in cache" - ); + warn!(reason = "no state in cache", "Not attempting re-org"); None })?; info!( - self.log, - "Attempting re-org due to weak head"; - "weak_head" => ?canonical_head, - "parent" => ?re_org_parent_block, - "head_weight" => proposer_head.head_node.weight, - "threshold_weight" => proposer_head.re_org_head_weight_threshold + weak_head = ?canonical_head, + parent = ?re_org_parent_block, + head_weight = proposer_head.head_node.weight, + threshold_weight = proposer_head.re_org_head_weight_threshold, + "Attempting re-org due to weak head" ); Some((state, state_root)) @@ -4818,10 +4736,9 @@ impl BeaconChain { // The proposer head must be equal to the canonical head or its parent. 
if proposer_head != head_block_root && proposer_head != head_parent_block_root { warn!( - self.log, - "Unable to compute payload attributes"; - "block_root" => ?proposer_head, - "head_block_root" => ?head_block_root, + block_root = ?proposer_head, + head_block_root = ?head_block_root, + "Unable to compute payload attributes" ); return Ok(None); } @@ -4845,12 +4762,11 @@ impl BeaconChain { } else { if head_epoch + self.config.sync_tolerance_epochs < proposal_epoch { warn!( - self.log, - "Skipping proposer preparation"; - "msg" => "this is a non-critical issue that can happen on unhealthy nodes or \ + msg = "this is a non-critical issue that can happen on unhealthy nodes or \ networks.", - "proposal_epoch" => proposal_epoch, - "head_epoch" => head_epoch, + %proposal_epoch, + %head_epoch, + "Skipping proposer preparation" ); // Don't skip the head forward more than two epochs. This avoids burdening an @@ -4883,10 +4799,7 @@ impl BeaconChain { // // Exit now, after updating the cache. if decision_root != shuffling_decision_root { - warn!( - self.log, - "Head changed during proposer preparation"; - ); + warn!("Head changed during proposer preparation"); return Ok(None); } @@ -4948,10 +4861,9 @@ impl BeaconChain { // Advance the state using the partial method. 
debug!( - self.log, - "Advancing state for withdrawals calculation"; - "proposal_slot" => proposal_slot, - "parent_block_root" => ?parent_block_root, + %proposal_slot, + ?parent_block_root, + "Advancing state for withdrawals calculation" ); let mut advanced_state = unadvanced_state.into_owned(); partial_state_advance( @@ -4981,9 +4893,8 @@ impl BeaconChain { .or_else(|e| match e { ProposerHeadError::DoNotReOrg(reason) => { trace!( - self.log, - "Not suppressing fork choice update"; - "reason" => %reason, + %reason, + "Not suppressing fork choice update" ); Ok(canonical_forkchoice_params) } @@ -5066,10 +4977,9 @@ impl BeaconChain { .get_slot::(shuffling_decision_root, re_org_block_slot) .ok_or_else(|| { debug!( - self.log, - "Fork choice override proposer shuffling miss"; - "slot" => re_org_block_slot, - "decision_root" => ?shuffling_decision_root, + slot = %re_org_block_slot, + decision_root = ?shuffling_decision_root, + "Fork choice override proposer shuffling miss" ); DoNotReOrg::NotProposing })? @@ -5130,11 +5040,10 @@ impl BeaconChain { }; debug!( - self.log, - "Fork choice update overridden"; - "canonical_head" => ?head_block_root, - "override" => ?info.parent_node.root, - "slot" => fork_choice_slot, + canonical_head = ?head_block_root, + ?info.parent_node.root, + slot = %fork_choice_slot, + "Fork choice update overridden" ); Ok(forkchoice_update_params) @@ -5387,9 +5296,8 @@ impl BeaconChain { if let Err(e) = import(attestation) { // Don't stop block production if there's an error, just create a log. 
error!( - self.log, - "Attestation did not transfer to op pool"; - "reason" => ?e + reason = ?e, + "Attestation did not transfer to op pool" ); } } @@ -5437,11 +5345,10 @@ impl BeaconChain { ) .map_err(|e| { warn!( - self.log, - "Attempted to include an invalid attestation"; - "err" => ?e, - "block_slot" => state.slot(), - "attestation" => ?att + err = ?e, + block_slot = %state.slot(), + attestation = ?att, + "Attempted to include an invalid attestation" ); }) .is_ok() @@ -5453,11 +5360,10 @@ impl BeaconChain { .validate(&state, &self.spec) .map_err(|e| { warn!( - self.log, - "Attempted to include an invalid proposer slashing"; - "err" => ?e, - "block_slot" => state.slot(), - "slashing" => ?slashing + err = ?e, + block_slot = %state.slot(), + ?slashing, + "Attempted to include an invalid proposer slashing" ); }) .is_ok() @@ -5469,11 +5375,10 @@ impl BeaconChain { .validate(&state, &self.spec) .map_err(|e| { warn!( - self.log, - "Attempted to include an invalid attester slashing"; - "err" => ?e, - "block_slot" => state.slot(), - "slashing" => ?slashing + err = ?e, + block_slot = %state.slot(), + ?slashing, + "Attempted to include an invalid attester slashing" ); }) .is_ok() @@ -5484,11 +5389,10 @@ impl BeaconChain { .validate(&state, &self.spec) .map_err(|e| { warn!( - self.log, - "Attempted to include an invalid proposer slashing"; - "err" => ?e, - "block_slot" => state.slot(), - "exit" => ?exit + err = ?e, + block_slot = %state.slot(), + ?exit, + "Attempted to include an invalid proposer slashing" ); }) .is_ok() @@ -5506,9 +5410,8 @@ impl BeaconChain { .map_err(BlockProductionError::OpPoolError)? 
.unwrap_or_else(|| { warn!( - self.log, - "Producing block with no sync contributions"; - "slot" => state.slot(), + slot = %state.slot(), + "Producing block with no sync contributions" ); SyncAggregate::new() }); @@ -5828,11 +5731,7 @@ impl BeaconChain { ); let block_size = block.ssz_bytes_len(); - debug!( - self.log, - "Produced block on state"; - "block_size" => block_size, - ); + debug!(%block_size, "Produced block on state"); metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); @@ -5914,11 +5813,10 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); trace!( - self.log, - "Produced beacon block"; - "parent" => ?block.parent_root(), - "attestations" => block.body().attestations_len(), - "slot" => block.slot() + parent = ?block.parent_root(), + attestations = block.body().attestations_len(), + slot = %block.slot(), + "Produced beacon block" ); Ok(BeaconBlockResponse { @@ -5941,11 +5839,7 @@ impl BeaconChain { self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { - debug!( - self.log, - "Processing payload invalidation"; - "op" => ?op, - ); + debug!(?op, "Processing payload invalidation"); // Update the execution status in fork choice. // @@ -5968,11 +5862,10 @@ impl BeaconChain { // Update fork choice. if let Err(e) = fork_choice_result { crit!( - self.log, - "Failed to process invalid payload"; - "error" => ?e, - "latest_valid_ancestor" => ?op.latest_valid_ancestor(), - "block_root" => ?op.block_root(), + error = ?e, + latest_valid_ancestor = ?op.latest_valid_ancestor(), + block_root = ?op.block_root(), + "Failed to process invalid payload" ); } @@ -5999,10 +5892,9 @@ impl BeaconChain { if justified_block.execution_status.is_invalid() { crit!( - self.log, - "The justified checkpoint is invalid"; - "msg" => "ensure you are not connected to a malicious network. This error is not \ - recoverable, please reach out to the lighthouse developers for assistance." + msg = "ensure you are not connected to a malicious network. 
This error is not \ + recoverable, please reach out to the lighthouse developers for assistance.", + "The justified checkpoint is invalid" ); let mut shutdown_sender = self.shutdown_sender(); @@ -6010,10 +5902,9 @@ impl BeaconChain { INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, )) { crit!( - self.log, - "Unable to trigger client shut down"; - "msg" => "shut down may already be under way", - "error" => ?e + msg = "shut down may already be under way", + error = ?e, + "Unable to trigger client shut down" ); } @@ -6086,12 +5977,7 @@ impl BeaconChain { // This prevents the routine from running during sync. let head_slot = cached_head.head_slot(); if head_slot + tolerance_slots < current_slot { - debug!( - chain.log, - "Head too old for proposer prep"; - "head_slot" => head_slot, - "current_slot" => current_slot, - ); + debug!(%head_slot, %current_slot, "Head too old for proposer prep"); return Ok(None); } @@ -6179,11 +6065,10 @@ impl BeaconChain { // Only push a log to the user if this is the first time we've seen this proposer for // this slot. info!( - self.log, - "Prepared beacon proposer"; - "prepare_slot" => prepare_slot, - "validator" => proposer, - "parent_root" => ?head_root, + %prepare_slot, + validator = proposer, + parent_root = ?head_root, + "Prepared beacon proposer" ); payload_attributes }; @@ -6213,10 +6098,9 @@ impl BeaconChain { // // This scenario might occur on an overloaded/under-resourced node. 
warn!( - self.log, - "Delayed proposer preparation"; - "prepare_slot" => prepare_slot, - "validator" => proposer, + %prepare_slot, + validator = proposer, + "Delayed proposer preparation" ); return Ok(None); }; @@ -6227,10 +6111,9 @@ impl BeaconChain { || till_prepare_slot <= self.config.prepare_payload_lookahead { debug!( - self.log, - "Sending forkchoiceUpdate for proposer prep"; - "till_prepare_slot" => ?till_prepare_slot, - "prepare_slot" => prepare_slot + ?till_prepare_slot, + %prepare_slot, + "Sending forkchoiceUpdate for proposer prep" ); self.update_execution_engine_forkchoice( @@ -6332,8 +6215,8 @@ impl BeaconChain { .map_err(Error::ForkchoiceUpdate)? { info!( - self.log, - "Prepared POS transition block proposer"; "slot" => next_slot + slot = %next_slot, + "Prepared POS transition block proposer" ); ( params.head_root, @@ -6391,9 +6274,8 @@ impl BeaconChain { .await?; if let Err(e) = fork_choice_update_result { error!( - self.log, - "Failed to validate payload"; - "error" => ?e + error= ?e, + "Failed to validate payload" ) }; Ok(()) @@ -6407,11 +6289,10 @@ impl BeaconChain { // error. However, we create a log to bring attention to the issue. PayloadStatus::Accepted => { warn!( - self.log, - "Fork choice update received ACCEPTED"; - "msg" => "execution engine provided an unexpected response to a fork \ + msg = "execution engine provided an unexpected response to a fork \ choice update. although this is not a serious issue, please raise \ - an issue." 
+ an issue.", + "Fork choice update received ACCEPTED" ); Ok(()) } @@ -6420,13 +6301,12 @@ impl BeaconChain { ref validation_error, } => { warn!( - self.log, - "Invalid execution payload"; - "validation_error" => ?validation_error, - "latest_valid_hash" => ?latest_valid_hash, - "head_hash" => ?head_hash, - "head_block_root" => ?head_block_root, - "method" => "fcU", + ?validation_error, + ?latest_valid_hash, + ?head_hash, + head_block_root = ?head_block_root, + method = "fcU", + "Invalid execution payload" ); match latest_valid_hash { @@ -6474,12 +6354,11 @@ impl BeaconChain { ref validation_error, } => { warn!( - self.log, - "Invalid execution payload block hash"; - "validation_error" => ?validation_error, - "head_hash" => ?head_hash, - "head_block_root" => ?head_block_root, - "method" => "fcU", + ?validation_error, + ?head_hash, + ?head_block_root, + method = "fcU", + "Invalid execution payload block hash" ); // The execution engine has stated that the head block is invalid, however it // hasn't returned a latest valid ancestor. @@ -6594,16 +6473,19 @@ impl BeaconChain { state: &BeaconState, ) -> Result<(), BeaconChainError> { let finalized_checkpoint = state.finalized_checkpoint(); - info!(self.log, "Verifying the configured weak subjectivity checkpoint"; "weak_subjectivity_epoch" => wss_checkpoint.epoch, "weak_subjectivity_root" => ?wss_checkpoint.root); + info!( + weak_subjectivity_epoch = %wss_checkpoint.epoch, + weak_subjectivity_root = ?wss_checkpoint.root, + "Verifying the configured weak subjectivity checkpoint" + ); // If epochs match, simply compare roots. 
if wss_checkpoint.epoch == finalized_checkpoint.epoch && wss_checkpoint.root != finalized_checkpoint.root { crit!( - self.log, - "Root found at the specified checkpoint differs"; - "weak_subjectivity_root" => ?wss_checkpoint.root, - "finalized_checkpoint_root" => ?finalized_checkpoint.root + weak_subjectivity_root = ?wss_checkpoint.root, + finalized_checkpoint_root = ?finalized_checkpoint.root, + "Root found at the specified checkpoint differs" ); return Err(BeaconChainError::WeakSubjectivtyVerificationFailure); } else if wss_checkpoint.epoch < finalized_checkpoint.epoch { @@ -6617,17 +6499,18 @@ impl BeaconChain { Some(root) => { if root != wss_checkpoint.root { crit!( - self.log, - "Root found at the specified checkpoint differs"; - "weak_subjectivity_root" => ?wss_checkpoint.root, - "finalized_checkpoint_root" => ?finalized_checkpoint.root + weak_subjectivity_root = ?wss_checkpoint.root, + finalized_checkpoint_root = ?finalized_checkpoint.root, + "Root found at the specified checkpoint differs" ); return Err(BeaconChainError::WeakSubjectivtyVerificationFailure); } } None => { - crit!(self.log, "The root at the start slot of the given epoch could not be found"; - "wss_checkpoint_slot" => ?slot); + crit!( + wss_checkpoint_slot = ?slot, + "The root at the start slot of the given epoch could not be found" + ); return Err(BeaconChainError::WeakSubjectivtyVerificationFailure); } } @@ -6642,11 +6525,7 @@ impl BeaconChain { /// `tokio::runtime::block_on` in certain cases. pub async fn per_slot_task(self: &Arc) { if let Some(slot) = self.slot_clock.now() { - debug!( - self.log, - "Running beacon chain per slot tasks"; - "slot" => ?slot - ); + debug!(?slot, "Running beacon chain per slot tasks"); // Always run the light-weight pruning tasks (these structures should be empty during // sync anyway). 
@@ -6672,10 +6551,9 @@ impl BeaconChain { if let Some(tx) = &chain.fork_choice_signal_tx { if let Err(e) = tx.notify_fork_choice_complete(slot) { warn!( - chain.log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => slot, + error = ?e, + %slot, + "Error signalling fork choice waiter" ); } } @@ -6768,10 +6646,9 @@ impl BeaconChain { drop(shuffling_cache); debug!( - self.log, - "Committee cache miss"; - "shuffling_id" => ?shuffling_epoch, - "head_block_root" => head_block_root.to_string(), + shuffling_id = ?shuffling_epoch, + head_block_root = head_block_root.to_string(), + "Committee cache miss" ); // If the block's state will be so far ahead of `shuffling_epoch` that even its @@ -7182,10 +7059,6 @@ impl BeaconChain { .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) } - pub fn logger(&self) -> &Logger { - &self.log - } - /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. 
@@ -7226,17 +7099,17 @@ impl BeaconChain { AvailableBlockData::NoData => Ok(None), AvailableBlockData::Blobs(blobs) => { debug!( - self.log, "Writing blobs to store"; - "block_root" => %block_root, - "count" => blobs.len(), + %block_root, + count = blobs.len(), + "Writing blobs to store" ); Ok(Some(StoreOp::PutBlobs(block_root, blobs))) } AvailableBlockData::DataColumns(data_columns) => { debug!( - self.log, "Writing data columns to store"; - "block_root" => %block_root, - "count" => data_columns.len(), + %block_root, + count = data_columns.len(), + "Writing data columns to store" ); Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) } @@ -7251,9 +7124,9 @@ impl BeaconChain { .blocking_recv() .map_err(|e| format!("Did not receive data columns from sender: {e:?}"))?; debug!( - self.log, "Writing data columns to store"; - "block_root" => %block_root, - "count" => computed_data_columns.len(), + %block_root, + count = computed_data_columns.len(), + "Writing data columns to store" ); // TODO(das): Store only this node's custody columns Ok(Some(StoreOp::PutDataColumns( @@ -7275,15 +7148,11 @@ impl Drop for BeaconChain { if let Err(e) = drop() { error!( - self.log, - "Failed to persist on BeaconChain drop"; - "error" => ?e + error = ?e, + "Failed to persist on BeaconChain drop" ) } else { - info!( - self.log, - "Saved beacon chain to disk"; - ) + info!("Saved beacon chain to disk") } } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 786b627bb7..fe9d8c6bfc 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -12,9 +12,9 @@ use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::observed_data_sidecars::{DoNotObserve, ObservationStrategy, Observe}; use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; -use slog::debug; use ssz_derive::{Decode, Encode}; use std::time::Duration; 
+use tracing::debug; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ @@ -504,10 +504,9 @@ pub fn validate_blob_sidecar_for_gossip %block_root, - "index" => %blob_index, + %block_root, + %blob_index, + "Proposer shuffling cache miss for blob verification" ); let (parent_state_root, mut parent_state) = chain .store diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 9a8def585f..88df48d0e9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -74,7 +74,6 @@ use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -94,6 +93,7 @@ use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use strum::AsRefStr; use task_executor::JoinHandle; +use tracing::{debug, error}; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, @@ -924,12 +924,11 @@ impl GossipVerifiedBlock { let (mut parent, block) = load_parent(block, chain)?; debug!( - chain.log, - "Proposer shuffling cache miss"; - "parent_root" => ?parent.beacon_block_root, - "parent_slot" => parent.beacon_block.slot(), - "block_root" => ?block_root, - "block_slot" => block.slot(), + parent_root = ?parent.beacon_block_root, + parent_slot = %parent.beacon_block.slot(), + ?block_root, + block_slot = %block.slot(), + "Proposer shuffling cache miss" ); // The state produced is only valid for determining proposer/attester shuffling indices. @@ -1536,10 +1535,9 @@ impl ExecutionPendingBlock { // Expose Prometheus metrics. 
if let Err(e) = summary.observe_metrics() { error!( - chain.log, - "Failed to observe epoch summary metrics"; - "src" => "block_verification", - "error" => ?e + src = "block_verification", + error = ?e, + "Failed to observe epoch summary metrics" ); } summaries.push(summary); @@ -1567,9 +1565,8 @@ impl ExecutionPendingBlock { validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) { error!( - chain.log, - "Failed to process validator statuses"; - "error" => ?e + error = ?e, + "Failed to process validator statuses" ); } } @@ -1609,12 +1606,8 @@ impl ExecutionPendingBlock { * invalid. */ - write_state( - &format!("state_pre_block_{}", block_root), - &state, - &chain.log, - ); - write_block(block.as_block(), block_root, &chain.log); + write_state(&format!("state_pre_block_{}", block_root), &state); + write_block(block.as_block(), block_root); let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); @@ -1647,11 +1640,7 @@ impl ExecutionPendingBlock { metrics::stop_timer(state_root_timer); - write_state( - &format!("state_post_block_{}", block_root), - &state, - &chain.log, - ); + write_state(&format!("state_post_block_{}", block_root), &state); /* * Check to ensure the state root on the block matches the one we have calculated. 
@@ -1942,19 +1931,17 @@ fn load_parent>( if !state.all_caches_built() { debug!( - chain.log, - "Parent state lacks built caches"; - "block_slot" => block.slot(), - "state_slot" => state.slot(), + block_slot = %block.slot(), + state_slot = %state.slot(), + "Parent state lacks built caches" ); } if block.slot() != state.slot() { debug!( - chain.log, - "Parent state is not advanced"; - "block_slot" => block.slot(), - "state_slot" => state.slot(), + block_slot = %block.slot(), + state_slot = %state.slot(), + "Parent state is not advanced" ); } @@ -2160,14 +2147,11 @@ pub fn verify_header_signature( } } -fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { +fn write_state(prefix: &str, state: &BeaconState) { if WRITE_BLOCK_PROCESSING_SSZ { let mut state = state.clone(); let Ok(root) = state.canonical_root() else { - error!( - log, - "Unable to hash state for writing"; - ); + error!("Unable to hash state for writing"); return; }; let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot(), root); @@ -2180,16 +2164,15 @@ fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { let _ = file.write_all(&state.as_ssz_bytes()); } Err(e) => error!( - log, - "Failed to log state"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) + ?path, + error = ?e, + "Failed to log state" ), } } } -fn write_block(block: &SignedBeaconBlock, root: Hash256, log: &Logger) { +fn write_block(block: &SignedBeaconBlock, root: Hash256) { if WRITE_BLOCK_PROCESSING_SSZ { let filename = format!("block_slot_{}_root{}.ssz", block.slot(), root); let mut path = std::env::temp_dir().join("lighthouse"); @@ -2201,10 +2184,9 @@ fn write_block(block: &SignedBeaconBlock, root: Hash256, log: &Lo let _ = file.write_all(&block.as_ssz_bytes()); } Err(e) => error!( - log, - "Failed to log block"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) + ?path, + error = ?e, + "Failed to log block" ), } } diff --git a/beacon_node/beacon_chain/src/builder.rs 
b/beacon_node/beacon_chain/src/builder.rs index 8d62478bea..78216770e5 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -27,11 +27,11 @@ use execution_layer::ExecutionLayer; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; +use logging::crit; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, debug, error, info, o, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::{per_slot_processing, AllCaches}; use std::marker::PhantomData; @@ -39,6 +39,7 @@ use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; +use tracing::{debug, error, info}; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, @@ -96,7 +97,6 @@ pub struct BeaconChainBuilder { validator_pubkey_cache: Option>, spec: Arc, chain_config: ChainConfig, - log: Option, beacon_graffiti: GraffitiOrigin, slasher: Option>>, // Pending I/O batch that is constructed during building and should be executed atomically @@ -140,7 +140,6 @@ where validator_pubkey_cache: None, spec: Arc::new(E::default_spec()), chain_config: ChainConfig::default(), - log: None, beacon_graffiti: GraffitiOrigin::default(), slasher: None, pending_io_batch: vec![], @@ -218,14 +217,6 @@ where self } - /// Sets the logger. - /// - /// Should generally be called early in the build chain. - pub fn logger(mut self, log: Logger) -> Self { - self.log = Some(log); - self - } - /// Sets the task executor. 
pub fn task_executor(mut self, task_executor: TaskExecutor) -> Self { self.task_executor = Some(task_executor); @@ -261,13 +252,7 @@ where /// /// May initialize several components; including the op_pool and finalized checkpoints. pub fn resume_from_db(mut self) -> Result { - let log = self.log.as_ref().ok_or("resume_from_db requires a log")?; - - info!( - log, - "Starting beacon chain"; - "method" => "resume" - ); + info!(method = "resume", "Starting beacon chain"); let store = self .store @@ -289,7 +274,6 @@ where self.chain_config.always_reset_payload_statuses, ), &self.spec, - log, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? .ok_or("Fork choice not found in store")?; @@ -451,19 +435,14 @@ where .store .clone() .ok_or("weak_subjectivity_state requires a store")?; - let log = self - .log - .as_ref() - .ok_or("weak_subjectivity_state requires a log")?; // Ensure the state is advanced to an epoch boundary. let slots_per_epoch = E::slots_per_epoch(); if weak_subj_state.slot() % slots_per_epoch != 0 { debug!( - log, - "Advancing checkpoint state to boundary"; - "state_slot" => weak_subj_state.slot(), - "block_slot" => weak_subj_block.slot(), + state_slot = %weak_subj_state.slot(), + block_slot = %weak_subj_block.slot(), + "Advancing checkpoint state to boundary" ); while weak_subj_state.slot() % slots_per_epoch != 0 { per_slot_processing(&mut weak_subj_state, None, &self.spec) @@ -731,7 +710,6 @@ where mut self, ) -> Result>, String> { - let log = self.log.ok_or("Cannot build without a logger")?; let slot_clock = self .slot_clock .ok_or("Cannot build without a slot_clock.")?; @@ -749,11 +727,8 @@ where let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); let beacon_proposer_cache: Arc> = <_>::default(); - let mut validator_monitor = ValidatorMonitor::new( - validator_monitor_config, - beacon_proposer_cache.clone(), - log.new(o!("service" => "val_mon")), - ); + let mut validator_monitor = + 
ValidatorMonitor::new(validator_monitor_config, beacon_proposer_cache.clone()); let current_slot = if slot_clock .is_prior_to_genesis() @@ -776,19 +751,17 @@ where Ok(None) => return Err("Head block not found in store".into()), Err(StoreError::SszDecodeError(_)) => { error!( - log, - "Error decoding head block"; - "message" => "This node has likely missed a hard fork. \ - It will try to revert the invalid blocks and keep running, \ - but any stray blocks and states will not be deleted. \ - Long-term you should consider re-syncing this node." + message = "This node has likely missed a hard fork. \ + It will try to revert the invalid blocks and keep running, \ + but any stray blocks and states will not be deleted. \ + Long-term you should consider re-syncing this node.", + "Error decoding head block" ); let (block_root, block) = revert_to_fork_boundary( current_slot, initial_head_block_root, store.clone(), &self.spec, - &log, )?; // Update head tracker. @@ -848,12 +821,8 @@ where })?; let migrator_config = self.store_migrator_config.unwrap_or_default(); - let store_migrator = BackgroundMigrator::new( - store.clone(), - migrator_config, - genesis_block_root, - log.clone(), - ); + let store_migrator = + BackgroundMigrator::new(store.clone(), migrator_config, genesis_block_root); if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( @@ -978,9 +947,8 @@ where shuffling_cache: RwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, - log.clone(), )), - eth1_finalization_cache: RwLock::new(Eth1FinalizationCache::new(log.clone())), + eth1_finalization_cache: RwLock::new(Eth1FinalizationCache::default()), beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), @@ -993,12 +961,10 @@ where shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, - log: log.clone(), graffiti_calculator: GraffitiCalculator::new( self.beacon_graffiti, self.execution_layer, 
slot_clock.slot_duration() * E::slots_per_epoch() as u32, - log.clone(), ), slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), @@ -1010,7 +976,6 @@ where store, self.import_all_data_columns, self.spec, - log.new(o!("service" => "data_availability_checker")), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), @@ -1037,25 +1002,23 @@ where &head.beacon_state, ) { crit!( - log, - "Weak subjectivity checkpoint verification failed on startup!"; - "head_block_root" => format!("{}", head.beacon_block_root), - "head_slot" => format!("{}", head.beacon_block.slot()), - "finalized_epoch" => format!("{}", head.beacon_state.finalized_checkpoint().epoch), - "wss_checkpoint_epoch" => format!("{}", wss_checkpoint.epoch), - "error" => format!("{:?}", e), + head_block_root = %head.beacon_block_root, + head_slot = %head.beacon_block.slot(), + finalized_epoch = %head.beacon_state.finalized_checkpoint().epoch, + wss_checkpoint_epoch = %wss_checkpoint.epoch, + error = ?e, + "Weak subjectivity checkpoint verification failed on startup!" ); - crit!(log, "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network."); + crit!("You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network."); return Err(format!("Weak subjectivity verification failed: {:?}", e)); } } info!( - log, - "Beacon chain initialized"; - "head_state" => format!("{}", head.beacon_state_root()), - "head_block" => format!("{}", head.beacon_block_root), - "head_slot" => format!("{}", head.beacon_block.slot()), + head_state = %head.beacon_state_root(), + head_block = %head.beacon_block_root, + head_slot = %head.beacon_block.slot(), + "Beacon chain initialized" ); // Check for states to reconstruct (in the background). @@ -1068,11 +1031,10 @@ where // Prune finalized execution payloads in the background. 
if beacon_chain.store.get_config().prune_payloads { let store = beacon_chain.store.clone(); - let log = log.clone(); beacon_chain.task_executor.spawn_blocking( move || { if let Err(e) = store.try_prune_execution_payloads(false) { - error!(log, "Error pruning payloads in background"; "error" => ?e); + error!(error = ?e, "Error pruning payloads in background"); } }, "prune_payloads_background", @@ -1105,13 +1067,7 @@ where /// Sets the `BeaconChain` eth1 back-end to produce predictably junk data when producing blocks. pub fn dummy_eth1_backend(mut self) -> Result { - let log = self - .log - .as_ref() - .ok_or("dummy_eth1_backend requires a log")?; - - let backend = - CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone())?; + let backend = CachingEth1Backend::new(Eth1Config::default(), self.spec.clone())?; self.eth1_chain = Some(Eth1Chain::new_dummy(backend)); @@ -1186,7 +1142,6 @@ mod test { use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; - use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use std::time::Duration; use store::config::StoreConfig; @@ -1197,27 +1152,16 @@ mod test { type TestEthSpec = MinimalEthSpec; type Builder = BeaconChainBuilder>; - fn get_logger() -> Logger { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } - #[test] fn recent_genesis() { let validator_count = 1; let genesis_time = 13_371_337; - let log = get_logger(); let store: HotColdDB< MinimalEthSpec, MemoryStore, MemoryStore, - > = HotColdDB::open_ephemeral( - StoreConfig::default(), - ChainSpec::minimal().into(), - log.clone(), - ) - .unwrap(); + > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal().into()).unwrap(); let spec = MinimalEthSpec::default_spec(); let genesis_state = interop_genesis_state( @@ -1235,7 +1179,6 @@ mod test { let kzg = get_kzg(&spec); let chain = Builder::new(MinimalEthSpec, kzg) - .logger(log.clone())
.store(Arc::new(store)) .task_executor(runtime.task_executor.clone()) .genesis_state(genesis_state) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 4e21372efb..bac47f5da7 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -47,14 +47,15 @@ use fork_choice::{ ResetPayloadStatuses, }; use itertools::process_results; +use logging::crit; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; use task_executor::{JoinHandle, ShutdownReason}; +use tracing::{debug, error, info, warn}; use types::*; /// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from @@ -286,10 +287,9 @@ impl CanonicalHead { reset_payload_statuses: ResetPayloadStatuses, store: &BeaconStore, spec: &ChainSpec, - log: &Logger, ) -> Result<(), Error> { let fork_choice = - >::load_fork_choice(store.clone(), reset_payload_statuses, spec, log)? + >::load_fork_choice(store.clone(), reset_payload_statuses, spec)? .ok_or(Error::MissingPersistedForkChoice)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; @@ -475,9 +475,8 @@ impl BeaconChain { match self.slot() { Ok(current_slot) => self.recompute_head_at_slot(current_slot).await, Err(e) => error!( - self.log, - "No slot when recomputing head"; - "error" => ?e + error = ?e, + "No slot when recomputing head" ), } } @@ -515,18 +514,13 @@ impl BeaconChain { Ok(Some(())) => (), // The async task did not complete successfully since the runtime is shutting down. 
Ok(None) => { - debug!( - self.log, - "Did not update EL fork choice"; - "info" => "shutting down" - ); + debug!(info = "shutting down", "Did not update EL fork choice"); } // The async task did not complete successfully, tokio returned an error. Err(e) => { error!( - self.log, - "Did not update EL fork choice"; - "error" => ?e + error = ?e, + "Did not update EL fork choice" ); } }, @@ -534,17 +528,15 @@ Ok(Err(e)) => { metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); error!( - self.log, - "Error whist recomputing head"; - "error" => ?e + error = ?e, + "Error whilst recomputing head" ); } // There was an error spawning the task. Err(e) => { error!( - self.log, - "Failed to spawn recompute head task"; - "error" => ?e + error = ?e, + "Failed to spawn recompute head task" ); } } @@ -627,9 +619,8 @@ impl BeaconChain { // nothing to do. if new_view == old_view { debug!( - self.log, - "No change in canonical head"; - "head" => ?new_view.head_block_root + head = ?new_view.head_block_root, + "No change in canonical head" ); return Ok(None); } @@ -639,7 +630,7 @@ let new_forkchoice_update_parameters = fork_choice_read_lock.get_forkchoice_update_parameters(); - perform_debug_logging::(&old_view, &new_view, &fork_choice_read_lock, &self.log); + perform_debug_logging::(&old_view, &new_view, &fork_choice_read_lock); // Drop the read lock, it's no longer required and holding it any longer than necessary // will just cause lock contention.
@@ -732,9 +723,8 @@ impl BeaconChain { self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) { crit!( - self.log, - "Error updating canonical head"; - "error" => ?e + error = ?e, + "Error updating canonical head" ); } } @@ -751,9 +741,8 @@ impl BeaconChain { self.after_finalization(&new_cached_head, new_view, finalized_proto_block) { crit!( - self.log, - "Error updating finalization"; - "error" => ?e + error = ?e, + "Error updating finalization" ); } } @@ -791,7 +780,6 @@ impl BeaconChain { &new_snapshot.beacon_state, new_snapshot.beacon_block_root, &self.spec, - &self.log, ); // Determine if the new head is in a later epoch to the previous head. @@ -824,10 +812,9 @@ impl BeaconChain { .update_head_shuffling_ids(head_shuffling_ids), Err(e) => { error!( - self.log, - "Failed to get head shuffling ids"; - "error" => ?e, - "head_block_root" => ?new_snapshot.beacon_block_root + error = ?e, + head_block_root = ?new_snapshot.beacon_block_root, + "Failed to get head shuffling ids" ); } } @@ -844,7 +831,6 @@ impl BeaconChain { .as_utf8_lossy(), &self.slot_clock, self.event_handler.as_ref(), - &self.log, ); if is_epoch_transition || reorg_distance.is_some() { @@ -872,9 +858,8 @@ impl BeaconChain { } (Err(e), _) | (_, Err(e)) => { warn!( - self.log, - "Unable to find dependent roots, cannot register head event"; - "error" => ?e + error = ?e, + "Unable to find dependent roots, cannot register head event" ); } } @@ -1037,11 +1022,10 @@ fn check_finalized_payload_validity( ) -> Result<(), Error> { if let ExecutionStatus::Invalid(block_hash) = finalized_proto_block.execution_status { crit!( - chain.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + ?block_hash, + msg = "You must use the `--purge-db` flag to clear the database and restart sync. 
\ You may be on a hostile network.", - "block_hash" => ?block_hash + "Finalized block has an invalid payload" ); let mut shutdown_sender = chain.shutdown_sender(); shutdown_sender @@ -1083,38 +1067,34 @@ fn perform_debug_logging( old_view: &ForkChoiceView, new_view: &ForkChoiceView, fork_choice: &BeaconForkChoice, - log: &Logger, ) { if new_view.head_block_root != old_view.head_block_root { debug!( - log, - "Fork choice updated head"; - "new_head_weight" => ?fork_choice - .get_block_weight(&new_view.head_block_root), - "new_head" => ?new_view.head_block_root, - "old_head_weight" => ?fork_choice - .get_block_weight(&old_view.head_block_root), - "old_head" => ?old_view.head_block_root, + new_head_weight = ?fork_choice + .get_block_weight(&new_view.head_block_root), + new_head = ?new_view.head_block_root, + old_head_weight = ?fork_choice + .get_block_weight(&old_view.head_block_root), + old_head = ?old_view.head_block_root, + "Fork choice updated head" ) } if new_view.justified_checkpoint != old_view.justified_checkpoint { debug!( - log, - "Fork choice justified"; - "new_root" => ?new_view.justified_checkpoint.root, - "new_epoch" => new_view.justified_checkpoint.epoch, - "old_root" => ?old_view.justified_checkpoint.root, - "old_epoch" => old_view.justified_checkpoint.epoch, + new_root = ?new_view.justified_checkpoint.root, + new_epoch = %new_view.justified_checkpoint.epoch, + old_root = ?old_view.justified_checkpoint.root, + old_epoch = %old_view.justified_checkpoint.epoch, + "Fork choice justified" ) } if new_view.finalized_checkpoint != old_view.finalized_checkpoint { debug!( - log, - "Fork choice finalized"; - "new_root" => ?new_view.finalized_checkpoint.root, - "new_epoch" => new_view.finalized_checkpoint.epoch, - "old_root" => ?old_view.finalized_checkpoint.root, - "old_epoch" => old_view.finalized_checkpoint.epoch, + new_root = ?new_view.finalized_checkpoint.root, + new_epoch = %new_view.finalized_checkpoint.epoch, + old_root = 
?old_view.finalized_checkpoint.root, + old_epoch = %old_view.finalized_checkpoint.epoch, + "Fork choice finalized" ) } } @@ -1149,9 +1129,8 @@ fn spawn_execution_layer_updates( .await { crit!( - chain.log, - "Failed to update execution head"; - "error" => ?e + error = ?e, + "Failed to update execution head" ); } @@ -1165,9 +1144,8 @@ fn spawn_execution_layer_updates( // know. if let Err(e) = chain.prepare_beacon_proposer(current_slot).await { crit!( - chain.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e + error = ?e, + "Failed to prepare proposers after fork choice" ); } }, @@ -1188,7 +1166,6 @@ fn detect_reorg( new_state: &BeaconState, new_block_root: Hash256, spec: &ChainSpec, - log: &Logger, ) -> Option { let is_reorg = new_state .get_block_root(old_state.slot()) @@ -1199,11 +1176,7 @@ fn detect_reorg( match find_reorg_slot(old_state, old_block_root, new_state, new_block_root, spec) { Ok(slot) => old_state.slot().saturating_sub(slot), Err(e) => { - warn!( - log, - "Could not find re-org depth"; - "error" => format!("{:?}", e), - ); + warn!(error = ?e, "Could not find re-org depth"); return None; } }; @@ -1215,13 +1188,12 @@ fn detect_reorg( reorg_distance.as_u64() as i64, ); info!( - log, - "Beacon chain re-org"; - "previous_head" => ?old_block_root, - "previous_slot" => old_state.slot(), - "new_head" => ?new_block_root, - "new_slot" => new_state.slot(), - "reorg_distance" => reorg_distance, + previous_head = ?old_block_root, + previous_slot = %old_state.slot(), + new_head = ?new_block_root, + new_slot = %new_state.slot(), + %reorg_distance, + "Beacon chain re-org" ); Some(reorg_distance) @@ -1301,7 +1273,6 @@ fn observe_head_block_delays( head_block_graffiti: String, slot_clock: &S, event_handler: Option<&ServerSentEventHandler>, - log: &Logger, ) { let block_time_set_as_head = timestamp_now(); let head_block_root = head_block.root; @@ -1434,37 +1405,35 @@ fn observe_head_block_delays( if late_head { 
metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_HEAD_SLOT_START_EXCEEDED_TOTAL); debug!( - log, - "Delayed head block"; - "block_root" => ?head_block_root, - "proposer_index" => head_block_proposer_index, - "slot" => head_block_slot, - "total_delay_ms" => block_delay_total.as_millis(), - "observed_delay_ms" => format_delay(&block_delays.observed), - "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), - "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time), - "execution_time_ms" => format_delay(&block_delays.execution_time), - "available_delay_ms" => format_delay(&block_delays.available), - "attestable_delay_ms" => format_delay(&block_delays.attestable), - "imported_time_ms" => format_delay(&block_delays.imported), - "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), + block_root = ?head_block_root, + proposer_index = head_block_proposer_index, + slot = %head_block_slot, + total_delay_ms = block_delay_total.as_millis(), + observed_delay_ms = format_delay(&block_delays.observed), + blob_delay_ms = format_delay(&block_delays.all_blobs_observed), + consensus_time_ms = format_delay(&block_delays.consensus_verification_time), + execution_time_ms = format_delay(&block_delays.execution_time), + available_delay_ms = format_delay(&block_delays.available), + attestable_delay_ms = format_delay(&block_delays.attestable), + imported_time_ms = format_delay(&block_delays.imported), + set_as_head_time_ms = format_delay(&block_delays.set_as_head), + "Delayed head block" ); } else { debug!( - log, - "On-time head block"; - "block_root" => ?head_block_root, - "proposer_index" => head_block_proposer_index, - "slot" => head_block_slot, - "total_delay_ms" => block_delay_total.as_millis(), - "observed_delay_ms" => format_delay(&block_delays.observed), - "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), - "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time), - "execution_time_ms" => 
format_delay(&block_delays.execution_time), - "available_delay_ms" => format_delay(&block_delays.available), - "attestable_delay_ms" => format_delay(&block_delays.attestable), - "imported_time_ms" => format_delay(&block_delays.imported), - "set_as_head_time_ms" => format_delay(&block_delays.set_as_head), + block_root = ?head_block_root, + proposer_index = head_block_proposer_index, + slot = %head_block_slot, + total_delay_ms = block_delay_total.as_millis(), + observed_delay_ms = format_delay(&block_delays.observed), + blob_delay_ms = format_delay(&block_delays.all_blobs_observed), + consensus_time_ms = format_delay(&block_delays.consensus_verification_time), + execution_time_ms = format_delay(&block_delays.execution_time), + available_delay_ms = format_delay(&block_delays.available), + attestable_delay_ms = format_delay(&block_delays.attestable), + imported_time_ms = format_delay(&block_delays.imported), + set_as_head_time_ms = format_delay(&block_delays.set_as_head), + "On-time head block" ); } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 875645ee9f..07d663369a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -7,7 +7,6 @@ use crate::data_availability_checker::overflow_lru_cache::{ }; use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -16,6 +15,7 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::oneshot; +use tracing::{debug, error, info_span, Instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, @@ -75,7 +75,6 @@ pub struct DataAvailabilityChecker { slot_clock: 
T::SlotClock, kzg: Arc, spec: Arc, - log: Logger, } pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); @@ -114,7 +113,6 @@ impl DataAvailabilityChecker { store: BeaconStore, import_all_data_columns: bool, spec: Arc, - log: Logger, ) -> Result { let custody_group_count = spec.custody_group_count(import_all_data_columns); // This should only panic if the chain spec contains invalid values. @@ -133,7 +131,6 @@ impl DataAvailabilityChecker { slot_clock, kzg, spec, - log, }) } @@ -218,7 +215,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) + .put_kzg_verified_blobs(block_root, verified_blobs) } /// Put a list of custody columns received via RPC into the availability cache. This performs KZG @@ -238,11 +235,8 @@ impl DataAvailabilityChecker { .map(KzgVerifiedCustodyDataColumn::from_asserted_custody) .collect::>(); - self.availability_cache.put_kzg_verified_data_columns( - block_root, - verified_custody_columns, - &self.log, - ) + self.availability_cache + .put_kzg_verified_data_columns(block_root, verified_custody_columns) } /// Put a list of blobs received from the EL pool into the availability cache. 
@@ -262,7 +256,6 @@ impl DataAvailabilityChecker { block_root, block_epoch, data_columns_recv, - &self.log, ) } else { let seen_timestamp = self @@ -272,7 +265,6 @@ impl DataAvailabilityChecker { self.availability_cache.put_kzg_verified_blobs( block_root, KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), - &self.log, ) } } @@ -286,11 +278,8 @@ impl DataAvailabilityChecker { &self, gossip_blob: GossipVerifiedBlob, ) -> Result, AvailabilityCheckError> { - self.availability_cache.put_kzg_verified_blobs( - gossip_blob.block_root(), - vec![gossip_blob.into_inner()], - &self.log, - ) + self.availability_cache + .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) } /// Check if we've cached other data columns for this block. If it satisfies the custody requirement and we also @@ -309,11 +298,8 @@ impl DataAvailabilityChecker { .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) .collect::>(); - self.availability_cache.put_kzg_verified_data_columns( - block_root, - custody_columns, - &self.log, - ) + self.availability_cache + .put_kzg_verified_data_columns(block_root, custody_columns) } /// Check if we have all the blobs for a block. 
Returns `Availability` which has information @@ -323,7 +309,7 @@ impl DataAvailabilityChecker { executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_pending_executed_block(executed_block, &self.log) + .put_pending_executed_block(executed_block) } pub fn remove_pending_components(&self, block_root: Hash256) { @@ -563,10 +549,9 @@ impl DataAvailabilityChecker { ) .map_err(|e| { error!( - self.log, - "Error reconstructing data columns"; - "block_root" => ?block_root, - "error" => ?e + ?block_root, + error = ?e, + "Error reconstructing data columns" ); self.availability_cache .handle_reconstruction_failure(block_root); @@ -601,14 +586,15 @@ impl DataAvailabilityChecker { data_columns_to_publish.len() as u64, ); - debug!(self.log, "Reconstructed columns"; - "count" => data_columns_to_publish.len(), - "block_root" => ?block_root, - "slot" => slot, + debug!( + count = data_columns_to_publish.len(), + ?block_root, + %slot, + "Reconstructed columns" ); self.availability_cache - .put_kzg_verified_data_columns(*block_root, data_columns_to_publish.clone(), &self.log) + .put_kzg_verified_data_columns(*block_root, data_columns_to_publish.clone()) .map(|availability| { DataColumnReconstructionResult::Success(( availability, @@ -635,14 +621,18 @@ pub fn start_availability_cache_maintenance_service( if chain.spec.deneb_fork_epoch.is_some() { let overflow_cache = chain.data_availability_checker.availability_cache.clone(); executor.spawn( - async move { availability_cache_maintenance_service(chain, overflow_cache).await }, + async move { + availability_cache_maintenance_service(chain, overflow_cache) + .instrument(info_span!( + "DataAvailabilityChecker", + service = "data_availability_checker" + )) + .await + }, "availability_cache_service", ); } else { - debug!( - chain.log, - "Deneb fork not configured, not starting availability cache maintenance service" - ); + debug!("Deneb fork not configured, not starting 
availability cache maintenance service"); } } @@ -666,10 +656,7 @@ async fn availability_cache_maintenance_service( break; }; - debug!( - chain.log, - "Availability cache maintenance service firing"; - ); + debug!("Availability cache maintenance service firing"); let Some(current_epoch) = chain .slot_clock .now() @@ -699,11 +686,11 @@ async fn availability_cache_maintenance_service( ); if let Err(e) = overflow_cache.do_maintenance(cutoff_epoch) { - error!(chain.log, "Failed to maintain availability cache"; "error" => ?e); + error!(error = ?e, "Failed to maintain availability cache"); } } None => { - error!(chain.log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. tokio::time::sleep(chain.slot_clock.slot_duration()).await; } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 78de538929..d4cbf5ab76 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -10,11 +10,11 @@ use crate::data_column_verification::KzgVerifiedCustodyDataColumn; use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; -use slog::{debug, Logger}; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; use tokio::sync::oneshot; +use tracing::debug; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, @@ -452,7 +452,6 @@ impl DataAvailabilityCheckerInner { &self, block_root: Hash256, kzg_verified_blobs: I, - log: &Logger, ) -> Result, AvailabilityCheckError> { let mut kzg_verified_blobs = kzg_verified_blobs.into_iter().peekable(); @@ -486,10 +485,11 @@ impl DataAvailabilityCheckerInner { // Merge in the blobs.
pending_components.merge_blobs(fixed_blobs); - debug!(log, "Component added to data availability checker"; - "component" => "blobs", - "block_root" => ?block_root, - "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + debug!( + component = "blobs", + ?block_root, + status = pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + "Component added to data availability checker" ); if let Some(available_block) = @@ -515,7 +515,6 @@ impl DataAvailabilityCheckerInner { &self, block_root: Hash256, kzg_verified_data_columns: I, - log: &Logger, ) -> Result, AvailabilityCheckError> { let mut kzg_verified_data_columns = kzg_verified_data_columns.into_iter().peekable(); let Some(epoch) = kzg_verified_data_columns @@ -539,10 +538,11 @@ impl DataAvailabilityCheckerInner { // Merge in the data columns. pending_components.merge_data_columns(kzg_verified_data_columns)?; - debug!(log, "Component added to data availability checker"; - "component" => "data_columns", - "block_root" => ?block_root, - "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + debug!( + component = "data_columns", + ?block_root, + status = pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + "Component added to data availability checker" ); if let Some(available_block) = @@ -569,7 +569,6 @@ impl DataAvailabilityCheckerInner { block_root: Hash256, block_epoch: Epoch, data_column_recv: oneshot::Receiver>, - log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); @@ -589,10 +588,12 @@ impl DataAvailabilityCheckerInner { // TODO(das): Error or log if we overwrite a prior receiver https://github.com/sigp/lighthouse/issues/6764 pending_components.data_column_recv = Some(data_column_recv); - debug!(log, "Component added to data availability checker"; - "component" => "data_columns_recv", - "block_root" => ?block_root, - "status" => 
pending_components.status_str(block_epoch, self.sampling_column_count, &self.spec), + debug!( + component = "data_columns_recv", + ?block_root, + status = + pending_components.status_str(block_epoch, self.sampling_column_count, &self.spec), + "Component added to data availability checker" ); if let Some(available_block) = @@ -667,7 +668,6 @@ impl DataAvailabilityCheckerInner { pub fn put_pending_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, - log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); let epoch = executed_block.as_block().epoch(); @@ -689,10 +689,11 @@ impl DataAvailabilityCheckerInner { // Merge in the block. pending_components.merge_block(diet_executed_block); - debug!(log, "Component added to data availability checker"; - "component" => "block", - "block_root" => ?block_root, - "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + debug!( + component = "block", + ?block_root, + status = pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + "Component added to data availability checker" ); // Check if we have all components and entire set is consistent. 
@@ -769,13 +770,12 @@ mod test { test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; - - use logging::test_logger; - use slog::{info, Logger}; + use logging::create_test_tracing_subscriber; use state_processing::ConsensusContext; use std::collections::VecDeque; use store::{database::interface::BeaconNodeBackend, HotColdDB, ItemStore, StoreConfig}; use tempfile::{tempdir, TempDir}; + use tracing::info; use types::non_zero_usize::new_non_zero_usize; use types::{ExecPayload, MinimalEthSpec}; @@ -785,7 +785,6 @@ mod test { fn get_store_with_spec( db_path: &TempDir, spec: Arc, - log: Logger, ) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); @@ -799,14 +798,12 @@ mod test { |_, _, _| Ok(()), config, spec, - log, ) .expect("disk store should initialize") } // get a beacon chain harness advanced to just before deneb fork async fn get_deneb_chain( - log: Logger, db_path: &TempDir, ) -> BeaconChainHarness> { let altair_fork_epoch = Epoch::new(1); @@ -823,12 +820,11 @@ mod test { spec.deneb_fork_epoch = Some(deneb_fork_epoch); let spec = Arc::new(spec); - let chain_store = get_store_with_spec::(db_path, spec.clone(), log.clone()); + let chain_store = get_store_with_spec::(db_path, spec.clone()); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.clone()) - .logger(log.clone()) .keypairs(validators_keypairs) .fresh_disk_store(chain_store) .mock_execution_layer() @@ -871,7 +867,6 @@ mod test { Cold: ItemStore, { let chain = &harness.chain; - let log = chain.log.clone(); let head = chain.head_snapshot(); let parent_state = head.beacon_state.clone(); @@ -899,7 +894,7 @@ mod test { ); // log kzg commitments - info!(log, "printing kzg commitments"); + info!("printing kzg commitments"); for comm in Vec::from( block .message() @@ 
-908,9 +903,9 @@ mod test { .expect("should be deneb fork") .clone(), ) { - info!(log, "kzg commitment"; "commitment" => ?comm); + info!(commitment = ?comm, "kzg commitment"); } - info!(log, "done printing kzg commitments"); + info!("done printing kzg commitments"); let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { let sidecars = @@ -967,9 +962,9 @@ mod test { EthSpec = E, >, { - let log = test_logger(); + create_test_tracing_subscriber(); let chain_db_path = tempdir().expect("should get temp dir"); - let harness = get_deneb_chain(log.clone(), &chain_db_path).await; + let harness = get_deneb_chain(&chain_db_path).await; let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); @@ -1003,7 +998,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block, harness.logger()) + .put_pending_executed_block(pending_block) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -1042,7 +1037,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -1070,7 +1065,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); assert!( matches!(availability, Availability::MissingComponents(_)), @@ -1079,7 +1074,7 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } let availability = 
cache - .put_pending_executed_block(pending_block, harness.logger()) + .put_pending_executed_block(pending_block) .expect("should put block"); assert!( matches!(availability, Availability::Available(_)), @@ -1147,7 +1142,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block, harness.logger()) + .put_pending_executed_block(pending_block) .expect("should put block"); // grab the diet block from the cache for later testing diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 1262fcdeb8..2f95d834b5 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -10,12 +10,12 @@ use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; use proto_array::Block; use slasher::test_utils::E; -use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; use std::iter; use std::marker::PhantomData; use std::sync::Arc; +use tracing::debug; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, @@ -580,10 +580,9 @@ fn verify_proposer_and_signature( (proposer.index, proposer.fork) } else { debug!( - chain.log, - "Proposer shuffling cache miss for column verification"; - "block_root" => %block_root, - "index" => %column_index, + %block_root, + index = %column_index, + "Proposer shuffling cache miss for column verification" ); let (parent_state_root, mut parent_state) = chain .store diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index ad4f106517..43429b726c 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -3,7 +3,6 @@ use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; 
use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; -use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::get_new_eth1_data; @@ -14,6 +13,7 @@ use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; +use tracing::{debug, error, trace}; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, }; @@ -283,11 +283,9 @@ where pub fn from_ssz_container( ssz_container: &SszEth1, config: Eth1Config, - log: &Logger, spec: Arc, ) -> Result { - let backend = - Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?; + let backend = Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, spec)?; Ok(Self { use_dummy_backend: ssz_container.use_dummy_backend, backend, @@ -351,12 +349,7 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { fn as_bytes(&self) -> Vec; /// Create a `Eth1ChainBackend` instance given encoded bytes. - fn from_bytes( - bytes: &[u8], - config: Eth1Config, - log: Logger, - spec: Arc, - ) -> Result; + fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result; } /// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. @@ -412,7 +405,6 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { fn from_bytes( _bytes: &[u8], _config: Eth1Config, - _log: Logger, _spec: Arc, ) -> Result { Ok(Self(PhantomData)) @@ -433,7 +425,6 @@ impl Default for DummyEth1ChainBackend { #[derive(Clone)] pub struct CachingEth1Backend { pub core: HttpService, - log: Logger, _phantom: PhantomData, } @@ -441,11 +432,10 @@ impl CachingEth1Backend { /// Instantiates `self` with empty caches. /// /// Does not connect to the eth1 node or start any tasks to keep the cache updated. 
- pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { + pub fn new(config: Eth1Config, spec: Arc) -> Result { Ok(Self { - core: HttpService::new(config, log.clone(), spec) + core: HttpService::new(config, spec) .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, - log, _phantom: PhantomData, }) } @@ -458,7 +448,6 @@ impl CachingEth1Backend { /// Instantiates `self` from an existing service. pub fn from_service(service: HttpService) -> Self { Self { - log: service.log.clone(), core: service, _phantom: PhantomData, } @@ -481,9 +470,8 @@ impl Eth1ChainBackend for CachingEth1Backend { }; trace!( - self.log, - "Found eth1 data votes_to_consider"; - "votes_to_consider" => votes_to_consider.len(), + votes_to_consider = votes_to_consider.len(), + "Found eth1 data votes_to_consider" ); let valid_votes = collect_valid_votes(state, &votes_to_consider); @@ -500,22 +488,20 @@ impl Eth1ChainBackend for CachingEth1Backend { .map(|vote| { let vote = vote.0.clone(); debug!( - self.log, - "No valid eth1_data votes"; - "outcome" => "Casting vote corresponding to last candidate eth1 block", - "vote" => ?vote + outcome = "Casting vote corresponding to last candidate eth1 block", + ?vote, + "No valid eth1_data votes" ); vote }) .unwrap_or_else(|| { let vote = state.eth1_data().clone(); error!( - self.log, - "No valid eth1_data votes, `votes_to_consider` empty"; - "lowest_block_number" => self.core.lowest_block_number(), - "earliest_block_timestamp" => self.core.earliest_block_timestamp(), - "genesis_time" => state.genesis_time(), - "outcome" => "casting `state.eth1_data` as eth1 vote" + lowest_block_number = self.core.lowest_block_number(), + earliest_block_timestamp = self.core.earliest_block_timestamp(), + genesis_time = state.genesis_time(), + outcome = "casting `state.eth1_data` as eth1 vote", + "No valid eth1_data votes, `votes_to_consider` empty" ); metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES); vote @@ -523,11 +509,10 @@ impl 
Eth1ChainBackend for CachingEth1Backend { }; debug!( - self.log, - "Produced vote for eth1 chain"; - "deposit_root" => format!("{:?}", eth1_data.deposit_root), - "deposit_count" => eth1_data.deposit_count, - "block_hash" => format!("{:?}", eth1_data.block_hash), + deposit_root = ?eth1_data.deposit_root, + deposit_count = eth1_data.deposit_count, + block_hash = ?eth1_data.block_hash, + "Produced vote for eth1 chain" ); Ok(eth1_data) @@ -592,16 +577,10 @@ impl Eth1ChainBackend for CachingEth1Backend { } /// Recover the cached backend from encoded bytes. - fn from_bytes( - bytes: &[u8], - config: Eth1Config, - log: Logger, - spec: Arc, - ) -> Result { - let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?; + fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result { + let inner = HttpService::from_bytes(bytes, config, spec)?; Ok(Self { core: inner, - log, _phantom: PhantomData, }) } @@ -742,17 +721,18 @@ mod test { mod eth1_chain_json_backend { use super::*; use eth1::DepositLog; - use logging::test_logger; + use logging::create_test_tracing_subscriber; use types::{test_utils::generate_deterministic_keypair, MainnetEthSpec}; fn get_eth1_chain() -> Eth1Chain, E> { + create_test_tracing_subscriber(); + let eth1_config = Eth1Config { ..Eth1Config::default() }; - let log = test_logger(); Eth1Chain::new( - CachingEth1Backend::new(eth1_config, log, Arc::new(MainnetEthSpec::default_spec())) + CachingEth1Backend::new(eth1_config, Arc::new(MainnetEthSpec::default_spec())) .unwrap(), ) } diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 24b6542eab..84618ceab0 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -1,7 +1,7 @@ -use slog::{debug, Logger}; use ssz_derive::{Decode, Encode}; use std::cmp; use std::collections::BTreeMap; +use tracing::debug; use types::{Checkpoint, Epoch, 
Eth1Data, Hash256 as Root}; /// The default size of the cache. @@ -104,28 +104,27 @@ pub struct Eth1FinalizationCache { by_checkpoint: CheckpointMap, pending_eth1: BTreeMap, last_finalized: Option, - log: Logger, +} + +impl Default for Eth1FinalizationCache { + fn default() -> Self { + Self { + by_checkpoint: CheckpointMap::new(), + pending_eth1: BTreeMap::new(), + last_finalized: None, + } + } } /// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to /// finalize deposits when a new epoch is finalized. /// impl Eth1FinalizationCache { - pub fn new(log: Logger) -> Self { - Eth1FinalizationCache { - by_checkpoint: CheckpointMap::new(), - pending_eth1: BTreeMap::new(), - last_finalized: None, - log, - } - } - - pub fn with_capacity(log: Logger, capacity: usize) -> Self { + pub fn with_capacity(capacity: usize) -> Self { Eth1FinalizationCache { by_checkpoint: CheckpointMap::with_capacity(capacity), pending_eth1: BTreeMap::new(), last_finalized: None, - log, } } @@ -136,10 +135,9 @@ impl Eth1FinalizationCache { eth1_finalization_data.eth1_data.clone(), ); debug!( - self.log, - "Eth1Cache: inserted pending eth1"; - "eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count, - "eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index, + eth1_data.deposit_count = eth1_finalization_data.eth1_data.deposit_count, + eth1_deposit_index = eth1_finalization_data.eth1_deposit_index, + "Eth1Cache: inserted pending eth1" ); } self.by_checkpoint @@ -154,10 +152,8 @@ impl Eth1FinalizationCache { if finalized_deposit_index >= pending_count { result = self.pending_eth1.remove(&pending_count); debug!( - self.log, - "Eth1Cache: dropped pending eth1"; - "pending_count" => pending_count, - "finalized_deposit_index" => finalized_deposit_index, + pending_count, + finalized_deposit_index, "Eth1Cache: dropped pending eth1" ); } else { break; @@ -172,9 +168,8 @@ impl Eth1FinalizationCache { self.last_finalized.clone() } else { debug!( - self.log, - 
"Eth1Cache: cache miss"; - "epoch" => checkpoint.epoch, + epoch = %checkpoint.epoch, + "Eth1Cache: cache miss" ); None } @@ -194,8 +189,6 @@ impl Eth1FinalizationCache { #[cfg(test)] pub mod tests { use super::*; - use sloggers::null::NullLoggerBuilder; - use sloggers::Build; use std::collections::HashMap; const SLOTS_PER_EPOCH: u64 = 32; @@ -203,8 +196,7 @@ pub mod tests { const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; fn eth1cache() -> Eth1FinalizationCache { - let log_builder = NullLoggerBuilder; - Eth1FinalizationCache::new(log_builder.build().expect("should build log")) + Eth1FinalizationCache::default() } fn random_eth1_data(deposit_count: u64) -> Eth1Data { diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 8c342893ae..d09b74e645 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -1,7 +1,7 @@ pub use eth2::types::{EventKind, SseBlock, SseFinalizedCheckpoint, SseHead}; -use slog::{trace, Logger}; use tokio::sync::broadcast; use tokio::sync::broadcast::{error::SendError, Receiver, Sender}; +use tracing::trace; use types::EthSpec; const DEFAULT_CHANNEL_CAPACITY: usize = 16; @@ -25,18 +25,14 @@ pub struct ServerSentEventHandler { attester_slashing_tx: Sender>, bls_to_execution_change_tx: Sender>, block_gossip_tx: Sender>, - log: Logger, } impl ServerSentEventHandler { - pub fn new(log: Logger, capacity_multiplier: usize) -> Self { - Self::new_with_capacity( - log, - capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY), - ) + pub fn new(capacity_multiplier: usize) -> Self { + Self::new_with_capacity(capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY)) } - pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { + pub fn new_with_capacity(capacity: usize) -> Self { let (attestation_tx, _) = broadcast::channel(capacity); let (single_attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); @@ -75,17 
+71,15 @@ impl ServerSentEventHandler { attester_slashing_tx, bls_to_execution_change_tx, block_gossip_tx, - log, } } pub fn register(&self, kind: EventKind) { let log_count = |name, count| { trace!( - self.log, - "Registering server-sent event"; - "kind" => name, - "receiver_count" => count + kind = name, + receiver_count = count, + "Registering server-sent event" ); }; let result = match &kind { @@ -163,7 +157,7 @@ impl ServerSentEventHandler { .map(|count| log_count("block gossip", count)), }; if let Err(SendError(event)) = result { - trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); + trace!(?event, "No receivers registered to listen for event"); } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 720f98e298..1da8cb413b 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -17,7 +17,6 @@ use execution_layer::{ }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; -use slog::{debug, warn}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled, @@ -25,6 +24,7 @@ use state_processing::per_block_processing::{ }; use std::sync::Arc; use tokio::task::JoinHandle; +use tracing::{debug, warn}; use tree_hash::TreeHash; use types::payload::BlockProductionVersion; use types::*; @@ -85,11 +85,10 @@ impl PayloadNotifier { block_message.try_into()?; if let Err(e) = new_payload_request.perform_optimistic_sync_verifications() { warn!( - chain.log, - "Falling back to slow block hash verification"; - "block_number" => ?block_message.execution_payload().map(|payload| payload.block_number()), - "info" => "you can silence this warning with --disable-optimistic-finalized-sync", - "error" => ?e, + block_number = 
?block_message.execution_payload().map(|payload| payload.block_number()), + info = "you can silence this warning with --disable-optimistic-finalized-sync", + error = ?e, + "Falling back to slow block hash verification" ); None } else { @@ -150,16 +149,15 @@ async fn notify_new_payload( ref validation_error, } => { warn!( - chain.log, - "Invalid execution payload"; - "validation_error" => ?validation_error, - "latest_valid_hash" => ?latest_valid_hash, - "execution_block_hash" => ?execution_block_hash, - "root" => ?block.tree_hash_root(), - "graffiti" => block.body().graffiti().as_utf8_lossy(), - "proposer_index" => block.proposer_index(), - "slot" => block.slot(), - "method" => "new_payload", + ?validation_error, + ?latest_valid_hash, + ?execution_block_hash, + root = ?block.tree_hash_root(), + graffiti = block.body().graffiti().as_utf8_lossy(), + proposer_index = block.proposer_index(), + slot = %block.slot(), + method = "new_payload", + "Invalid execution payload" ); // Only trigger payload invalidation in fork choice if the @@ -197,15 +195,14 @@ async fn notify_new_payload( ref validation_error, } => { warn!( - chain.log, - "Invalid execution payload block hash"; - "validation_error" => ?validation_error, - "execution_block_hash" => ?execution_block_hash, - "root" => ?block.tree_hash_root(), - "graffiti" => block.body().graffiti().as_utf8_lossy(), - "proposer_index" => block.proposer_index(), - "slot" => block.slot(), - "method" => "new_payload", + ?validation_error, + ?execution_block_hash, + root = ?block.tree_hash_root(), + graffiti = block.body().graffiti().as_utf8_lossy(), + proposer_index = block.proposer_index(), + slot = %block.slot(), + method = "new_payload", + "Invalid execution payload block hash" ); // Returning an error here should be sufficient to invalidate the block. 
We have no @@ -278,10 +275,9 @@ pub async fn validate_merge_block( None => { if allow_optimistic_import == AllowOptimisticImport::Yes { debug!( - chain.log, - "Optimistically importing merge transition block"; - "block_hash" => ?execution_payload.parent_hash(), - "msg" => "the terminal block/parent was unavailable" + block_hash = ?execution_payload.parent_hash(), + msg = "the terminal block/parent was unavailable", + "Optimistically importing merge transition block" ); Ok(()) } else { diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 6e365f936d..f1da1ffc2f 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -14,11 +14,11 @@ use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes use execution_layer::json_structures::BlobAndProofV1; use execution_layer::Error as ExecutionLayerError; use metrics::{inc_counter, inc_counter_by, TryExt}; -use slog::{debug, error, o, Logger}; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; use tokio::sync::oneshot; +use tracing::{debug, error}; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::{ BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, @@ -50,11 +50,6 @@ pub async fn fetch_and_process_engine_blobs( block: Arc>>, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, ) -> Result, FetchEngineBlobError> { - let block_root_str = format!("{:?}", block_root); - let log = chain - .log - .new(o!("service" => "fetch_engine_blobs", "block_root" => block_root_str)); - let versioned_hashes = if let Some(kzg_commitments) = block .message() .body() @@ -67,10 +62,7 @@ pub async fn fetch_and_process_engine_blobs( .map(kzg_commitment_to_versioned_hash) .collect::>() } else { - debug!( - log, - "Fetch blobs not triggered - none required"; - ); + 
debug!("Fetch blobs not triggered - none required"); return Ok(None); }; @@ -81,22 +73,14 @@ pub async fn fetch_and_process_engine_blobs( .as_ref() .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; - debug!( - log, - "Fetching blobs from the EL"; - "num_expected_blobs" => num_expected_blobs, - ); + debug!(num_expected_blobs, "Fetching blobs from the EL"); let response = execution_layer .get_blobs(versioned_hashes) .await .map_err(FetchEngineBlobError::RequestFailed)?; if response.is_empty() || response.iter().all(|opt| opt.is_none()) { - debug!( - log, - "No blobs fetched from the EL"; - "num_expected_blobs" => num_expected_blobs, - ); + debug!(num_expected_blobs, "No blobs fetched from the EL"); inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); } else { @@ -154,11 +138,8 @@ pub async fn fetch_and_process_engine_blobs( // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. if num_fetched_blobs != num_expected_blobs { debug!( - log, - "Not all blobs fetched from the EL"; - "info" => "Unable to compute data columns", - "num_fetched_blobs" => num_fetched_blobs, - "num_expected_blobs" => num_expected_blobs, + info = "Unable to compute data columns", + num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" ); return Ok(None); } @@ -170,9 +151,21 @@ pub async fn fetch_and_process_engine_blobs( { // Avoid computing columns if block has already been imported. debug!( - log, - "Ignoring EL blobs response"; - "info" => "block has already been imported", + info = "block has already been imported", + "Ignoring EL blobs response" + ); + return Ok(None); + } + + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + // Avoid computing columns if block has already been imported. 
+ debug!( + info = "block has already been imported", + "Ignoring EL blobs response" ); return Ok(None); } @@ -182,7 +175,6 @@ pub async fn fetch_and_process_engine_blobs( block.clone(), fixed_blob_sidecar_list.clone(), publish_fn, - log.clone(), ); Some(data_columns_receiver) @@ -194,11 +186,7 @@ pub async fn fetch_and_process_engine_blobs( None }; - debug!( - log, - "Processing engine blobs"; - "num_fetched_blobs" => num_fetched_blobs, - ); + debug!(num_fetched_blobs, "Processing engine blobs"); let availability_processing_status = chain .process_engine_blobs( @@ -226,7 +214,6 @@ fn spawn_compute_and_publish_data_columns_task( block: Arc>>, blobs: FixedBlobSidecarList, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, - log: Logger, ) -> oneshot::Receiver>>> { let chain_cloned = chain.clone(); let (data_columns_sender, data_columns_receiver) = oneshot::channel(); @@ -254,9 +241,8 @@ fn spawn_compute_and_publish_data_columns_task( Ok(d) => d, Err(e) => { error!( - log, - "Failed to build data column sidecars from blobs"; - "error" => ?e + error = ?e, + "Failed to build data column sidecars from blobs" ); return; } @@ -266,10 +252,7 @@ fn spawn_compute_and_publish_data_columns_task( // Data column receiver have been dropped - block may have already been imported. // This race condition exists because gossip columns may arrive and trigger block // import during the computation. Here we just drop the computed columns. 
- debug!( - log, - "Failed to send computed data columns"; - ); + debug!("Failed to send computed data columns"); return; }; diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 8d1c29f46f..c500e1b4b6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,7 +1,6 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::{ForkChoice, PayloadVerificationStatus}; use itertools::process_results; -use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, @@ -10,6 +9,7 @@ use state_processing::{ use std::sync::Arc; use std::time::Duration; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; +use tracing::{info, warn}; use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. 
Check its file permissions or \ @@ -27,7 +27,6 @@ pub fn revert_to_fork_boundary, Cold: ItemStore head_block_root: Hash256, store: Arc>, spec: &ChainSpec, - log: &Logger, ) -> Result<(Hash256, SignedBeaconBlock), String> { let current_fork = spec.fork_name_at_slot::(current_slot); let fork_epoch = spec @@ -42,10 +41,9 @@ pub fn revert_to_fork_boundary, Cold: ItemStore } warn!( - log, - "Reverting invalid head block"; - "target_fork" => %current_fork, - "fork_epoch" => fork_epoch, + target_fork = %current_fork, + %fork_epoch, + "Reverting invalid head block" ); let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root); @@ -55,10 +53,9 @@ pub fn revert_to_fork_boundary, Cold: ItemStore Some((block_root, block)) } else { info!( - log, - "Reverting block"; - "block_root" => ?block_root, - "slot" => block.slot(), + ?block_root, + slot = %block.slot(), + "Reverting block" ); None } diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 8692d374ed..23d1d69b1c 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -1,11 +1,12 @@ use crate::BeaconChain; use crate::BeaconChainTypes; use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; +use logging::crit; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::{fmt::Debug, time::Duration}; use task_executor::TaskExecutor; +use tracing::{debug, error, warn}; use types::{EthSpec, Graffiti, GRAFFITI_BYTES_LEN}; const ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE: u32 = 6; // 6 epochs @@ -51,7 +52,6 @@ pub struct GraffitiCalculator { pub beacon_graffiti: GraffitiOrigin, execution_layer: Option>, pub epoch_duration: Duration, - log: Logger, } impl GraffitiCalculator { @@ -59,13 +59,11 @@ impl GraffitiCalculator { beacon_graffiti: GraffitiOrigin, execution_layer: Option>, 
epoch_duration: Duration, - log: Logger, ) -> Self { Self { beacon_graffiti, execution_layer, epoch_duration, - log, } } @@ -86,7 +84,7 @@ impl GraffitiCalculator { let Some(execution_layer) = self.execution_layer.as_ref() else { // Return default graffiti if there is no execution layer. This // shouldn't occur if we're actually producing blocks. - crit!(self.log, "No execution layer available for graffiti calculation during block production!"); + crit!("No execution layer available for graffiti calculation during block production!"); return default_graffiti; }; @@ -101,7 +99,7 @@ impl GraffitiCalculator { { Ok(engine_versions) => engine_versions, Err(el_error) => { - warn!(self.log, "Failed to determine execution engine version for graffiti"; "error" => ?el_error); + warn!(error = ?el_error, "Failed to determine execution engine version for graffiti"); return default_graffiti; } }; @@ -109,9 +107,8 @@ impl GraffitiCalculator { let Some(engine_version) = engine_versions.first() else { // Got an empty array which indicates the EL doesn't support the method debug!( - self.log, "Using default lighthouse graffiti: EL does not support {} method", - ENGINE_GET_CLIENT_VERSION_V1; + ENGINE_GET_CLIENT_VERSION_V1 ); return default_graffiti; }; @@ -119,19 +116,20 @@ impl GraffitiCalculator { // More than one version implies lighthouse is connected to // an EL multiplexer. We don't support modifying the graffiti // with these configurations. 
- warn!( - self.log, - "Execution Engine multiplexer detected, using default graffiti" - ); + warn!("Execution Engine multiplexer detected, using default graffiti"); return default_graffiti; } - let lighthouse_commit_prefix = CommitPrefix::try_from(lighthouse_version::COMMIT_PREFIX.to_string()) - .unwrap_or_else(|error_message| { - // This really shouldn't happen but we want to definitly log if it does - crit!(self.log, "Failed to parse lighthouse commit prefix"; "error" => error_message); - CommitPrefix("00000000".to_string()) - }); + let lighthouse_commit_prefix = + CommitPrefix::try_from(lighthouse_version::COMMIT_PREFIX.to_string()) + .unwrap_or_else(|error_message| { + // This really shouldn't happen but we want to definitely log if it does + crit!( + error = error_message, + "Failed to parse lighthouse commit prefix" + ); + CommitPrefix("00000000".to_string()) + }); engine_version.calculate_graffiti(lighthouse_commit_prefix) } @@ -144,36 +142,24 @@ pub fn start_engine_version_cache_refresh_service( executor: TaskExecutor, ) { let Some(el_ref) = chain.execution_layer.as_ref() else { - debug!( - chain.log, - "No execution layer configured, not starting engine version cache refresh service" - ); + debug!("No execution layer configured, not starting engine version cache refresh service"); return; }; if matches!( chain.graffiti_calculator.beacon_graffiti, GraffitiOrigin::UserSpecified(_) ) { - debug!( - chain.log, - "Graffiti is user-specified, not starting engine version cache refresh service" - ); + debug!("Graffiti is user-specified, not starting engine version cache refresh service"); return; } let execution_layer = el_ref.clone(); - let log = chain.log.clone(); let slot_clock = chain.slot_clock.clone(); let epoch_duration = chain.graffiti_calculator.epoch_duration; executor.spawn( async move { - engine_version_cache_refresh_service::( - execution_layer, - slot_clock, - epoch_duration, - log, - ) - .await +
engine_version_cache_refresh_service::(execution_layer, slot_clock, epoch_duration) + .await }, "engine_version_cache_refresh_service", ); @@ -183,13 +169,15 @@ async fn engine_version_cache_refresh_service( execution_layer: ExecutionLayer, slot_clock: T::SlotClock, epoch_duration: Duration, - log: Logger, ) { // Preload the engine version cache after a brief delay to allow for EL initialization. // This initial priming ensures cache readiness before the service's regular update cycle begins. tokio::time::sleep(ENGINE_VERSION_CACHE_PRELOAD_STARTUP_DELAY).await; if let Err(e) = execution_layer.get_engine_version(None).await { - debug!(log, "Failed to preload engine version cache"; "error" => format!("{:?}", e)); + debug!( + error = ?e, + "Failed to preload engine version cache" + ); } // this service should run 3/8 of the way through the epoch @@ -203,18 +191,14 @@ async fn engine_version_cache_refresh_service( let firing_delay = partial_firing_delay + duration_to_next_epoch + epoch_delay; tokio::time::sleep(firing_delay).await; - debug!( - log, - "Engine version cache refresh service firing"; - ); + debug!("Engine version cache refresh service firing"); match execution_layer.get_engine_version(None).await { - Err(e) => warn!(log, "Failed to populate engine version cache"; "error" => ?e), + Err(e) => warn!( error = ?e, "Failed to populate engine version cache"), Ok(versions) => { if versions.is_empty() { // Empty array indicates the EL doesn't support the method debug!( - log, "EL does not support {} method. Sleeping twice as long before retry", ENGINE_GET_CLIENT_VERSION_V1 ); @@ -227,7 +211,7 @@ async fn engine_version_cache_refresh_service( } } None => { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. 
tokio::time::sleep(slot_clock.slot_duration()).await; } @@ -241,10 +225,10 @@ mod tests { use crate::ChainConfig; use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use execution_layer::EngineCapabilities; - use slog::info; use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; + use tracing::info; use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; const VALIDATOR_COUNT: usize = 48; @@ -261,7 +245,6 @@ mod tests { .spec(spec) .chain_config(chain_config.unwrap_or_default()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .logger(logging::test_logger()) .fresh_ephemeral_store() .mock_execution_layer() .build(); @@ -302,7 +285,10 @@ mod tests { let graffiti_str = std::str::from_utf8(graffiti_slice).expect("bytes should convert nicely to ascii"); - info!(harness.chain.log, "results"; "lighthouse_version" => lighthouse_version::VERSION, "graffiti_str" => graffiti_str); + info!( + lighthouse_version = lighthouse_version::VERSION, + graffiti_str, "results" + ); println!("lighthouse_version: '{}'", lighthouse_version::VERSION); println!("graffiti_str: '{}'", graffiti_str); @@ -339,7 +325,7 @@ mod tests { std::str::from_utf8(&found_graffiti_bytes[..expected_graffiti_prefix_len]) .expect("bytes should convert nicely to ascii"); - info!(harness.chain.log, "results"; "expected_graffiti_string" => &expected_graffiti_string, "found_graffiti_string" => &found_graffiti_string); + info!(expected_graffiti_string, found_graffiti_string, "results"); println!("expected_graffiti_string: '{}'", expected_graffiti_string); println!("found_graffiti_string: '{}'", found_graffiti_string); diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index a9caeb18bb..7169c86174 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,7 +1,6 @@ use 
crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; -use slog::debug; use state_processing::{ per_block_processing::ParallelSignatureSets, signature_sets::{block_proposal_signature_set_from_parts, Error as SignatureSetError}, @@ -12,6 +11,7 @@ use std::time::Duration; use store::metadata::DataColumnInfo; use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; +use tracing::debug; use types::{FixedBytesExtended, Hash256, Slot}; /// Use a longer timeout on the pubkey cache. @@ -82,11 +82,10 @@ impl BeaconChain { if blocks_to_import.len() != total_blocks { debug!( - self.log, - "Ignoring some historic blocks"; - "oldest_block_slot" => anchor_info.oldest_block_slot, - "total_blocks" => total_blocks, - "ignored" => total_blocks.saturating_sub(blocks_to_import.len()), + oldest_block_slot = %anchor_info.oldest_block_slot, + total_blocks, + ignored = total_blocks.saturating_sub(blocks_to_import.len()), + "Ignoring some historic blocks" ); } diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 78442d8df0..c9173dc0d7 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -2,12 +2,12 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; -use slog::{debug, Logger}; use ssz::Decode; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; +use tracing::debug; use tree_hash::TreeHash; use types::non_zero_usize::new_non_zero_usize; use types::{ @@ -82,7 +82,6 @@ impl LightClientServerCache { block_slot: Slot, block_parent_root: &Hash256, sync_aggregate: &SyncAggregate, - log: &Logger, chain_spec: &ChainSpec, ) -> 
Result<(), BeaconChainError> { metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PROCESSING_REQUESTS); @@ -170,9 +169,8 @@ impl LightClientServerCache { )?); } else { debug!( - log, - "Finalized block not available in store for light_client server"; - "finalized_block_root" => format!("{}", cached_parts.finalized_block_root), + finalized_block_root = %cached_parts.finalized_block_root, + "Finalized block not available in store for light_client server" ); } } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index bc4b8e1ed8..b64da00e76 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -3,7 +3,6 @@ use crate::errors::BeaconChainError; use crate::head_tracker::{HeadTracker, SszHeadTracker}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use parking_lot::Mutex; -use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::mem; use std::sync::{mpsc, Arc}; @@ -13,6 +12,7 @@ use store::hot_cold_store::{migrate_database, HotColdDBError}; use store::iter::RootsIterator; use store::{Error, ItemStore, StoreItem, StoreOp}; pub use store::{HotColdDB, MemoryStore}; +use tracing::{debug, error, info, warn}; use types::{ BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlockHash, Slot, @@ -44,7 +44,6 @@ pub struct BackgroundMigrator, Cold: ItemStore> tx_thread: Option, thread::JoinHandle<()>)>>, /// Genesis block root, for persisting the `PersistedBeaconChain`. genesis_block_root: Hash256, - log: Logger, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -140,7 +139,6 @@ impl, Cold: ItemStore> BackgroundMigrator>, config: MigratorConfig, genesis_block_root: Hash256, - log: Logger, ) -> Self { // Estimate last migration run from DB split slot. 
let prev_migration = Arc::new(Mutex::new(PrevMigration { @@ -150,14 +148,13 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, opt_tx: Option>, - log: &Logger, ) { match db.reconstruct_historic_states(Some(BLOCKS_PER_RECONSTRUCTION)) { Ok(()) => { @@ -221,9 +217,8 @@ impl, Cold: ItemStore> BackgroundMigrator ?e + error = ?e, + "Unable to requeue reconstruction notification" ); } } @@ -231,24 +226,18 @@ impl, Cold: ItemStore> BackgroundMigrator { error!( - log, - "State reconstruction failed"; - "error" => ?e, + error = ?e, + "State reconstruction failed" ); } } } - pub fn run_prune_blobs( - db: Arc>, - data_availability_boundary: Epoch, - log: &Logger, - ) { + pub fn run_prune_blobs(db: Arc>, data_availability_boundary: Epoch) { if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) { error!( - log, - "Blob pruning failed"; - "error" => ?e, + error = ?e, + "Blob pruning failed" ); } } @@ -264,7 +253,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator format!("{:?}", thread_err) + reason = ?thread_err, + "Migration thread died, so it was restarted" ); } @@ -290,21 +278,16 @@ impl, Cold: ItemStore> BackgroundMigrator>, - notif: FinalizationNotification, - log: &Logger, - ) { + fn run_migration(db: Arc>, notif: FinalizationNotification) { // Do not run too frequently. 
let epoch = notif.finalized_checkpoint.epoch; let mut prev_migration = notif.prev_migration.lock(); if epoch < prev_migration.epoch + prev_migration.epochs_per_migration { debug!( - log, - "Database consolidation deferred"; - "last_finalized_epoch" => prev_migration.epoch, - "new_finalized_epoch" => epoch, - "epochs_per_migration" => prev_migration.epochs_per_migration, + last_finalized_epoch = %prev_migration.epoch, + new_finalized_epoch = %epoch, + epochs_per_migration = prev_migration.epochs_per_migration, + "Database consolidation deferred" ); return; } @@ -315,7 +298,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator state, other => { error!( - log, - "Migrator failed to load state"; - "state_root" => ?finalized_state_root, - "error" => ?other + state_root = ?finalized_state_root, + error = ?other, + "Migrator failed to load state" ); return; } @@ -340,16 +322,14 @@ impl, Cold: ItemStore> BackgroundMigrator old_finalized_checkpoint, Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( - log, - "Pruning deferred because of a concurrent mutation"; - "message" => "this is expected only very rarely!" 
+ message = "this is expected only very rarely!", + "Pruning deferred because of a concurrent mutation" ); return; } @@ -358,16 +338,15 @@ impl, Cold: ItemStore> BackgroundMigrator { warn!( - log, - "Ignoring out of order finalization request"; - "old_finalized_epoch" => old_finalized_checkpoint.epoch, - "new_finalized_epoch" => new_finalized_checkpoint.epoch, - "message" => "this is expected occasionally due to a (harmless) race condition" + old_finalized_epoch = %old_finalized_checkpoint.epoch, + new_finalized_epoch = %new_finalized_checkpoint.epoch, + message = "this is expected occasionally due to a (harmless) race condition", + "Ignoring out of order finalization request" ); return; } Err(e) => { - warn!(log, "Block pruning failed"; "error" => ?e); + warn!(error = ?e,"Block pruning failed"); return; } }; @@ -381,17 +360,12 @@ impl, Cold: ItemStore> BackgroundMigrator {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( - log, - "Database migration postponed, unaligned finalized block"; - "slot" => slot.as_u64() + slot = slot.as_u64(), + "Database migration postponed, unaligned finalized block" ); } Err(e) => { - warn!( - log, - "Database migration failed"; - "error" => format!("{:?}", e) - ); + warn!(error = ?e, "Database migration failed"); return; } }; @@ -401,12 +375,11 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", e)); + warn!(error = ?e, "Database compaction failed"); } - debug!(log, "Database consolidation complete"); + debug!("Database consolidation complete"); } /// Spawn a new child thread to run the migration process. 
@@ -414,7 +387,6 @@ impl, Cold: ItemStore> BackgroundMigrator>, - log: Logger, ) -> (mpsc::Sender, thread::JoinHandle<()>) { let (tx, rx) = mpsc::channel(); let inner_tx = tx.clone(); @@ -452,13 +424,13 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, new_finalized_checkpoint: Checkpoint, genesis_block_root: Hash256, - log: &Logger, ) -> Result { let old_finalized_checkpoint = store @@ -515,10 +486,9 @@ impl, Cold: ItemStore> BackgroundMigrator old_finalized_checkpoint.epoch, - "new_finalized_epoch" => new_finalized_checkpoint.epoch, + old_finalized_epoch = %old_finalized_checkpoint.epoch, + new_finalized_epoch = %new_finalized_checkpoint.epoch, + "Starting database pruning" ); // For each slot between the new finalized checkpoint and the old finalized checkpoint, // collect the beacon block root and state root of the canonical chain. @@ -546,11 +516,10 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", old_finalized_checkpoint.root), - "new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root), - "head_count" => heads.len(), + old_finalized_root = ?old_finalized_checkpoint.root, + new_finalized_root = ?new_finalized_checkpoint.root, + head_count = heads.len(), + "Extra pruning information" ); for (head_hash, head_slot) in heads { @@ -565,10 +534,9 @@ impl, Cold: ItemStore> BackgroundMigrator { warn!( - log, - "Forgetting invalid head block"; - "block_root" => ?head_hash, - "error" => ?e, + block_root = ?head_hash, + error = ?e, + "Forgetting invalid head block" ); abandoned_heads.insert(head_hash); continue; @@ -606,10 +574,9 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", head_hash), - "head_slot" => head_slot, + head_block_root = ?head_hash, + %head_slot, + "Found a chain that should already have been pruned" ); potentially_abandoned_head.take(); break; @@ -663,10 +630,9 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", abandoned_head), - "head_slot" => head_slot, + head_block_root = 
?abandoned_head, + %head_slot, + "Pruning head" ); abandoned_heads.insert(abandoned_head); abandoned_blocks.extend( @@ -740,7 +706,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, old_finalized_epoch: Epoch, new_finalized_epoch: Epoch, - log: &Logger, ) -> Result<(), Error> { if !db.compact_on_prune() { return Ok(()); @@ -775,10 +740,9 @@ impl, Cold: ItemStore> BackgroundMigrator MIN_COMPACTION_PERIOD_SECONDS) { info!( - log, - "Starting database compaction"; - "old_finalized_epoch" => old_finalized_epoch, - "new_finalized_epoch" => new_finalized_epoch, + %old_finalized_epoch, + %new_finalized_epoch, + "Starting database compaction" ); db.compact()?; @@ -787,7 +751,7 @@ impl, Cold: ItemStore> BackgroundMigrator::EthSpec> + pub fn from_block(block: BeaconBlockRef) -> Self { + Self { + root: block.tree_hash_root(), + slot: block.slot(), + } + } + + pub fn root(&self) -> &Hash256 { + &self.root + } + + pub fn slot(&self) -> &Slot { + &self.slot + } + + pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + if store + .as_ref() + .item_exists::(&self.root)? + { + Ok(()) + } else { + store.as_ref().put_item(&self.root, self) + } + } + + pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + store + .as_ref() + .hot_db + .key_delete(OTBColumn.into(), self.root.as_slice()) + } + + fn is_canonical( + &self, + chain: &BeaconChain, + ) -> Result { + Ok(chain + .forwards_iter_block_roots_until(self.slot, self.slot)? + .next() + .transpose()? + .map(|(root, _)| root) + == Some(self.root)) + } +} + +impl StoreItem for OptimisticTransitionBlock { + fn db_column() -> DBColumn { + OTBColumn + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) 
+ } +} + +/// The routine is expected to run once per epoch, 1/4th through the epoch. +pub const EPOCH_DELAY_FACTOR: u32 = 4; + +/// Spawns a routine which checks the validity of any optimistically imported transition blocks +/// +/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after +/// the start of each epoch. +/// +/// The service will not be started if there is no `execution_layer` on the `chain`. +pub fn start_otb_verification_service( + executor: TaskExecutor, + chain: Arc>, +) { + // Avoid spawning the service if there's no EL, it'll just error anyway. + if chain.execution_layer.is_some() { + executor.spawn( + async move { otb_verification_service(chain).await }, + "otb_verification_service", + ); + } +} + +pub fn load_optimistic_transition_blocks( + chain: &BeaconChain, +) -> Result, StoreError> { + process_results( + chain.store.hot_db.iter_column::(OTBColumn), + |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + }, + )? +} + +#[derive(Debug)] +pub enum Error { + ForkChoice(String), + BeaconChain(BeaconChainError), + StoreError(StoreError), + NoBlockFound(OptimisticTransitionBlock), +} + +pub async fn validate_optimistic_transition_blocks( + chain: &Arc>, + otbs: Vec, +) -> Result<(), Error> { + let finalized_slot = chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? 
+ .slot; + + // separate otbs into + // non-canonical + // finalized canonical + // unfinalized canonical + let mut non_canonical_otbs = vec![]; + let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( + otbs.into_iter().map(|otb| { + otb.is_canonical(chain) + .map(|is_canonical| (otb, is_canonical)) + }), + |pair_iter| { + pair_iter + .filter_map(|(otb, is_canonical)| { + if is_canonical { + Some(otb) + } else { + non_canonical_otbs.push(otb); + None + } + }) + .partition::, _>(|otb| *otb.slot() <= finalized_slot) + }, + ) + .map_err(Error::BeaconChain)?; + + // remove non-canonical blocks that conflict with finalized checkpoint from the database + for otb in non_canonical_otbs { + if *otb.slot() <= finalized_slot { + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + } + } + + // ensure finalized canonical otb are valid, otherwise kill client + for otb in finalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + block_root = %otb.root(), + "type" = "finalized", + "Validated merge transition block" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Finalized Merge Transition Block is Invalid! Kill the Client! + crit!( + msg = "You must use the `--purge-db` flag to clear the database and restart sync. 
\ + You may be on a hostile network.", + block_hash = ?block.canonical_root(), + "Finalized merge transition block is invalid!" + ); + let mut shutdown_sender = chain.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + )) { + crit!( + error = ?e, + shutdown_reason = INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + "Failed to shut down client" + ); + } + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + // attempt to validate any non-finalized canonical otb blocks + for otb in unfinalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + block_root = ?otb.root(), + "type" = "not finalized", + "Validated merge transition block" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. 
}, + )) => { + // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload + warn!( + block_root = ?otb.root(), + "Merge transition block invalid" + ); + chain + .process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: *otb.root(), + }, + ) + .await + .map_err(|e| { + warn!( + error = ?e, + location = "process_invalid_execution_payload", + "Error checking merge transition block" + ); + Error::BeaconChain(e) + })?; + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + Ok(()) +} + +/// Loop until any optimistically imported merge transition blocks have been verified and +/// the merge has been finalized. +async fn otb_verification_service(chain: Arc>) { + let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; + loop { + match chain + .slot_clock + .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) + { + Some(duration) => { + let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; + sleep(duration + additional_delay).await; + + debug!("OTB verification service firing"); + + if !is_merge_transition_complete( + &chain.canonical_head.cached_head().snapshot.beacon_state, + ) { + // We are pre-merge. Nothing to do yet. 
+ continue; + } + + // load all optimistically imported transition blocks from the database + match load_optimistic_transition_blocks(chain.as_ref()) { + Ok(otbs) => { + if otbs.is_empty() { + if chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_or(false, |block| { + block.execution_status.is_execution_enabled() + }) + { + // there are no optimistic blocks in the database, we can exit + // the service since the merge transition is finalized and we'll + // never see another transition block + break; + } else { + debug!( + info = "waiting for the merge transition to finalize", + "No optimistic transition blocks" + ) + } + } + if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { + warn!( + error = ?e, + "Error while validating optimistic transition blocks" + ); + } + } + Err(e) => { + error!( + error = ?e, + "Error loading optimistic transition blocks" + ); + } + }; + } + None => { + error!("Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(chain.slot_clock.slot_duration()).await; + } + }; + } + debug!( + msg = "shutting down OTB verification service", + "No optimistic transition blocks in database" + ); +} diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 22b76e026c..5bd45dc59f 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -2,9 +2,9 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use itertools::process_results; use lru::LruCache; use parking_lot::Mutex; -use slog::debug; use std::num::NonZeroUsize; use std::time::Duration; +use tracing::debug; use types::non_zero_usize::new_non_zero_usize; use types::Hash256; @@ -87,10 +87,7 @@ impl BeaconChain { // blocks have been flushed out. 
Solving this issue isn't as simple as hooking the // beacon processor's functions that handle failed blocks because we need the block root // and it has been erased from the `BlockError` by that point. - debug!( - self.log, - "Pre-finalization lookup cache is full"; - ); + debug!("Pre-finalization lookup cache is full"); } Ok(false) } diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 140a9659fc..14f7414abc 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -1,9 +1,9 @@ use crate::{BeaconChain, BeaconChainTypes}; -use slog::{debug, error}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::sleep; +use tracing::{debug, error}; /// Spawns a routine which ensures the EL is provided advance notice of any block producers. /// @@ -38,10 +38,7 @@ async fn proposer_prep_service( slot_duration.saturating_sub(chain.config.prepare_payload_lookahead); sleep(duration + additional_delay).await; - debug!( - chain.log, - "Proposer prepare routine firing"; - ); + debug!("Proposer prepare routine firing"); let inner_chain = chain.clone(); executor.spawn( @@ -50,20 +47,19 @@ async fn proposer_prep_service( if let Err(e) = inner_chain.prepare_beacon_proposer(current_slot).await { error!( - inner_chain.log, - "Proposer prepare routine failed"; - "error" => ?e + error = ?e, + "Proposer prepare routine failed" ); } } else { - debug!(inner_chain.log, "No slot for proposer prepare routine"); + debug!("No slot for proposer prepare routine"); } }, "proposer_prep_update", ); } None => { - error!(chain.log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. 
sleep(slot_duration).await; } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 9504901229..ccfae1b182 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -4,7 +4,6 @@ mod migration_schema_v21; mod migration_schema_v22; use crate::beacon_chain::BeaconChainTypes; -use slog::Logger; use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; @@ -17,7 +16,6 @@ pub fn migrate_schema( genesis_state_root: Option, from: SchemaVersion, to: SchemaVersion, - log: Logger, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. @@ -25,39 +23,39 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), genesis_state_root, from, next, log.clone())?; - migrate_schema::(db, genesis_state_root, next, to, log) + migrate_schema::(db.clone(), genesis_state_root, from, next)?; + migrate_schema::(db, genesis_state_root, next, to) } // Downgrade across multiple versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), genesis_state_root, from, next, log.clone())?; - migrate_schema::(db, genesis_state_root, next, to, log) + migrate_schema::(db.clone(), genesis_state_root, from, next)?; + migrate_schema::(db, genesis_state_root, next, to) } // // Migrations from before SchemaVersion(19) are deprecated. 
// (SchemaVersion(19), SchemaVersion(20)) => { - let ops = migration_schema_v20::upgrade_to_v20::(db.clone(), log)?; + let ops = migration_schema_v20::upgrade_to_v20::(db.clone())?; db.store_schema_version_atomically(to, ops) } (SchemaVersion(20), SchemaVersion(19)) => { - let ops = migration_schema_v20::downgrade_from_v20::(db.clone(), log)?; + let ops = migration_schema_v20::downgrade_from_v20::(db.clone())?; db.store_schema_version_atomically(to, ops) } (SchemaVersion(20), SchemaVersion(21)) => { - let ops = migration_schema_v21::upgrade_to_v21::(db.clone(), log)?; + let ops = migration_schema_v21::upgrade_to_v21::(db.clone())?; db.store_schema_version_atomically(to, ops) } (SchemaVersion(21), SchemaVersion(20)) => { - let ops = migration_schema_v21::downgrade_from_v21::(db.clone(), log)?; + let ops = migration_schema_v21::downgrade_from_v21::(db.clone())?; db.store_schema_version_atomically(to, ops) } (SchemaVersion(21), SchemaVersion(22)) => { // This migration needs to sync data between hot and cold DBs. The schema version is // bumped inside the upgrade_to_v22 fn - migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root, log) + migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root) } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs index d556d5988d..13fde349f5 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs @@ -2,16 +2,15 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; use operation_pool::{ PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, }; -use slog::{debug, info, Logger}; use std::sync::Arc; use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use tracing::{debug, info}; use types::Attestation; pub fn upgrade_to_v20( db: Arc>, - log: Logger, ) -> Result, Error> { - info!(log, "Upgrading from v19 to v20"); + info!("Upgrading from v19 to v20"); // Load a V15 op pool and transform it to V20. let Some(PersistedOperationPoolV15:: { @@ -24,7 +23,7 @@ pub fn upgrade_to_v20( capella_bls_change_broadcast_indices, }) = db.get_item(&OP_POOL_DB_KEY)? else { - debug!(log, "Nothing to do, no operation pool stored"); + debug!("Nothing to do, no operation pool stored"); return Ok(vec![]); }; @@ -52,9 +51,8 @@ pub fn upgrade_to_v20( pub fn downgrade_from_v20( db: Arc>, - log: Logger, ) -> Result, Error> { - info!(log, "Downgrading from v20 to v19"); + info!("Downgrading from v20 to v19"); // Load a V20 op pool and transform it to V15. let Some(PersistedOperationPoolV20:: { @@ -67,7 +65,7 @@ pub fn downgrade_from_v20( capella_bls_change_broadcast_indices, }) = db.get_item(&OP_POOL_DB_KEY)? 
else { - debug!(log, "Nothing to do, no operation pool stored"); + debug!("Nothing to do, no operation pool stored"); return Ok(vec![]); }; @@ -77,7 +75,10 @@ pub fn downgrade_from_v20( if let Attestation::Base(attestation) = attestation.into() { Some((attestation, indices)) } else { - info!(log, "Dropping attestation during downgrade"; "reason" => "not a base attestation"); + info!( + reason = "not a base attestation", + "Dropping attestation during downgrade" + ); None } }) @@ -88,7 +89,10 @@ pub fn downgrade_from_v20( .filter_map(|slashing| match slashing.try_into() { Ok(slashing) => Some(slashing), Err(_) => { - info!(log, "Dropping attester slashing during downgrade"; "reason" => "not a base attester slashing"); + info!( + reason = "not a base attester slashing", + "Dropping attester slashing during downgrade" + ); None } }) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs index f02f5ee6f3..d73660cf3c 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs @@ -1,18 +1,17 @@ use crate::beacon_chain::BeaconChainTypes; use crate::validator_pubkey_cache::DatabasePubkey; -use slog::{info, Logger}; use ssz::{Decode, Encode}; use std::sync::Arc; use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; +use tracing::info; use types::{Hash256, PublicKey}; const LOG_EVERY: usize = 200_000; pub fn upgrade_to_v21( db: Arc>, - log: Logger, ) -> Result, Error> { - info!(log, "Upgrading from v20 to v21"); + info!("Upgrading from v20 to v21"); let mut ops = vec![]; @@ -29,22 +28,20 @@ pub fn upgrade_to_v21( if i > 0 && i % LOG_EVERY == 0 { info!( - log, - "Public key decompression in progress"; - "keys_decompressed" => i + keys_decompressed = i, + "Public key decompression in progress" ); } } - info!(log, "Public key decompression complete"); + 
info!("Public key decompression complete"); Ok(ops) } pub fn downgrade_from_v21( db: Arc>, - log: Logger, ) -> Result, Error> { - info!(log, "Downgrading from v21 to v20"); + info!("Downgrading from v21 to v20"); let mut ops = vec![]; @@ -67,15 +64,11 @@ pub fn downgrade_from_v21( )); if i > 0 && i % LOG_EVERY == 0 { - info!( - log, - "Public key compression in progress"; - "keys_compressed" => i - ); + info!(keys_compressed = i, "Public key compression in progress"); } } - info!(log, "Public key compression complete"); + info!("Public key compression complete"); Ok(ops) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index 982c3ded46..0b64fdbe08 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -1,5 +1,4 @@ use crate::beacon_chain::BeaconChainTypes; -use slog::{info, Logger}; use std::sync::Arc; use store::chunked_iter::ChunkedVectorIter; use store::{ @@ -10,6 +9,7 @@ use store::{ partial_beacon_state::PartialBeaconState, AnchorInfo, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, }; +use tracing::info; use types::{BeaconState, Hash256, Slot}; const LOG_EVERY: usize = 200_000; @@ -40,9 +40,8 @@ fn load_old_schema_frozen_state( pub fn upgrade_to_v22( db: Arc>, genesis_state_root: Option, - log: Logger, ) -> Result<(), Error> { - info!(log, "Upgrading from v21 to v22"); + info!("Upgrading from v21 to v22"); let old_anchor = db.get_anchor_info(); @@ -71,9 +70,8 @@ pub fn upgrade_to_v22( // this write. 
if split_slot > 0 { info!( - log, - "Re-storing genesis state"; - "state_root" => ?genesis_state_root, + state_root = ?genesis_state_root, + "Re-storing genesis state" ); db.store_cold_state(&genesis_state_root, &genesis_state, &mut cold_ops)?; } @@ -87,7 +85,6 @@ pub fn upgrade_to_v22( oldest_block_slot, split_slot, &mut cold_ops, - &log, )?; // Commit this first batch of non-destructive cold database ops. @@ -107,14 +104,13 @@ pub fn upgrade_to_v22( db.store_schema_version_atomically(SchemaVersion(22), hot_ops)?; // Finally, clean up the old-format data from the freezer database. - delete_old_schema_freezer_data::(&db, &log)?; + delete_old_schema_freezer_data::(&db)?; Ok(()) } pub fn delete_old_schema_freezer_data( db: &Arc>, - log: &Logger, ) -> Result<(), Error> { let mut cold_ops = vec![]; @@ -140,11 +136,7 @@ pub fn delete_old_schema_freezer_data( } let delete_ops = cold_ops.len(); - info!( - log, - "Deleting historic states"; - "delete_ops" => delete_ops, - ); + info!(delete_ops, "Deleting historic states"); db.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. @@ -159,13 +151,11 @@ pub fn write_new_schema_block_roots( oldest_block_slot: Slot, split_slot: Slot, cold_ops: &mut Vec, - log: &Logger, ) -> Result<(), Error> { info!( - log, - "Starting beacon block root migration"; - "oldest_block_slot" => oldest_block_slot, - "genesis_block_root" => ?genesis_block_root, + %oldest_block_slot, + ?genesis_block_root, + "Starting beacon block root migration" ); // Store the genesis block root if it would otherwise not be stored. 
@@ -196,9 +186,8 @@ pub fn write_new_schema_block_roots( if i > 0 && i % LOG_EVERY == 0 { info!( - log, - "Beacon block root migration in progress"; - "roots_migrated" => i + roots_migrated = i, + "Beacon block root migration in progress" ); } } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index dec73a763f..1aa23c28fc 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -2,9 +2,8 @@ use std::collections::HashMap; use std::sync::Arc; use itertools::Itertools; -use slog::{debug, Logger}; - use oneshot_broadcast::{oneshot, Receiver, Sender}; +use tracing::debug; use types::{ beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, RelativeEpoch, @@ -61,16 +60,14 @@ pub struct ShufflingCache { cache: HashMap, cache_size: usize, head_shuffling_ids: BlockShufflingIds, - logger: Logger, } impl ShufflingCache { - pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self { + pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds) -> Self { Self { cache: HashMap::new(), cache_size, head_shuffling_ids, - logger, } } @@ -179,10 +176,9 @@ impl ShufflingCache { for shuffling_id in shuffling_ids_to_prune.iter() { debug!( - self.logger, - "Removing old shuffling from cache"; - "shuffling_epoch" => shuffling_id.shuffling_epoch, - "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block + shuffling_epoch = %shuffling_id.shuffling_epoch, + shuffling_decision_block = ?shuffling_id.shuffling_decision_block, + "Removing old shuffling from cache" ); self.cache.remove(shuffling_id); } @@ -294,10 +290,10 @@ impl BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use task_executor::test_utils::test_logger; use types::*; use crate::test_utils::EphemeralHarnessType; + use logging::create_test_tracing_subscriber; use super::*; @@ -308,6 +304,8 @@ 
mod test { // Creates a new shuffling cache for testing fn new_shuffling_cache() -> ShufflingCache { + create_test_tracing_subscriber(); + let current_epoch = 8; let head_shuffling_ids = BlockShufflingIds { current: shuffling_id(current_epoch), @@ -315,8 +313,8 @@ mod test { previous: Some(shuffling_id(current_epoch - 1)), block_root: Hash256::from_low_u64_le(0), }; - let logger = test_logger(); - ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger) + + ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids) } /// Returns two different committee caches for testing. diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 1d8bfff216..f4216ef76d 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -17,7 +17,6 @@ use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOC use crate::{ chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, }; -use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::sync::{ @@ -27,6 +26,7 @@ use std::sync::{ use store::KeyValueStore; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; +use tracing::{debug, error, warn}; use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform @@ -107,10 +107,9 @@ impl Lock { pub fn spawn_state_advance_timer( executor: TaskExecutor, beacon_chain: Arc>, - log: Logger, ) { executor.spawn( - state_advance_timer(executor.clone(), beacon_chain, log), + state_advance_timer(executor.clone(), beacon_chain), "state_advance_timer", ); } @@ -119,7 +118,6 @@ pub fn spawn_state_advance_timer( async fn state_advance_timer( executor: TaskExecutor, beacon_chain: Arc>, - log: Logger, ) { 
let is_running = Lock::new(); let slot_clock = &beacon_chain.slot_clock; @@ -127,7 +125,7 @@ async fn state_advance_timer( loop { let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; continue; @@ -161,9 +159,8 @@ async fn state_advance_timer( Ok(slot) => slot, Err(e) => { warn!( - log, - "Unable to determine slot in state advance timer"; - "error" => ?e + error = ?e, + "Unable to determine slot in state advance timer" ); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; @@ -173,37 +170,27 @@ async fn state_advance_timer( // Only spawn the state advance task if the lock was previously free. if !is_running.lock() { - let log = log.clone(); let beacon_chain = beacon_chain.clone(); let is_running = is_running.clone(); executor.spawn_blocking( move || { - match advance_head(&beacon_chain, &log) { + match advance_head(&beacon_chain) { Ok(()) => (), Err(Error::BeaconChain(e)) => error!( - log, - "Failed to advance head state"; - "error" => ?e - ), - Err(Error::StateAlreadyAdvanced { block_root }) => debug!( - log, - "State already advanced on slot"; - "block_root" => ?block_root + error = ?e, + "Failed to advance head state" ), + Err(Error::StateAlreadyAdvanced { block_root }) => { + debug!(?block_root, "State already advanced on slot") + } Err(Error::MaxDistanceExceeded { current_slot, head_slot, - }) => debug!( - log, - "Refused to advance head state"; - "head_slot" => head_slot, - "current_slot" => current_slot, - ), + }) => debug!(%head_slot, %current_slot, "Refused to advance head state"), other => warn!( - log, - "Did not advance head state"; - "reason" => ?other + reason = ?other, + "Did not advance head state" ), }; @@ -214,9 +201,8 @@ async fn state_advance_timer( ); } else { warn!( - log, - "State advance routine 
overloaded"; - "msg" => "system resources may be overloaded" + msg = "system resources may be overloaded", + "State advance routine overloaded" ) } @@ -225,7 +211,6 @@ async fn state_advance_timer( // Wait for the fork choice instant (which may already be past). sleep_until(fork_choice_instant).await; - let log = log.clone(); let beacon_chain = beacon_chain.clone(); let next_slot = current_slot + 1; executor.spawn( @@ -245,10 +230,9 @@ async fn state_advance_timer( .await .unwrap_or_else(|e| { warn!( - log, - "Unable to prepare proposer with lookahead"; - "error" => ?e, - "slot" => next_slot, + error = ?e, + slot = %next_slot, + "Unable to prepare proposer with lookahead" ); None }); @@ -261,10 +245,9 @@ async fn state_advance_timer( if let Some(tx) = &beacon_chain.fork_choice_signal_tx { if let Err(e) = tx.notify_fork_choice_complete(next_slot) { warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, + error = ?e, + slot = %next_slot, + "Error signalling fork choice waiter" ); } } @@ -282,10 +265,7 @@ async fn state_advance_timer( /// slot then placed in the `state_cache` to be used for block verification. /// /// See the module-level documentation for rationale. -fn advance_head( - beacon_chain: &Arc>, - log: &Logger, -) -> Result<(), Error> { +fn advance_head(beacon_chain: &Arc>) -> Result<(), Error> { let current_slot = beacon_chain.slot()?; // These brackets ensure that the `head_slot` value is dropped before we run fork choice and @@ -344,10 +324,9 @@ fn advance_head( // Expose Prometheus metrics. 
if let Err(e) = summary.observe_metrics() { error!( - log, - "Failed to observe epoch summary metrics"; - "src" => "state_advance_timer", - "error" => ?e + src = "state_advance_timer", + error = ?e, + "Failed to observe epoch summary metrics" ); } @@ -362,20 +341,18 @@ fn advance_head( .process_validator_statuses(state.current_epoch(), &summary, &beacon_chain.spec) { error!( - log, - "Unable to process validator statuses"; - "error" => ?e + error = ?e, + "Unable to process validator statuses" ); } } } debug!( - log, - "Advanced head state one slot"; - "head_block_root" => ?head_block_root, - "state_slot" => state.slot(), - "current_slot" => current_slot, + ?head_block_root, + state_slot = %state.slot(), + %current_slot, + "Advanced head state one slot" ); // Build the current epoch cache, to prepare to compute proposer duties. @@ -420,12 +397,11 @@ fn advance_head( .insert_committee_cache(shuffling_id.clone(), committee_cache); debug!( - log, - "Primed proposer and attester caches"; - "head_block_root" => ?head_block_root, - "next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block, - "state_epoch" => state.current_epoch(), - "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()), + ?head_block_root, + next_epoch_shuffling_root = ?shuffling_id.shuffling_decision_block, + state_epoch = %state.current_epoch(), + current_epoch = %current_slot.epoch(T::EthSpec::slots_per_epoch()), + "Primed proposer and attester caches" ); } @@ -447,13 +423,12 @@ fn advance_head( let current_slot = beacon_chain.slot()?; if starting_slot < current_slot { warn!( - log, - "State advance too slow"; - "head_block_root" => %head_block_root, - "advanced_slot" => final_slot, - "current_slot" => current_slot, - "starting_slot" => starting_slot, - "msg" => "system resources may be overloaded", + %head_block_root, + advanced_slot = %final_slot, + %current_slot, + %starting_slot, + msg = "system resources may be overloaded", + "State advance too slow" ); } @@ -473,11 
+448,10 @@ fn advance_head( drop(txn_lock); debug!( - log, - "Completed state advance"; - "head_block_root" => ?head_block_root, - "advanced_slot" => final_slot, - "initial_slot" => initial_slot, + ?head_block_root, + advanced_slot = %final_slot, + %initial_slot, + "Completed state advance" ); Ok(()) diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 9b35cff943..e3ff5f4ab2 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -2,10 +2,10 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::SyncCommitteeReward; use safe_arith::SafeArith; -use slog::error; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; use std::collections::HashMap; use store::RelativeEpoch; +use tracing::error; use types::{AbstractExecPayload, BeaconBlockRef, BeaconState}; impl BeaconChain { @@ -31,8 +31,8 @@ impl BeaconChain { let (participant_reward_value, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, spec).map_err(|e| { error!( - self.log, "Error calculating sync aggregate rewards"; - "error" => ?e + error = ?e, + "Error calculating sync aggregate rewards" ); BeaconChainError::SyncCommitteeRewardsSyncError })?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fcc9340715..457687fa21 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -35,6 +35,7 @@ pub use genesis::{InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; +use logging::create_test_tracing_subscriber; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; use parking_lot::Mutex; @@ -44,17 +45,12 @@ use rand::Rng; use rand::SeedableRng; use 
rayon::prelude::*; use sensitive_url::SensitiveUrl; -use slog::{o, Drain, Logger}; -use slog_async::Async; -use slog_term::{FullFormat, PlainSyncDecorator, TermDecorator}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; -use std::fs::{File, OpenOptions}; -use std::io::BufWriter; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; @@ -235,7 +231,6 @@ pub struct Builder { genesis_state_builder: Option>, import_all_data_columns: bool, runtime: TestRuntime, - log: Logger, } impl Builder> { @@ -247,12 +242,8 @@ impl Builder> { .expect("cannot build without validator keypairs"); let store = Arc::new( - HotColdDB::open_ephemeral( - self.store_config.clone().unwrap_or_default(), - spec.clone(), - self.log.clone(), - ) - .unwrap(), + HotColdDB::open_ephemeral(self.store_config.clone().unwrap_or_default(), spec.clone()) + .unwrap(), ); let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| { // Set alternating withdrawal credentials if no builder is specified. 
@@ -283,12 +274,8 @@ impl Builder> { let spec = self.spec.as_ref().expect("cannot build without spec"); let store = Arc::new( - HotColdDB::open_ephemeral( - self.store_config.clone().unwrap_or_default(), - spec.clone(), - self.log.clone(), - ) - .unwrap(), + HotColdDB::open_ephemeral(self.store_config.clone().unwrap_or_default(), spec.clone()) + .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { builder @@ -372,7 +359,6 @@ where { pub fn new(eth_spec_instance: E) -> Self { let runtime = TestRuntime::default(); - let log = runtime.log.clone(); Self { eth_spec_instance, @@ -391,7 +377,6 @@ where genesis_state_builder: None, import_all_data_columns: false, runtime, - log, } } @@ -439,12 +424,6 @@ where self } - pub fn logger(mut self, log: Logger) -> Self { - self.log = log.clone(); - self.runtime.set_logger(log); - self - } - /// This mutator will be run before the `store_mutator`. pub fn initial_mutator(mut self, mutator: BoxedMutator) -> Self { assert!( @@ -501,12 +480,8 @@ where suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() }; - let execution_layer = ExecutionLayer::from_config( - config, - self.runtime.task_executor.clone(), - self.log.clone(), - ) - .unwrap(); + let execution_layer = + ExecutionLayer::from_config(config, self.runtime.task_executor.clone()).unwrap(); self.execution_layer = Some(execution_layer); self @@ -586,7 +561,6 @@ where pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); - let log = self.log; let spec = self.spec.expect("cannot build without spec"); let seconds_per_slot = spec.seconds_per_slot; let validator_keypairs = self @@ -599,7 +573,6 @@ where let chain_config = self.chain_config.unwrap_or_default(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance, kzg.clone()) - .logger(log.clone()) .custom_spec(spec.clone()) .store(self.store.expect("cannot build without store")) .store_migrator_config( @@ -614,10 
+587,7 @@ where .shutdown_sender(shutdown_tx) .chain_config(chain_config) .import_all_data_columns(self.import_all_data_columns) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 5, - ))) + .event_handler(Some(ServerSentEventHandler::new_with_capacity(5))) .validator_monitor_config(validator_monitor_config); builder = if let Some(mutator) = self.initial_mutator { @@ -737,13 +707,10 @@ where Cold: ItemStore, { pub fn builder(eth_spec_instance: E) -> Builder> { + create_test_tracing_subscriber(); Builder::new(eth_spec_instance) } - pub fn logger(&self) -> &slog::Logger { - &self.chain.log - } - pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { self.mock_execution_layer .as_ref() @@ -2618,7 +2585,6 @@ where return; } - let log = self.logger(); let contributions = self.make_sync_contributions(state, block_root, slot, RelativeSyncCommittee::Current); @@ -2649,7 +2615,6 @@ where slot, &block_root, &sync_aggregate, - log, &self.spec, ); } @@ -3159,58 +3124,6 @@ pub struct MakeAttestationOptions { pub fork: Fork, } -pub enum LoggerType { - Test, - // The logs are output to files for each test. - CI, - // No logs will be printed. - Null, -} - -fn ci_decorator() -> PlainSyncDecorator> { - let log_dir = std::env::var(CI_LOGGER_DIR_ENV_VAR).unwrap_or_else(|e| { - panic!("{CI_LOGGER_DIR_ENV_VAR} env var must be defined when using ci_logger: {e:?}"); - }); - let fork_name = std::env::var(FORK_NAME_ENV_VAR) - .map(|s| format!("{s}_")) - .unwrap_or_default(); - // The current test name can be got via the thread name. - let test_name = std::thread::current() - .name() - .unwrap() - .to_string() - // Colons are not allowed in files that are uploaded to GitHub Artifacts. 
- .replace("::", "_"); - let log_path = format!("/{log_dir}/{fork_name}{test_name}.log"); - let file = OpenOptions::new() - .create(true) - .append(true) - .open(log_path) - .unwrap(); - let file = BufWriter::new(file); - PlainSyncDecorator::new(file) -} - -pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { - match logger_type { - LoggerType::Test => { - let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); - let drain = Async::new(drain).chan_size(10_000).build().fuse(); - Logger::root(drain.filter_level(level).fuse(), o!()) - } - LoggerType::CI => { - let drain = FullFormat::new(ci_decorator()).build().fuse(); - let drain = Async::new(drain).chan_size(10_000).build().fuse(); - Logger::root(drain.filter_level(level).fuse(), o!()) - } - LoggerType::Null => { - let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); - let drain = Async::new(drain).build().fuse(); - Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } -} - pub enum NumBlobs { Random, Number(usize), diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index cb27f0727a..16f4e3f143 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -5,9 +5,9 @@ use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH}; use crate::metrics; use itertools::Itertools; +use logging::crit; use parking_lot::{Mutex, RwLock}; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use smallvec::SmallVec; use state_processing::common::get_attestation_participation_flag_indices; @@ -21,6 +21,7 @@ use std::str::Utf8Error; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::AbstractExecPayload; +use tracing::{debug, error, info, instrument, warn}; use types::consts::altair::{ TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, 
TIMELY_TARGET_FLAG_INDEX, }; @@ -30,7 +31,6 @@ use types::{ IndexedAttestationRef, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, }; - /// Used for Prometheus labels. /// /// We've used `total` for this value to align with Nimbus, as per: @@ -401,15 +401,18 @@ pub struct ValidatorMonitor { beacon_proposer_cache: Arc>, // Unaggregated attestations generated by the committee index at each slot. unaggregated_attestations: HashMap>, - log: Logger, _phantom: PhantomData, } impl ValidatorMonitor { + #[instrument(parent = None, + level = "info", + name = "validator_monitor", + skip_all + )] pub fn new( config: ValidatorMonitorConfig, beacon_proposer_cache: Arc>, - log: Logger, ) -> Self { let ValidatorMonitorConfig { auto_register, @@ -425,7 +428,6 @@ impl ValidatorMonitor { missed_blocks: <_>::default(), beacon_proposer_cache, unaggregated_attestations: <_>::default(), - log, _phantom: PhantomData, }; for pubkey in validators { @@ -437,11 +439,23 @@ impl ValidatorMonitor { /// Returns `true` when the validator count is sufficiently low enough to /// emit metrics and logs on a per-validator basis (rather than just an /// aggregated basis). + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn individual_tracking(&self) -> bool { self.validators.len() <= self.individual_tracking_threshold } /// Add some validators to `self` for additional monitoring. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { let index_opt = self .indices @@ -449,18 +463,22 @@ impl ValidatorMonitor { .find(|(_, candidate_pk)| **candidate_pk == pubkey) .map(|(index, _)| *index); - let log = self.log.clone(); self.validators.entry(pubkey).or_insert_with(|| { info!( - log, - "Started monitoring validator"; - "pubkey" => %pubkey, + %pubkey, + "Started monitoring validator" ); MonitoredValidator::new(pubkey, index_opt) }); } /// Add an unaggregated attestation + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn set_unaggregated_attestation(&mut self, attestation: Attestation) { let unaggregated_attestations = &mut self.unaggregated_attestations; @@ -474,12 +492,24 @@ impl ValidatorMonitor { self.unaggregated_attestations.insert(slot, attestation); } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn get_unaggregated_attestation(&self, slot: Slot) -> Option<&Attestation> { self.unaggregated_attestations.get(&slot) } /// Reads information from the given `state`. The `state` *must* be valid (i.e, able to be /// imported). 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn process_valid_state( &mut self, current_epoch: Epoch, @@ -592,6 +622,12 @@ impl ValidatorMonitor { } /// Add missed non-finalized blocks for the monitored validators + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn add_validators_missed_blocks(&mut self, state: &BeaconState) { // Define range variables let current_slot = state.slot(); @@ -661,28 +697,25 @@ impl ValidatorMonitor { ); }); error!( - self.log, - "Validator missed a block"; - "index" => i, - "slot" => slot, - "parent block root" => ?prev_block_root, + index = i, + %slot, + ?prev_block_root, + "Validator missed a block" ); } } } else { warn!( - self.log, - "Missing validator index"; - "info" => "potentially inconsistency in the validator manager", - "index" => i, + info = "potentially inconsistency in the validator manager", + index = i, + "Missing validator index" ) } } else { debug!( - self.log, - "Could not get proposers from cache"; - "epoch" => ?slot_epoch, - "decision_root" => ?shuffling_decision_block, + epoch = ?slot_epoch, + decision_root = ?shuffling_decision_block, + "Could not get proposers from cache" ); } } @@ -691,6 +724,12 @@ impl ValidatorMonitor { } } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn get_proposers_by_epoch_from_cache( &mut self, epoch: Epoch, @@ -704,6 +743,12 @@ impl ValidatorMonitor { /// Process the unaggregated attestations generated by the service `attestation_simulator_service` /// and check if the attestation qualifies for a reward matching the flags source/target/head + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn process_unaggregated_attestations(&mut self, 
state: &BeaconState, spec: &ChainSpec) { let current_slot = state.slot(); @@ -744,27 +789,23 @@ impl ValidatorMonitor { let head_hit = flag_indices.contains(&TIMELY_HEAD_FLAG_INDEX); let target_hit = flag_indices.contains(&TIMELY_TARGET_FLAG_INDEX); let source_hit = flag_indices.contains(&TIMELY_SOURCE_FLAG_INDEX); - register_simulated_attestation( - data, head_hit, target_hit, source_hit, &self.log, - ) + register_simulated_attestation(data, head_hit, target_hit, source_hit) } Err(BeaconStateError::IncorrectAttestationSource) => { - register_simulated_attestation(data, false, false, false, &self.log) + register_simulated_attestation(data, false, false, false) } Err(err) => { error!( - self.log, - "Failed to get attestation participation flag indices"; - "error" => ?err, - "unaggregated_attestation" => ?unaggregated_attestation, + error = ?err, + ?unaggregated_attestation, + "Failed to get attestation participation flag indices" ); } } } else { error!( - self.log, - "Failed to remove unaggregated attestation from the hashmap"; - "slot" => ?slot, + ?slot, + "Failed to remove unaggregated attestation from the hashmap" ); } } @@ -780,6 +821,12 @@ impl ValidatorMonitor { /// /// We allow disabling tracking metrics on an individual validator basis /// since it can result in untenable cardinality with high validator counts. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn aggregatable_metric(&self, individual_id: &str, func: F) { func(TOTAL_LABEL); @@ -788,6 +835,12 @@ impl ValidatorMonitor { } } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn process_validator_statuses( &self, epoch: Epoch, @@ -867,13 +920,12 @@ impl ValidatorMonitor { attestation_success.push(id); if self.individual_tracking() { debug!( - self.log, - "Previous epoch attestation success"; - "matched_source" => previous_epoch_matched_source, - "matched_target" => previous_epoch_matched_target, - "matched_head" => previous_epoch_matched_head, - "epoch" => prev_epoch, - "validator" => id, + matched_source = previous_epoch_matched_source, + matched_target = previous_epoch_matched_target, + matched_head = previous_epoch_matched_head, + epoch = %prev_epoch, + validator = id, + "Previous epoch attestation success" ) } } else { @@ -886,10 +938,9 @@ impl ValidatorMonitor { attestation_miss.push(id); if self.individual_tracking() { debug!( - self.log, - "Previous epoch attestation missing"; - "epoch" => prev_epoch, - "validator" => id, + epoch = %prev_epoch, + validator = id, + "Previous epoch attestation missing" ) } } @@ -912,10 +963,9 @@ impl ValidatorMonitor { head_miss.push(id); if self.individual_tracking() { debug!( - self.log, - "Attestation failed to match head"; - "epoch" => prev_epoch, - "validator" => id, + epoch = %prev_epoch, + validator = id, + "Attestation failed to match head" ); } } @@ -938,10 +988,9 @@ impl ValidatorMonitor { target_miss.push(id); if self.individual_tracking() { debug!( - self.log, - "Attestation failed to match target"; - "epoch" => prev_epoch, - "validator" => id, + epoch = %prev_epoch, + validator = id, + "Attestation failed to match target" ); } } @@ -960,12 +1009,11 @@ impl ValidatorMonitor { 
suboptimal_inclusion.push(id); if self.individual_tracking() { debug!( - self.log, - "Potential sub-optimal inclusion delay"; - "optimal" => spec.min_attestation_inclusion_delay, - "delay" => inclusion_delay, - "epoch" => prev_epoch, - "validator" => id, + optimal = spec.min_attestation_inclusion_delay, + delay = inclusion_delay, + epoch = %prev_epoch, + validator = id, + "Potential sub-optimal inclusion delay" ); } } @@ -1003,12 +1051,11 @@ impl ValidatorMonitor { // logs that can be generated is capped by the size // of the sync committee. info!( - self.log, - "Current epoch sync signatures"; - "included" => summary.sync_signature_block_inclusions, - "expected" => E::slots_per_epoch(), - "epoch" => current_epoch, - "validator" => id, + included = summary.sync_signature_block_inclusions, + expected = E::slots_per_epoch(), + epoch = %current_epoch, + validator = id, + "Current epoch sync signatures" ); } } else if self.individual_tracking() { @@ -1018,10 +1065,9 @@ impl ValidatorMonitor { 0, ); debug!( - self.log, - "Validator isn't part of the current sync committee"; - "epoch" => current_epoch, - "validator" => id, + epoch = %current_epoch, + validator = id, + "Validator isn't part of the current sync committee" ); } } @@ -1032,51 +1078,52 @@ impl ValidatorMonitor { // for all validators managed by the validator monitor. 
if !attestation_success.is_empty() { info!( - self.log, - "Previous epoch attestation(s) success"; - "epoch" => prev_epoch, - "validators" => ?attestation_success, + epoch = %prev_epoch, + validators = ?attestation_success, + "Previous epoch attestation(s) success" ); } if !attestation_miss.is_empty() { info!( - self.log, - "Previous epoch attestation(s) missing"; - "epoch" => prev_epoch, - "validators" => ?attestation_miss, + epoch = %prev_epoch, + validators = ?attestation_miss, + "Previous epoch attestation(s) missing" ); } if !head_miss.is_empty() { info!( - self.log, - "Previous epoch attestation(s) failed to match head"; - "epoch" => prev_epoch, - "validators" => ?head_miss, + epoch = %prev_epoch, + validators = ?head_miss, + "Previous epoch attestation(s) failed to match head" ); } if !target_miss.is_empty() { info!( - self.log, - "Previous epoch attestation(s) failed to match target"; - "epoch" => prev_epoch, - "validators" => ?target_miss, + epoch = %prev_epoch, + validators = ?target_miss, + "Previous epoch attestation(s) failed to match target" ); } if !suboptimal_inclusion.is_empty() { info!( - self.log, - "Previous epoch attestation(s) had sub-optimal inclusion delay"; - "epoch" => prev_epoch, - "validators" => ?suboptimal_inclusion, + epoch = %prev_epoch, + validators = ?suboptimal_inclusion, + "Previous epoch attestation(s) had sub-optimal inclusion delay" ); } Ok(()) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn get_validator(&self, validator_index: u64) -> Option<&MonitoredValidator> { self.indices .get(&validator_index) @@ -1084,15 +1131,33 @@ impl ValidatorMonitor { } /// Returns the number of validators monitored by `self`. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn num_validators(&self) -> usize { self.validators.len() } - // Return the `id`'s of all monitored validators. + /// Return the `id`'s of all monitored validators. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn get_all_monitored_validators(&self) -> Vec { self.validators.values().map(|val| val.id.clone()).collect() } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn get_monitored_validator(&self, index: u64) -> Option<&MonitoredValidator> { if let Some(pubkey) = self.indices.get(&index) { self.validators.get(pubkey) @@ -1101,6 +1166,12 @@ impl ValidatorMonitor { } } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn get_monitored_validator_missed_block_count(&self, validator_index: u64) -> u64 { self.missed_blocks .iter() @@ -1108,12 +1179,24 @@ impl ValidatorMonitor { .count() as u64 } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn get_beacon_proposer_cache(&self) -> Arc> { self.beacon_proposer_cache.clone() } /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn auto_register_local_validator(&mut self, validator_index: u64) { if !self.auto_register { return; @@ -1122,10 +1205,9 @@ impl ValidatorMonitor { if let Some(pubkey) = self.indices.get(&validator_index) { if !self.validators.contains_key(pubkey) { info!( - self.log, - "Started monitoring validator"; - "pubkey" => %pubkey, - "validator" => %validator_index, + %pubkey, + validator = %validator_index, + "Started monitoring validator" ); self.validators.insert( @@ -1137,6 +1219,12 @@ impl ValidatorMonitor { } /// Process a block received on gossip. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_block( &self, seen_timestamp: Duration, @@ -1148,6 +1236,12 @@ impl ValidatorMonitor { } /// Process a block received on the HTTP API from a local validator. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_block( &self, seen_timestamp: Duration, @@ -1158,6 +1252,12 @@ impl ValidatorMonitor { self.register_beacon_block("api", seen_timestamp, block, block_root, slot_clock) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_beacon_block( &self, src: &str, @@ -1184,13 +1284,12 @@ impl ValidatorMonitor { }); info!( - self.log, - "Block from monitored validator"; - "root" => ?block_root, - "delay" => %delay.as_millis(), - "slot" => %block.slot(), - "src" => src, - "validator" => %id, + ?block_root, + delay = %delay.as_millis(), + slot = %block.slot(), + src, + validator = %id, + "Block from monitored validator" ); validator.with_epoch_summary(epoch, |summary| summary.register_block(delay)); @@ -1198,6 +1297,12 @@ impl ValidatorMonitor { } /// Register an attestation seen on the gossip network. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_unaggregated_attestation( &self, seen_timestamp: Duration, @@ -1213,6 +1318,12 @@ impl ValidatorMonitor { } /// Register an attestation seen on the HTTP API. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_unaggregated_attestation( &self, seen_timestamp: Duration, @@ -1227,6 +1338,12 @@ impl ValidatorMonitor { ) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_unaggregated_attestation( &self, src: &str, @@ -1261,15 +1378,14 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Unaggregated attestation"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, + head = ?data.beacon_block_root, + index = %data.index, + delay_ms = %delay.as_millis(), + %epoch, + slot = %data.slot, + src, + validator = %id, + "Unaggregated attestation" ); } @@ -1314,6 +1430,12 @@ impl ValidatorMonitor { ) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_aggregated_attestation( &self, src: &str, @@ -1349,15 +1471,14 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Aggregated attestation"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, + head = ?data.beacon_block_root, + index = %data.index, + delay_ms = %delay.as_millis(), + %epoch, + slot = %data.slot, + src, + validator = %id, + "Aggregated attestation" ); } @@ -1396,28 +1517,26 @@ impl ValidatorMonitor { if is_first_inclusion_aggregate { info!( - self.log, - "Attestation included in aggregate"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, + 
head = ?data.beacon_block_root, + index = %data.index, + delay_ms = %delay.as_millis(), + %epoch, + slot = %data.slot, + src, + validator = %id, + "Attestation included in aggregate" ); } else { // Downgrade to Debug for second and onwards of logging to reduce verbosity debug!( - self.log, - "Attestation included in aggregate"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, + head = ?data.beacon_block_root, + index = %data.index, + delay_ms = %delay.as_millis(), + %epoch, + slot = %data.slot, + src, + validator = %id, + "Attestation included in aggregate" ) }; } @@ -1435,6 +1554,11 @@ impl ValidatorMonitor { /// We use the parent slot instead of block slot to ignore skip slots when calculating inclusion distance. /// /// Note: Blocks that get orphaned will skew the inclusion distance calculation. + #[instrument(parent = None, + level = "info", + name = "validator_monitor", + skip_all + )] pub fn register_attestation_in_block( &self, indexed_attestation: IndexedAttestationRef<'_, E>, @@ -1480,26 +1604,24 @@ impl ValidatorMonitor { if is_first_inclusion_block { info!( - self.log, - "Attestation included in block"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "inclusion_lag" => format!("{} slot(s)", delay), - "epoch" => %epoch, - "slot" => %data.slot, - "validator" => %id, + head = ?data.beacon_block_root, + index = %data.index, + inclusion_lag = format!("{} slot(s)", delay), + %epoch, + slot = %data.slot, + validator = %id, + "Attestation included in block" ); } else { // Downgrade to Debug for second and onwards of logging to reduce verbosity debug!( - self.log, - "Attestation included in block"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "inclusion_lag" => format!("{} slot(s)", delay), - "epoch" => %epoch, - "slot" => %data.slot, - "validator" => %id, + head = ?data.beacon_block_root, + 
index = %data.index, + inclusion_lag = format!("{} slot(s)", delay), + %epoch, + slot = %data.slot, + validator = %id, + "Attestation included in block" ); } } @@ -1512,6 +1634,12 @@ impl ValidatorMonitor { } /// Register a sync committee message received over gossip. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_sync_committee_message( &self, seen_timestamp: Duration, @@ -1527,6 +1655,12 @@ impl ValidatorMonitor { } /// Register a sync committee message received over the http api. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_sync_committee_message( &self, seen_timestamp: Duration, @@ -1542,6 +1676,12 @@ impl ValidatorMonitor { } /// Register a sync committee message. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_sync_committee_message( &self, src: &str, @@ -1574,14 +1714,13 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Sync committee message"; - "head" => %sync_committee_message.beacon_block_root, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %sync_committee_message.slot, - "src" => src, - "validator" => %id, + head = %sync_committee_message.beacon_block_root, + delay_ms = %delay.as_millis(), + %epoch, + slot = %sync_committee_message.slot, + src, + validator = %id, + "Sync committee message" ); } @@ -1592,6 +1731,12 @@ impl ValidatorMonitor { } /// Register a sync committee contribution received over gossip. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_sync_committee_contribution( &self, seen_timestamp: Duration, @@ -1609,6 +1754,12 @@ impl ValidatorMonitor { } /// Register a sync committee contribution received over the http api. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_sync_committee_contribution( &self, seen_timestamp: Duration, @@ -1626,6 +1777,12 @@ impl ValidatorMonitor { } /// Register a sync committee contribution. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_sync_committee_contribution( &self, src: &str, @@ -1662,14 +1819,13 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Sync contribution"; - "head" => %beacon_block_root, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %slot, - "src" => src, - "validator" => %id, + head = %beacon_block_root, + delay_ms = %delay.as_millis(), + %epoch, + %slot, + src, + validator = %id, + "Sync contribution" ); } @@ -1691,14 +1847,13 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Sync signature included in contribution"; - "head" => %beacon_block_root, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %slot, - "src" => src, - "validator" => %id, + head = %beacon_block_root, + delay_ms = %delay.as_millis(), + %epoch, + %slot, + src, + validator = %id, + "Sync signature included in contribution" ); } @@ -1710,6 +1865,12 @@ impl ValidatorMonitor { } /// Register that the `sync_aggregate` was included in a *valid* `BeaconBlock`. 
+ #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_sync_aggregate_in_block( &self, slot: Slot, @@ -1731,12 +1892,11 @@ impl ValidatorMonitor { if self.individual_tracking() { info!( - self.log, - "Sync signature included in block"; - "head" => %beacon_block_root, - "epoch" => %epoch, - "slot" => %slot, - "validator" => %id, + head = %beacon_block_root, + %epoch, + %slot, + validator = %id, + "Sync signature included in block" ); } @@ -1748,20 +1908,44 @@ impl ValidatorMonitor { } /// Register an exit from the gossip network. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_voluntary_exit(&self, exit: &VoluntaryExit) { self.register_voluntary_exit("gossip", exit) } /// Register an exit from the HTTP API. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_voluntary_exit(&self, exit: &VoluntaryExit) { self.register_voluntary_exit("api", exit) } /// Register an exit included in a *valid* beacon block. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_block_voluntary_exit(&self, exit: &VoluntaryExit) { self.register_voluntary_exit("block", exit) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_voluntary_exit(&self, src: &str, exit: &VoluntaryExit) { if let Some(validator) = self.get_validator(exit.validator_index) { let id = &validator.id; @@ -1774,11 +1958,10 @@ impl ValidatorMonitor { // Not gated behind `self.individual_tracking()` since it's an // infrequent and interesting message. 
info!( - self.log, - "Voluntary exit"; - "epoch" => %epoch, - "validator" => %id, - "src" => src, + %epoch, + validator = %id, + src, + "Voluntary exit" ); validator.with_epoch_summary(epoch, |summary| summary.register_exit()); @@ -1786,20 +1969,44 @@ impl ValidatorMonitor { } /// Register a proposer slashing from the gossip network. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_proposer_slashing(&self, slashing: &ProposerSlashing) { self.register_proposer_slashing("gossip", slashing) } /// Register a proposer slashing from the HTTP API. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_proposer_slashing(&self, slashing: &ProposerSlashing) { self.register_proposer_slashing("api", slashing) } /// Register a proposer slashing included in a *valid* `BeaconBlock`. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_block_proposer_slashing(&self, slashing: &ProposerSlashing) { self.register_proposer_slashing("block", slashing) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_proposer_slashing(&self, src: &str, slashing: &ProposerSlashing) { let proposer = slashing.signed_header_1.message.proposer_index; let slot = slashing.signed_header_1.message.slot; @@ -1820,13 +2027,12 @@ impl ValidatorMonitor { // Not gated behind `self.individual_tracking()` since it's an // infrequent and interesting message. 
crit!( - self.log, - "Proposer slashing"; - "root_2" => %root_2, - "root_1" => %root_1, - "slot" => %slot, - "validator" => %id, - "src" => src, + %root_2, + %root_1, + %slot, + validator = %id, + src, + "Proposer slashing" ); validator.with_epoch_summary(epoch, |summary| summary.register_proposer_slashing()); @@ -1834,20 +2040,44 @@ impl ValidatorMonitor { } /// Register an attester slashing from the gossip network. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_gossip_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("gossip", slashing) } /// Register an attester slashing from the HTTP API. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_api_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("api", slashing) } /// Register an attester slashing included in a *valid* `BeaconBlock`. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn register_block_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("block", slashing) } + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] fn register_attester_slashing(&self, src: &str, slashing: AttesterSlashingRef<'_, E>) { let data = slashing.attestation_1().data(); let attestation_1_indices: HashSet = slashing @@ -1875,12 +2105,11 @@ impl ValidatorMonitor { // Not gated behind `self.individual_tracking()` since it's an // infrequent and interesting message. 
crit!( - self.log, - "Attester slashing"; - "epoch" => %epoch, - "slot" => %data.slot, - "validator" => %id, - "src" => src, + %epoch, + slot = %data.slot, + validator = %id, + src, + "Attester slashing" ); validator.with_epoch_summary(epoch, |summary| summary.register_attester_slashing()); @@ -1890,6 +2119,12 @@ impl ValidatorMonitor { /// Scrape `self` for metrics. /// /// Should be called whenever Prometheus is scraping Lighthouse. + #[instrument(parent = None, + level = "info", + fields(service = "validator_monitor"), + name = "validator_monitor", + skip_all + )] pub fn scrape_metrics(&self, slot_clock: &S, spec: &ChainSpec) { metrics::set_gauge( &metrics::VALIDATOR_MONITOR_VALIDATORS_TOTAL, @@ -2074,7 +2309,6 @@ fn register_simulated_attestation( head_hit: bool, target_hit: bool, source_hit: bool, - log: &Logger, ) { if head_hit { metrics::inc_counter(&metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT); @@ -2097,15 +2331,14 @@ fn register_simulated_attestation( } debug!( - log, - "Simulated attestation evaluated"; - "attestation_source" => ?data.source.root, - "attestation_target" => ?data.target.root, - "attestation_head" => ?data.beacon_block_root, - "attestation_slot" => ?data.slot, - "source_hit" => source_hit, - "target_hit" => target_hit, - "head_hit" => head_hit, + attestation_source = ?data.source.root, + attestation_target = ?data.target.root, + attestation_head = ?data.beacon_block_root, + attestation_slot = ?data.slot, + source_hit, + target_hit, + head_hit, + "Simulated attestation evaluated" ); } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 877c297a3b..39d2c2c2d7 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -210,7 +210,7 @@ impl DatabasePubkey { mod test { use super::*; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use 
logging::test_logger; + use logging::create_test_tracing_subscriber; use std::sync::Arc; use store::HotColdDB; use types::{EthSpec, Keypair, MainnetEthSpec}; @@ -231,10 +231,8 @@ mod test { } fn get_store() -> BeaconStore { - Arc::new( - HotColdDB::open_ephemeral(<_>::default(), Arc::new(E::default_spec()), test_logger()) - .unwrap(), - ) + create_test_tracing_subscriber(); + Arc::new(HotColdDB::open_ephemeral(<_>::default(), Arc::new(E::default_spec())).unwrap()) } #[allow(clippy::needless_range_loop)] diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5080b0890b..3a424e73ba 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -50,7 +50,6 @@ async fn merge_with_terminal_block_hash_override() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) - .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() @@ -107,7 +106,6 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) - .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 2a881b5b0f..5e39bf32c2 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -12,7 +12,7 @@ use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, InvalidSignature, NotifyExecutionLayer, }; -use logging::test_logger; +use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::{attesting_indices_base, attesting_indices_electra}, @@ -1295,15 +1295,11 @@ async fn 
verify_and_process_gossip_data_sidecars( #[tokio::test] async fn verify_block_for_gossip_slashing_detection() { + create_test_tracing_subscriber(); let slasher_dir = tempdir().unwrap(); let spec = Arc::new(test_spec::()); let slasher = Arc::new( - Slasher::open( - SlasherConfig::new(slasher_dir.path().into()), - spec.clone(), - test_logger(), - ) - .unwrap(), + Slasher::open(SlasherConfig::new(slasher_dir.path().into()), spec.clone()).unwrap(), ); let inner_slasher = slasher.clone(); diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index 3ce5702f2e..2c2ba8e01a 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -40,7 +40,6 @@ async fn base_altair_bellatrix_capella() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) - .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 44fb298d6c..86ab0cce80 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -9,7 +9,6 @@ use beacon_chain::{ }, BeaconChainError, }; -use sloggers::{null::NullLoggerBuilder, Build}; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; @@ -35,7 +34,6 @@ fn get_store(db_path: &TempDir) -> Arc { let cold_path = db_path.path().join("cold_db"); let blobs_path = db_path.path().join("blobs_db"); let config = StoreConfig::default(); - let log = NullLoggerBuilder.build().expect("logger should build"); HotColdDB::open( &hot_path, &cold_path, @@ -43,7 +41,6 @@ fn get_store(db_path: &TempDir) -> Arc { |_, _, _| Ok(()), config, spec, - log, ) .expect("disk store should initialize") } diff --git 
a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 01b790bb25..f81fe482ef 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -12,7 +12,6 @@ use execution_layer::{ ExecutionLayer, ForkchoiceState, PayloadAttributes, }; use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; -use logging::test_logger; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; use std::collections::HashMap; @@ -56,7 +55,6 @@ impl InvalidPayloadRig { reconstruct_historic_states: true, ..ChainConfig::default() }) - .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() .fresh_ephemeral_store() diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 997a2859b7..9212ed998d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -14,7 +14,7 @@ use beacon_chain::{ migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; -use logging::test_logger; +use logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; use slot_clock::{SlotClock, TestingSlotClock}; @@ -59,10 +59,10 @@ fn get_store_generic( config: StoreConfig, spec: ChainSpec, ) -> Arc, BeaconNodeBackend>> { + create_test_tracing_subscriber(); let hot_path = db_path.path().join("chain_db"); let cold_path = db_path.path().join("freezer_db"); let blobs_path = db_path.path().join("blobs_db"); - let log = test_logger(); HotColdDB::open( &hot_path, @@ -71,7 +71,6 @@ fn get_store_generic( |_, _, _| Ok(()), config, spec.into(), - log, ) .expect("disk store should initialize") } @@ -109,7 +108,6 @@ fn get_harness_generic( let harness = 
TestHarness::builder(MinimalEthSpec) .spec(store.get_chain_spec().clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() .chain_config(chain_config) @@ -2359,7 +2357,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - let log = harness.chain.logger().clone(); + let temp2 = tempdir().unwrap(); let store = get_store(&temp2); let spec = test_spec::(); @@ -2385,7 +2383,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .store(store.clone()) .custom_spec(test_spec::().into()) .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) .weak_subjectivity_state( wss_state, wss_block.clone(), @@ -2399,10 +2396,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) + .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) .build() .expect("should build"); @@ -3054,7 +3048,6 @@ async fn schema_downgrade_to_min_version() { genesis_state_root, CURRENT_SCHEMA_VERSION, min_version, - store.logger().clone(), ) .expect("schema downgrade to minimum version should work"); @@ -3064,7 +3057,6 @@ async fn schema_downgrade_to_min_version() { genesis_state_root, min_version, CURRENT_SCHEMA_VERSION, - store.logger().clone(), ) .expect("schema upgrade from minimum version should work"); @@ -3072,7 +3064,6 @@ async fn schema_downgrade_to_min_version() { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) - .logger(store.logger().clone()) .testing_slot_clock(slot_clock) .resumed_disk_store(store.clone()) .mock_execution_layer() @@ -3090,7 
+3081,6 @@ async fn schema_downgrade_to_min_version() { genesis_state_root, CURRENT_SCHEMA_VERSION, min_version_sub_1, - harness.logger().clone(), ) .expect_err("should not downgrade below minimum version"); } diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 180db6d76d..bca37b4e6d 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -2,7 +2,6 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; -use logging::test_logger; use std::sync::LazyLock; use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; @@ -22,7 +21,6 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .logger(test_logger()) .fresh_ephemeral_store() .mock_execution_layer() .validator_monitor_config(ValidatorMonitorConfig { diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index c96e0868d7..afd4660c9a 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -13,12 +13,12 @@ metrics = { workspace = true } num_cpus = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } strum = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } tokio-util = { workspace = true } +tracing = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index a8960d47c9..e864cb1fd9 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -44,10 +44,10 @@ use 
crate::work_reprocessing_queue::{ use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::{MessageId, NetworkGlobals, PeerId}; +use logging::crit; use logging::TimeLatch; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::cmp; use std::collections::{HashSet, VecDeque}; @@ -61,6 +61,7 @@ use strum::IntoStaticStr; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; +use tracing::{debug, error, trace, warn}; use types::{ Attestation, BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, @@ -305,14 +306,13 @@ impl FifoQueue { /// Add a new item to the queue. /// /// Drops `item` if the queue is full. - pub fn push(&mut self, item: T, item_desc: &str, log: &Logger) { + pub fn push(&mut self, item: T, item_desc: &str) { if self.queue.len() == self.max_length { error!( - log, - "Work queue is full"; - "msg" => "the system has insufficient resources for load", - "queue_len" => self.max_length, - "queue" => item_desc, + msg = "the system has insufficient resources for load", + queue_len = self.max_length, + queue = item_desc, + "Work queue is full" ) } else { self.queue.push_back(item); @@ -827,7 +827,6 @@ pub struct BeaconProcessor { pub executor: TaskExecutor, pub current_workers: usize, pub config: BeaconProcessorConfig, - pub log: Logger, } impl BeaconProcessor { @@ -938,7 +937,6 @@ impl BeaconProcessor { work_reprocessing_rx, &self.executor, Arc::new(slot_clock), - self.log.clone(), maximum_gossip_clock_disparity, )?; @@ -969,9 +967,8 @@ impl BeaconProcessor { { Err(e) => { warn!( - self.log, - "Unable to queue backfill work event. Will try to process now."; - "error" => %e + error = %e, + "Unable to queue backfill work event. Will try to process now." 
); match e { TrySendError::Full(reprocess_queue_message) @@ -982,9 +979,8 @@ impl BeaconProcessor { ) => Some(backfill_batch.into()), other => { crit!( - self.log, - "Unexpected queue message type"; - "message_type" => other.as_ref() + message_type = other.as_ref(), + "Unexpected queue message type" ); // This is an unhandled exception, drop the message. continue; @@ -1005,11 +1001,7 @@ impl BeaconProcessor { Some(InboundEvent::WorkEvent(event)) | Some(InboundEvent::ReprocessingWork(event)) => Some(event), None => { - debug!( - self.log, - "Gossip processor stopped"; - "msg" => "stream ended" - ); + debug!(msg = "stream ended", "Gossip processor stopped"); break; } }; @@ -1050,238 +1042,234 @@ impl BeaconProcessor { None if can_spawn => { // Check for chain segments first, they're the most efficient way to get // blocks into the system. - let work_event: Option> = if let Some(item) = - chain_segment_queue.pop() - { - Some(item) - // Check sync blocks before gossip blocks, since we've already explicitly - // requested these blocks. - } else if let Some(item) = rpc_block_queue.pop() { - Some(item) - } else if let Some(item) = rpc_blob_queue.pop() { - Some(item) - } else if let Some(item) = rpc_custody_column_queue.pop() { - Some(item) - // TODO(das): decide proper prioritization for sampling columns - } else if let Some(item) = rpc_custody_column_queue.pop() { - Some(item) - } else if let Some(item) = rpc_verify_data_column_queue.pop() { - Some(item) - } else if let Some(item) = sampling_result_queue.pop() { - Some(item) - // Check delayed blocks before gossip blocks, the gossip blocks might rely - // on the delayed ones. - } else if let Some(item) = delayed_block_queue.pop() { - Some(item) - // Check gossip blocks before gossip attestations, since a block might be - // required to verify some attestations. 
- } else if let Some(item) = gossip_block_queue.pop() { - Some(item) - } else if let Some(item) = gossip_blob_queue.pop() { - Some(item) - } else if let Some(item) = gossip_data_column_queue.pop() { - Some(item) - // Check the priority 0 API requests after blocks and blobs, but before attestations. - } else if let Some(item) = api_request_p0_queue.pop() { - Some(item) - // Check the aggregates, *then* the unaggregates since we assume that - // aggregates are more valuable to local validators and effectively give us - // more information with less signature verification time. - } else if aggregate_queue.len() > 0 { - let batch_size = cmp::min( - aggregate_queue.len(), - self.config.max_gossip_aggregate_batch_size, - ); + let work_event: Option> = + if let Some(item) = chain_segment_queue.pop() { + Some(item) + // Check sync blocks before gossip blocks, since we've already explicitly + // requested these blocks. + } else if let Some(item) = rpc_block_queue.pop() { + Some(item) + } else if let Some(item) = rpc_blob_queue.pop() { + Some(item) + } else if let Some(item) = rpc_custody_column_queue.pop() { + Some(item) + // TODO(das): decide proper prioritization for sampling columns + } else if let Some(item) = rpc_custody_column_queue.pop() { + Some(item) + } else if let Some(item) = rpc_verify_data_column_queue.pop() { + Some(item) + } else if let Some(item) = sampling_result_queue.pop() { + Some(item) + // Check delayed blocks before gossip blocks, the gossip blocks might rely + // on the delayed ones. + } else if let Some(item) = delayed_block_queue.pop() { + Some(item) + // Check gossip blocks before gossip attestations, since a block might be + // required to verify some attestations. 
+ } else if let Some(item) = gossip_block_queue.pop() { + Some(item) + } else if let Some(item) = gossip_blob_queue.pop() { + Some(item) + } else if let Some(item) = gossip_data_column_queue.pop() { + Some(item) + // Check the priority 0 API requests after blocks and blobs, but before attestations. + } else if let Some(item) = api_request_p0_queue.pop() { + Some(item) + // Check the aggregates, *then* the unaggregates since we assume that + // aggregates are more valuable to local validators and effectively give us + // more information with less signature verification time. + } else if aggregate_queue.len() > 0 { + let batch_size = cmp::min( + aggregate_queue.len(), + self.config.max_gossip_aggregate_batch_size, + ); - if batch_size < 2 { - // One single aggregate is in the queue, process it individually. - aggregate_queue.pop() - } else { - // Collect two or more aggregates into a batch, so they can take - // advantage of batch signature verification. - // - // Note: this will convert the `Work::GossipAggregate` item into a - // `Work::GossipAggregateBatch` item. - let mut aggregates = Vec::with_capacity(batch_size); - let mut process_batch_opt = None; - for _ in 0..batch_size { - if let Some(item) = aggregate_queue.pop() { - match item { - Work::GossipAggregate { - aggregate, - process_individual: _, - process_batch, - } => { - aggregates.push(*aggregate); - if process_batch_opt.is_none() { - process_batch_opt = Some(process_batch); + if batch_size < 2 { + // One single aggregate is in the queue, process it individually. + aggregate_queue.pop() + } else { + // Collect two or more aggregates into a batch, so they can take + // advantage of batch signature verification. + // + // Note: this will convert the `Work::GossipAggregate` item into a + // `Work::GossipAggregateBatch` item. 
+ let mut aggregates = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; + for _ in 0..batch_size { + if let Some(item) = aggregate_queue.pop() { + match item { + Work::GossipAggregate { + aggregate, + process_individual: _, + process_batch, + } => { + aggregates.push(*aggregate); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } + } + _ => { + error!("Invalid item in aggregate queue"); } - } - _ => { - error!(self.log, "Invalid item in aggregate queue"); } } } - } - if let Some(process_batch) = process_batch_opt { - // Process all aggregates with a single worker. - Some(Work::GossipAggregateBatch { - aggregates, - process_batch, - }) - } else { - // There is no good reason for this to - // happen, it is a serious logic error. - // Since we only form batches when multiple - // work items exist, we should always have a - // work closure at this point. - crit!(self.log, "Missing aggregate work"); - None - } - } - // Check the unaggregated attestation queue. - // - // Potentially use batching. - } else if attestation_queue.len() > 0 { - let batch_size = cmp::min( - attestation_queue.len(), - self.config.max_gossip_attestation_batch_size, - ); - - if batch_size < 2 { - // One single attestation is in the queue, process it individually. - attestation_queue.pop() - } else { - // Collect two or more attestations into a batch, so they can take - // advantage of batch signature verification. - // - // Note: this will convert the `Work::GossipAttestation` item into a - // `Work::GossipAttestationBatch` item. 
- let mut attestations = Vec::with_capacity(batch_size); - let mut process_batch_opt = None; - for _ in 0..batch_size { - if let Some(item) = attestation_queue.pop() { - match item { - Work::GossipAttestation { - attestation, - process_individual: _, - process_batch, - } => { - attestations.push(*attestation); - if process_batch_opt.is_none() { - process_batch_opt = Some(process_batch); - } - } - _ => error!( - self.log, - "Invalid item in attestation queue" - ), - } + if let Some(process_batch) = process_batch_opt { + // Process all aggregates with a single worker. + Some(Work::GossipAggregateBatch { + aggregates, + process_batch, + }) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. + crit!("Missing aggregate work"); + None } } + // Check the unaggregated attestation queue. + // + // Potentially use batching. + } else if attestation_queue.len() > 0 { + let batch_size = cmp::min( + attestation_queue.len(), + self.config.max_gossip_attestation_batch_size, + ); - if let Some(process_batch) = process_batch_opt { - // Process all attestations with a single worker. - Some(Work::GossipAttestationBatch { - attestations, - process_batch, - }) + if batch_size < 2 { + // One single attestation is in the queue, process it individually. + attestation_queue.pop() } else { - // There is no good reason for this to - // happen, it is a serious logic error. - // Since we only form batches when multiple - // work items exist, we should always have a - // work closure at this point. - crit!(self.log, "Missing attestations work"); - None + // Collect two or more attestations into a batch, so they can take + // advantage of batch signature verification. + // + // Note: this will convert the `Work::GossipAttestation` item into a + // `Work::GossipAttestationBatch` item. 
+ let mut attestations = Vec::with_capacity(batch_size); + let mut process_batch_opt = None; + for _ in 0..batch_size { + if let Some(item) = attestation_queue.pop() { + match item { + Work::GossipAttestation { + attestation, + process_individual: _, + process_batch, + } => { + attestations.push(*attestation); + if process_batch_opt.is_none() { + process_batch_opt = Some(process_batch); + } + } + _ => error!("Invalid item in attestation queue"), + } + } + } + + if let Some(process_batch) = process_batch_opt { + // Process all attestations with a single worker. + Some(Work::GossipAttestationBatch { + attestations, + process_batch, + }) + } else { + // There is no good reason for this to + // happen, it is a serious logic error. + // Since we only form batches when multiple + // work items exist, we should always have a + // work closure at this point. + crit!("Missing attestations work"); + None + } } - } - // Convert any gossip attestations that need to be converted. - } else if let Some(item) = attestation_to_convert_queue.pop() { - Some(item) - // Check sync committee messages after attestations as their rewards are lesser - // and they don't influence fork choice. - } else if let Some(item) = sync_contribution_queue.pop() { - Some(item) - } else if let Some(item) = sync_message_queue.pop() { - Some(item) - // Aggregates and unaggregates queued for re-processing are older and we - // care about fresher ones, so check those first. - } else if let Some(item) = unknown_block_aggregate_queue.pop() { - Some(item) - } else if let Some(item) = unknown_block_attestation_queue.pop() { - Some(item) - // Check RPC methods next. 
Status messages are needed for sync so - // prioritize them over syncing requests from other peers (BlocksByRange - // and BlocksByRoot) - } else if let Some(item) = status_queue.pop() { - Some(item) - } else if let Some(item) = bbrange_queue.pop() { - Some(item) - } else if let Some(item) = bbroots_queue.pop() { - Some(item) - } else if let Some(item) = blbrange_queue.pop() { - Some(item) - } else if let Some(item) = blbroots_queue.pop() { - Some(item) - } else if let Some(item) = dcbroots_queue.pop() { - Some(item) - } else if let Some(item) = dcbrange_queue.pop() { - Some(item) - // Prioritize sampling requests after block syncing requests - } else if let Some(item) = unknown_block_sampling_request_queue.pop() { - Some(item) - // Check slashings after all other consensus messages so we prioritize - // following head. - // - // Check attester slashings before proposer slashings since they have the - // potential to slash multiple validators at once. - } else if let Some(item) = gossip_attester_slashing_queue.pop() { - Some(item) - } else if let Some(item) = gossip_proposer_slashing_queue.pop() { - Some(item) - // Check exits and address changes late since our validators don't get - // rewards from them. - } else if let Some(item) = gossip_voluntary_exit_queue.pop() { - Some(item) - } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { - Some(item) - // Check the priority 1 API requests after we've - // processed all the interesting things from the network - // and things required for us to stay in good repute - // with our P2P peers. - } else if let Some(item) = api_request_p1_queue.pop() { - Some(item) - // Handle backfill sync chain segments. - } else if let Some(item) = backfill_chain_segment.pop() { - Some(item) - // Handle light client requests. 
- } else if let Some(item) = lc_gossip_finality_update_queue.pop() { - Some(item) - } else if let Some(item) = lc_gossip_optimistic_update_queue.pop() { - Some(item) - } else if let Some(item) = unknown_light_client_update_queue.pop() { - Some(item) - } else if let Some(item) = lc_bootstrap_queue.pop() { - Some(item) - } else if let Some(item) = lc_rpc_optimistic_update_queue.pop() { - Some(item) - } else if let Some(item) = lc_rpc_finality_update_queue.pop() { - Some(item) - } else if let Some(item) = lc_update_range_queue.pop() { - Some(item) - // This statement should always be the final else statement. - } else { - // Let the journal know that a worker is freed and there's nothing else - // for it to do. - if let Some(work_journal_tx) = &work_journal_tx { - // We don't care if this message was successfully sent, we only use the journal - // during testing. - let _ = work_journal_tx.try_send(NOTHING_TO_DO); - } - None - }; + // Convert any gossip attestations that need to be converted. + } else if let Some(item) = attestation_to_convert_queue.pop() { + Some(item) + // Check sync committee messages after attestations as their rewards are lesser + // and they don't influence fork choice. + } else if let Some(item) = sync_contribution_queue.pop() { + Some(item) + } else if let Some(item) = sync_message_queue.pop() { + Some(item) + // Aggregates and unaggregates queued for re-processing are older and we + // care about fresher ones, so check those first. + } else if let Some(item) = unknown_block_aggregate_queue.pop() { + Some(item) + } else if let Some(item) = unknown_block_attestation_queue.pop() { + Some(item) + // Check RPC methods next. 
Status messages are needed for sync so + // prioritize them over syncing requests from other peers (BlocksByRange + // and BlocksByRoot) + } else if let Some(item) = status_queue.pop() { + Some(item) + } else if let Some(item) = bbrange_queue.pop() { + Some(item) + } else if let Some(item) = bbroots_queue.pop() { + Some(item) + } else if let Some(item) = blbrange_queue.pop() { + Some(item) + } else if let Some(item) = blbroots_queue.pop() { + Some(item) + } else if let Some(item) = dcbroots_queue.pop() { + Some(item) + } else if let Some(item) = dcbrange_queue.pop() { + Some(item) + // Prioritize sampling requests after block syncing requests + } else if let Some(item) = unknown_block_sampling_request_queue.pop() { + Some(item) + // Check slashings after all other consensus messages so we prioritize + // following head. + // + // Check attester slashings before proposer slashings since they have the + // potential to slash multiple validators at once. + } else if let Some(item) = gossip_attester_slashing_queue.pop() { + Some(item) + } else if let Some(item) = gossip_proposer_slashing_queue.pop() { + Some(item) + // Check exits and address changes late since our validators don't get + // rewards from them. + } else if let Some(item) = gossip_voluntary_exit_queue.pop() { + Some(item) + } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { + Some(item) + // Check the priority 1 API requests after we've + // processed all the interesting things from the network + // and things required for us to stay in good repute + // with our P2P peers. + } else if let Some(item) = api_request_p1_queue.pop() { + Some(item) + // Handle backfill sync chain segments. + } else if let Some(item) = backfill_chain_segment.pop() { + Some(item) + // Handle light client requests. 
+ } else if let Some(item) = lc_gossip_finality_update_queue.pop() { + Some(item) + } else if let Some(item) = lc_gossip_optimistic_update_queue.pop() { + Some(item) + } else if let Some(item) = unknown_light_client_update_queue.pop() { + Some(item) + } else if let Some(item) = lc_bootstrap_queue.pop() { + Some(item) + } else if let Some(item) = lc_rpc_optimistic_update_queue.pop() { + Some(item) + } else if let Some(item) = lc_rpc_finality_update_queue.pop() { + Some(item) + } else if let Some(item) = lc_update_range_queue.pop() { + Some(item) + // This statement should always be the final else statement. + } else { + // Let the journal know that a worker is freed and there's nothing else + // for it to do. + if let Some(work_journal_tx) = &work_journal_tx { + // We don't care if this message was successfully sent, we only use the journal + // during testing. + let _ = work_journal_tx.try_send(NOTHING_TO_DO); + } + None + }; if let Some(work_event) = work_event { let work_type = work_event.to_type(); @@ -1296,9 +1284,8 @@ impl BeaconProcessor { // I cannot see any good reason why this would happen. None => { warn!( - self.log, - "Unexpected gossip processor condition"; - "msg" => "no new work and cannot spawn worker" + msg = "no new work and cannot spawn worker", + "Unexpected gossip processor condition" ); None } @@ -1313,10 +1300,9 @@ impl BeaconProcessor { &[work_id], ); trace!( - self.log, - "Gossip processor skipping work"; - "msg" => "chain is syncing", - "work_id" => work_id + msg = "chain is syncing", + work_id = work_id, + "Gossip processor skipping work" ); None } @@ -1335,89 +1321,75 @@ impl BeaconProcessor { // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. Work::GossipAttestationBatch { .. } => crit!( - self.log, - "Unsupported inbound event"; - "type" => "GossipAttestationBatch" + work_type = "GossipAttestationBatch", + "Unsupported inbound event" ), Work::GossipAggregate { .. 
} => aggregate_queue.push(work), // Aggregate batches are formed internally within the `BeaconProcessor`, // they are not sent from external services. - Work::GossipAggregateBatch { .. } => crit!( - self.log, - "Unsupported inbound event"; - "type" => "GossipAggregateBatch" - ), - Work::GossipBlock { .. } => { - gossip_block_queue.push(work, work_id, &self.log) - } - Work::GossipBlobSidecar { .. } => { - gossip_blob_queue.push(work, work_id, &self.log) + Work::GossipAggregateBatch { .. } => { + crit!( + work_type = "GossipAggregateBatch", + "Unsupported inbound event" + ) } + Work::GossipBlock { .. } => gossip_block_queue.push(work, work_id), + Work::GossipBlobSidecar { .. } => gossip_blob_queue.push(work, work_id), Work::GossipDataColumnSidecar { .. } => { - gossip_data_column_queue.push(work, work_id, &self.log) + gossip_data_column_queue.push(work, work_id) } Work::DelayedImportBlock { .. } => { - delayed_block_queue.push(work, work_id, &self.log) + delayed_block_queue.push(work, work_id) } Work::GossipVoluntaryExit { .. } => { - gossip_voluntary_exit_queue.push(work, work_id, &self.log) + gossip_voluntary_exit_queue.push(work, work_id) } Work::GossipProposerSlashing { .. } => { - gossip_proposer_slashing_queue.push(work, work_id, &self.log) + gossip_proposer_slashing_queue.push(work, work_id) } Work::GossipAttesterSlashing { .. } => { - gossip_attester_slashing_queue.push(work, work_id, &self.log) + gossip_attester_slashing_queue.push(work, work_id) } Work::GossipSyncSignature { .. } => sync_message_queue.push(work), Work::GossipSyncContribution { .. } => { sync_contribution_queue.push(work) } Work::GossipLightClientFinalityUpdate { .. } => { - lc_gossip_finality_update_queue.push(work, work_id, &self.log) + lc_gossip_finality_update_queue.push(work, work_id) } Work::GossipLightClientOptimisticUpdate { .. } => { - lc_gossip_optimistic_update_queue.push(work, work_id, &self.log) + lc_gossip_optimistic_update_queue.push(work, work_id) } Work::RpcBlock { .. 
} | Work::IgnoredRpcBlock { .. } => { - rpc_block_queue.push(work, work_id, &self.log) + rpc_block_queue.push(work, work_id) } - Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), + Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id), Work::RpcCustodyColumn { .. } => { - rpc_custody_column_queue.push(work, work_id, &self.log) + rpc_custody_column_queue.push(work, work_id) } Work::RpcVerifyDataColumn(_) => { - rpc_verify_data_column_queue.push(work, work_id, &self.log) - } - Work::SamplingResult(_) => { - sampling_result_queue.push(work, work_id, &self.log) - } - Work::ChainSegment { .. } => { - chain_segment_queue.push(work, work_id, &self.log) + rpc_verify_data_column_queue.push(work, work_id) } + Work::SamplingResult(_) => sampling_result_queue.push(work, work_id), + Work::ChainSegment { .. } => chain_segment_queue.push(work, work_id), Work::ChainSegmentBackfill { .. } => { - backfill_chain_segment.push(work, work_id, &self.log) - } - Work::Status { .. } => status_queue.push(work, work_id, &self.log), - Work::BlocksByRangeRequest { .. } => { - bbrange_queue.push(work, work_id, &self.log) - } - Work::BlocksByRootsRequest { .. } => { - bbroots_queue.push(work, work_id, &self.log) - } - Work::BlobsByRangeRequest { .. } => { - blbrange_queue.push(work, work_id, &self.log) + backfill_chain_segment.push(work, work_id) } + Work::Status { .. } => status_queue.push(work, work_id), + Work::BlocksByRangeRequest { .. } => bbrange_queue.push(work, work_id), + Work::BlocksByRootsRequest { .. } => bbroots_queue.push(work, work_id), + Work::BlobsByRangeRequest { .. } => blbrange_queue.push(work, work_id), Work::LightClientBootstrapRequest { .. } => { - lc_bootstrap_queue.push(work, work_id, &self.log) + lc_bootstrap_queue.push(work, work_id) } Work::LightClientOptimisticUpdateRequest { .. 
} => { - lc_rpc_optimistic_update_queue.push(work, work_id, &self.log) + lc_rpc_optimistic_update_queue.push(work, work_id) } Work::LightClientFinalityUpdateRequest { .. } => { - lc_rpc_finality_update_queue.push(work, work_id, &self.log) + lc_rpc_finality_update_queue.push(work, work_id) } Work::LightClientUpdatesByRangeRequest { .. } => { - lc_update_range_queue.push(work, work_id, &self.log) + lc_update_range_queue.push(work, work_id) } Work::UnknownBlockAttestation { .. } => { unknown_block_attestation_queue.push(work) @@ -1426,29 +1398,23 @@ impl BeaconProcessor { unknown_block_aggregate_queue.push(work) } Work::GossipBlsToExecutionChange { .. } => { - gossip_bls_to_execution_change_queue.push(work, work_id, &self.log) - } - Work::BlobsByRootsRequest { .. } => { - blbroots_queue.push(work, work_id, &self.log) + gossip_bls_to_execution_change_queue.push(work, work_id) } + Work::BlobsByRootsRequest { .. } => blbroots_queue.push(work, work_id), Work::DataColumnsByRootsRequest { .. } => { - dcbroots_queue.push(work, work_id, &self.log) + dcbroots_queue.push(work, work_id) } Work::DataColumnsByRangeRequest { .. } => { - dcbrange_queue.push(work, work_id, &self.log) + dcbrange_queue.push(work, work_id) } Work::UnknownLightClientOptimisticUpdate { .. } => { - unknown_light_client_update_queue.push(work, work_id, &self.log) + unknown_light_client_update_queue.push(work, work_id) } Work::UnknownBlockSamplingRequest { .. } => { - unknown_block_sampling_request_queue.push(work, work_id, &self.log) - } - Work::ApiRequestP0 { .. } => { - api_request_p0_queue.push(work, work_id, &self.log) - } - Work::ApiRequestP1 { .. } => { - api_request_p1_queue.push(work, work_id, &self.log) + unknown_block_sampling_request_queue.push(work, work_id) } + Work::ApiRequestP0 { .. } => api_request_p0_queue.push(work, work_id), + Work::ApiRequestP1 { .. 
} => api_request_p1_queue.push(work, work_id), }; Some(work_type) } @@ -1526,19 +1492,17 @@ impl BeaconProcessor { if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( - self.log, - "Aggregate attestation queue full"; - "msg" => "the system has insufficient resources for load", - "queue_len" => aggregate_queue.max_length, + msg = "the system has insufficient resources for load", + queue_len = aggregate_queue.max_length, + "Aggregate attestation queue full" ) } if attestation_queue.is_full() && attestation_debounce.elapsed() { error!( - self.log, - "Attestation queue full"; - "msg" => "the system has insufficient resources for load", - "queue_len" => attestation_queue.max_length, + msg = "the system has insufficient resources for load", + queue_len = attestation_queue.max_length, + "Attestation queue full" ) } } @@ -1569,7 +1533,6 @@ impl BeaconProcessor { let send_idle_on_drop = SendOnDrop { tx: idle_tx, _worker_timer: worker_timer, - log: self.log.clone(), }; let worker_id = self.current_workers; @@ -1578,10 +1541,9 @@ impl BeaconProcessor { let executor = self.executor.clone(); trace!( - self.log, - "Spawning beacon processor worker"; - "work" => work_id, - "worker" => worker_id, + work = work_id, + worker = worker_id, + "Spawning beacon processor worker" ); let task_spawner = TaskSpawner { @@ -1719,8 +1681,8 @@ impl TaskSpawner { } } -/// This struct will send a message on `self.tx` when it is dropped. An error will be logged on -/// `self.log` if the send fails (this happens when the node is shutting down). +/// This struct will send a message on `self.tx` when it is dropped. An error will be logged +/// if the send fails (this happens when the node is shutting down). /// /// ## Purpose /// @@ -1733,17 +1695,15 @@ pub struct SendOnDrop { tx: mpsc::Sender<()>, // The field is unused, but it's here to ensure the timer is dropped once the task has finished. 
_worker_timer: Option, - log: Logger, } impl Drop for SendOnDrop { fn drop(&mut self) { if let Err(e) = self.tx.try_send(()) { warn!( - self.log, - "Unable to free worker"; - "msg" => "did not free worker, shutdown may be underway", - "error" => %e + msg = "did not free worker, shutdown may be underway", + error = %e, + "Unable to free worker" ) } } diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index a43310ac83..a4f539aea0 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -16,8 +16,8 @@ use fnv::FnvHashMap; use futures::task::Poll; use futures::{Stream, StreamExt}; use itertools::Itertools; +use logging::crit; use logging::TimeLatch; -use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::future::Future; @@ -29,6 +29,7 @@ use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; +use tracing::{debug, error, trace, warn}; use types::{EthSpec, Hash256, Slot}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; @@ -374,7 +375,6 @@ pub fn spawn_reprocess_scheduler( work_reprocessing_rx: Receiver, executor: &TaskExecutor, slot_clock: Arc, - log: Logger, maximum_gossip_clock_disparity: Duration, ) -> Result<(), String> { // Sanity check @@ -386,14 +386,10 @@ pub fn spawn_reprocess_scheduler( executor.spawn( async move { while let Some(msg) = queue.next().await { - queue.handle_message(msg, &log); + queue.handle_message(msg); } - debug!( - log, - "Re-process queue stopped"; - "msg" => "shutting down" - ); + debug!(msg = "shutting down", "Re-process queue stopped"); }, TASK_NAME, ); @@ -436,7 +432,7 @@ impl ReprocessQueue { } } - fn handle_message(&mut self, msg: InboundEvent, log: &Logger) { + fn 
handle_message(&mut self, msg: InboundEvent) { use ReprocessQueueMessage::*; match msg { // Some block has been indicated as "early" and should be processed when the @@ -455,10 +451,9 @@ impl ReprocessQueue { if self.queued_gossip_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { if self.early_block_debounce.elapsed() { warn!( - log, - "Early blocks queue is full"; - "queue_size" => MAXIMUM_QUEUED_BLOCKS, - "msg" => "check system clock" + queue_size = MAXIMUM_QUEUED_BLOCKS, + msg = "check system clock", + "Early blocks queue is full" ); } // Drop the block. @@ -490,10 +485,7 @@ impl ReprocessQueue { .try_send(ReadyWork::Block(early_block)) .is_err() { - error!( - log, - "Failed to send block"; - ); + error!("Failed to send block"); } } } @@ -507,10 +499,9 @@ impl ReprocessQueue { if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { if self.rpc_block_debounce.elapsed() { warn!( - log, - "RPC blocks queue is full"; - "queue_size" => MAXIMUM_QUEUED_BLOCKS, - "msg" => "check system clock" + queue_size = MAXIMUM_QUEUED_BLOCKS, + msg = "check system clock", + "RPC blocks queue is full" ); } // Return the block to the beacon processor signalling to @@ -522,10 +513,7 @@ impl ReprocessQueue { })) .is_err() { - error!( - log, - "Failed to send rpc block to beacon processor"; - ); + error!("Failed to send rpc block to beacon processor"); } return; } @@ -536,29 +524,24 @@ impl ReprocessQueue { } InboundEvent::ReadyRpcBlock(queued_rpc_block) => { debug!( - log, - "Sending rpc block for reprocessing"; - "block_root" => %queued_rpc_block.beacon_block_root + %queued_rpc_block.beacon_block_root, + "Sending rpc block for reprocessing" ); if self .ready_work_tx .try_send(ReadyWork::RpcBlock(queued_rpc_block)) .is_err() { - error!( - log, - "Failed to send rpc block to beacon processor"; - ); + error!("Failed to send rpc block to beacon processor"); } } InboundEvent::Msg(UnknownBlockAggregate(queued_aggregate)) => { if self.attestations_delay_queue.len() >= 
MAXIMUM_QUEUED_ATTESTATIONS { if self.attestation_delay_debounce.elapsed() { error!( - log, - "Aggregate attestation delay queue is full"; - "queue_size" => MAXIMUM_QUEUED_ATTESTATIONS, - "msg" => "check system clock" + queue_size = MAXIMUM_QUEUED_ATTESTATIONS, + msg = "check system clock", + "Aggregate attestation delay queue is full" ); } // Drop the attestation. @@ -588,10 +571,9 @@ impl ReprocessQueue { if self.attestations_delay_queue.len() >= MAXIMUM_QUEUED_ATTESTATIONS { if self.attestation_delay_debounce.elapsed() { error!( - log, - "Attestation delay queue is full"; - "queue_size" => MAXIMUM_QUEUED_ATTESTATIONS, - "msg" => "check system clock" + queue_size = MAXIMUM_QUEUED_ATTESTATIONS, + msg = "check system clock", + "Attestation delay queue is full" ); } // Drop the attestation. @@ -623,10 +605,9 @@ impl ReprocessQueue { if self.lc_updates_delay_queue.len() >= MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES { if self.lc_update_delay_debounce.elapsed() { error!( - log, - "Light client updates delay queue is full"; - "queue_size" => MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES, - "msg" => "check system clock" + queue_size = MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES, + msg = "check system clock", + "Light client updates delay queue is full" ); } // Drop the light client update. @@ -658,9 +639,8 @@ impl ReprocessQueue { if self.sampling_requests_delay_queue.len() >= MAXIMUM_QUEUED_SAMPLING_REQUESTS { if self.sampling_request_delay_debounce.elapsed() { error!( - log, - "Sampling requests delay queue is full"; - "queue_size" => MAXIMUM_QUEUED_SAMPLING_REQUESTS, + queue_size = MAXIMUM_QUEUED_SAMPLING_REQUESTS, + "Sampling requests delay queue is full" ); } // Drop the inbound message. @@ -724,23 +704,21 @@ impl ReprocessQueue { // There is a mismatch between the attestation ids registered for this // root and the queued attestations. This should never happen. 
error!( - log, - "Unknown queued attestation for block root"; - "block_root" => ?block_root, - "att_id" => ?id, + ?block_root, + att_id = ?id, + "Unknown queued attestation for block root" ); } } if failed_to_send_count > 0 { error!( - log, - "Ignored scheduled attestation(s) for block"; - "hint" => "system may be overloaded", - "parent_root" => ?parent_root, - "block_root" => ?block_root, - "failed_count" => failed_to_send_count, - "sent_count" => sent_count, + hint = "system may be overloaded", + ?parent_root, + ?block_root, + failed_count = failed_to_send_count, + sent_count, + "Ignored scheduled attestation(s) for block" ); } } @@ -772,18 +750,17 @@ impl ReprocessQueue { } } else { // This should never happen. - error!(log, "Unknown sampling request for block root"; "block_root" => ?block_root, "id" => ?id); + error!(?block_root, ?id, "Unknown sampling request for block root"); } } if failed_to_send_count > 0 { error!( - log, - "Ignored scheduled sampling requests for block"; - "hint" => "system may be overloaded", - "block_root" => ?block_root, - "failed_count" => failed_to_send_count, - "sent_count" => sent_count, + hint = "system may be overloaded", + ?block_root, + failed_to_send_count, + sent_count, + "Ignored scheduled sampling requests for block" ); } } @@ -795,10 +772,9 @@ impl ReprocessQueue { .remove(&parent_root) { debug!( - log, - "Dequeuing light client optimistic updates"; - "parent_root" => %parent_root, - "count" => queued_lc_id.len(), + %parent_root, + count = queued_lc_id.len(), + "Dequeuing light client optimistic updates" ); for lc_id in queued_lc_id { @@ -818,23 +794,16 @@ impl ReprocessQueue { // Send the work match self.ready_work_tx.try_send(work) { - Ok(_) => trace!( - log, - "reprocessing light client update sent"; - ), - Err(_) => error!( - log, - "Failed to send scheduled light client update"; - ), + Ok(_) => trace!("reprocessing light client update sent"), + Err(_) => error!("Failed to send scheduled light client update"), } } else 
{ // There is a mismatch between the light client update ids registered for this // root and the queued light client updates. This should never happen. error!( - log, - "Unknown queued light client update for parent root"; - "parent_root" => ?parent_root, - "lc_id" => ?lc_id, + ?parent_root, + ?lc_id, + "Unknown queued light client update for parent root" ); } } @@ -855,11 +824,7 @@ impl ReprocessQueue { if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this // program works, but still process the block anyway. - error!( - log, - "Unknown block in delay queue"; - "block_root" => ?block_root - ); + error!(?block_root, "Unknown block in delay queue"); } if self @@ -867,10 +832,7 @@ impl ReprocessQueue { .try_send(ReadyWork::Block(ready_block)) .is_err() { - error!( - log, - "Failed to pop queued block"; - ); + error!("Failed to pop queued block"); } } InboundEvent::ReadyAttestation(queued_id) => { @@ -901,10 +863,9 @@ impl ReprocessQueue { } { if self.ready_work_tx.try_send(work).is_err() { error!( - log, - "Ignored scheduled attestation"; - "hint" => "system may be overloaded", - "beacon_block_root" => ?root + hint = "system may be overloaded", + beacon_block_root = ?root, + "Ignored scheduled attestation" ); } @@ -929,10 +890,7 @@ impl ReprocessQueue { }, ) { if self.ready_work_tx.try_send(work).is_err() { - error!( - log, - "Failed to send scheduled light client optimistic update"; - ); + error!("Failed to send scheduled light client optimistic update"); } if let Some(queued_lc_updates) = self @@ -955,11 +913,7 @@ impl ReprocessQueue { duration.as_millis().to_string() }); - debug!( - log, - "Sending scheduled backfill work"; - "millis_from_slot_start" => millis_from_slot_start - ); + debug!(%millis_from_slot_start, "Sending scheduled backfill work"); match self .ready_work_tx @@ -971,9 +925,8 @@ impl ReprocessQueue { Err(mpsc::error::TrySendError::Full(ReadyWork::BackfillSync(batch))) | 
Err(mpsc::error::TrySendError::Closed(ReadyWork::BackfillSync(batch))) => { error!( - log, - "Failed to send scheduled backfill work"; - "info" => "sending work back to queue" + info = "sending work back to queue", + "Failed to send scheduled backfill work" ); self.queued_backfill_batches.insert(0, batch); @@ -984,10 +937,7 @@ impl ReprocessQueue { } // The message was not sent and we didn't get the correct // return result. This is a logic error. - _ => crit!( - log, - "Unexpected return from try_send error"; - ), + _ => crit!("Unexpected return from try_send error"), } } } @@ -1057,7 +1007,7 @@ impl ReprocessQueue { #[cfg(test)] mod tests { use super::*; - use logging::test_logger; + use logging::create_test_tracing_subscriber; use slot_clock::{ManualSlotClock, TestingSlotClock}; use std::ops::Add; use std::sync::Arc; @@ -1105,8 +1055,8 @@ mod tests { // See: https://github.com/sigp/lighthouse/issues/5504#issuecomment-2050930045 #[tokio::test] async fn backfill_schedule_failed_should_reschedule() { + create_test_tracing_subscriber(); let runtime = TestRuntime::default(); - let log = test_logger(); let (work_reprocessing_tx, work_reprocessing_rx) = mpsc::channel(1); let (ready_work_tx, mut ready_work_rx) = mpsc::channel(1); let slot_duration = 12; @@ -1117,7 +1067,6 @@ mod tests { work_reprocessing_rx, &runtime.task_executor, slot_clock.clone(), - log, Duration::from_millis(500), ) .unwrap(); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 614115eb58..e11fc23072 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,6 +27,7 @@ http_api = { workspace = true } http_metrics = { path = "../http_metrics" } kzg = { workspace = true } lighthouse_network = { workspace = true } +logging = { workspace = true } metrics = { workspace = true } monitoring_api = { workspace = true } network = { workspace = true } @@ -35,11 +36,12 @@ serde = { workspace = true } serde_json = { workspace = true } slasher = { workspace 
= true } slasher_service = { path = "../../slasher/service" } -slog = { workspace = true } slot_clock = { workspace = true } store = { workspace = true } task_executor = { workspace = true } time = "0.3.5" timer = { path = "../timer" } tokio = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e3bfd60a48..c8ff6521c8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -35,7 +35,6 @@ use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; -use slog::{debug, info, warn, Logger}; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -44,6 +43,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; use tokio::sync::oneshot; +use tracing::{debug, info, warn}; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -170,11 +170,9 @@ where let runtime_context = runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; let context = runtime_context.service_context("beacon".into()); - let log = context.log(); let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; let event_handler = if self.http_api_config.enabled { Some(ServerSentEventHandler::new( - context.log().clone(), self.http_api_config.sse_capacity_multiplier, )) } else { @@ -183,12 +181,8 @@ where let execution_layer = if let Some(config) = config.execution_layer.clone() { let context = runtime_context.service_context("exec".into()); - let execution_layer = ExecutionLayer::from_config( - config, - context.executor.clone(), - context.log().clone(), - ) - 
.map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; + let execution_layer = ExecutionLayer::from_config(config, context.executor.clone()) + .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; Some(execution_layer) } else { None @@ -205,7 +199,6 @@ where }; let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) - .logger(context.log().clone()) .store(store) .task_executor(context.executor.clone()) .custom_spec(spec.clone()) @@ -245,7 +238,7 @@ where // using it. let client_genesis = if matches!(client_genesis, ClientGenesis::FromStore) && !chain_exists { - info!(context.log(), "Defaulting to deposit contract genesis"); + info!("Defaulting to deposit contract genesis"); ClientGenesis::DepositContract } else if chain_exists { @@ -253,9 +246,8 @@ where || matches!(client_genesis, ClientGenesis::CheckpointSyncUrl { .. }) { info!( - context.log(), - "Refusing to checkpoint sync"; - "msg" => "database already exists, use --purge-db to force checkpoint sync" + msg = "database already exists, use --purge-db to force checkpoint sync", + "Refusing to checkpoint sync" ); } @@ -295,12 +287,9 @@ where builder.genesis_state(genesis_state).map(|v| (v, None))? 
} ClientGenesis::GenesisState => { - info!( - context.log(), - "Starting from known genesis state"; - ); + info!("Starting from known genesis state"); - let genesis_state = genesis_state(&runtime_context, &config, log).await?; + let genesis_state = genesis_state(&runtime_context, &config).await?; // If the user has not explicitly allowed genesis sync, prevent // them from trying to sync from genesis if we're outside of the @@ -348,12 +337,9 @@ where anchor_block_bytes, anchor_blobs_bytes, } => { - info!(context.log(), "Starting checkpoint sync"); + info!("Starting checkpoint sync"); if config.chain.genesis_backfill { - info!( - context.log(), - "Blocks will downloaded all the way back to genesis" - ); + info!("Blocks will be downloaded all the way back to genesis"); } let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec) @@ -371,7 +357,7 @@ where } else { None }; - let genesis_state = genesis_state(&runtime_context, &config, log).await?; + let genesis_state = genesis_state(&runtime_context, &config).await?; builder .weak_subjectivity_state( @@ -384,15 +370,11 @@ where } ClientGenesis::CheckpointSyncUrl { url } => { info!( - context.log(), - "Starting checkpoint sync"; - "remote_url" => %url, + remote_url = %url, + "Starting checkpoint sync" ); if config.chain.genesis_backfill { - info!( - context.log(), - "Blocks will be downloaded all the way back to genesis" - ); + info!("Blocks will be downloaded all the way back to genesis"); } let remote = BeaconNodeHttpClient::new(
- warn!(context.log(), "Remote BN sent invalid deposit snapshot!"); + warn!("Remote BN sent invalid deposit snapshot!"); None } } Ok(None) => { - warn!( - context.log(), - "Remote BN does not support EIP-4881 fast deposit sync" - ); + warn!("Remote BN does not support EIP-4881 fast deposit sync"); None } Err(e) => { warn!( - context.log(), - "Remote BN does not support EIP-4881 fast deposit sync"; - "error" => e + error = e, + "Remote BN does not support EIP-4881 fast deposit sync" ); None } @@ -447,21 +425,18 @@ where None }; - debug!( - context.log(), - "Downloading finalized state"; - ); + debug!("Downloading finalized state"); let state = remote .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) .await .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; - debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); + debug!(slot = ?state.slot(), "Downloaded finalized state"); let finalized_block_slot = state.latest_block_header().slot; - debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); + debug!(block_slot = ?finalized_block_slot,"Downloading finalized block"); let block = remote .get_beacon_blocks_ssz::(BlockId::Slot(finalized_block_slot), &spec) .await @@ -476,24 +451,23 @@ where .ok_or("Finalized block missing from remote, it returned 404")?; let block_root = block.canonical_root(); - debug!(context.log(), "Downloaded finalized block"); + debug!("Downloaded finalized block"); let blobs = if block.message().body().has_blobs() { - debug!(context.log(), "Downloading finalized blobs"); + debug!("Downloading finalized blobs"); if let Some(response) = remote .get_blobs::(BlockId::Root(block_root), None) .await .map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))? 
{ - debug!(context.log(), "Downloaded finalized blobs"); + debug!("Downloaded finalized blobs"); Some(response.data) } else { warn!( - context.log(), - "Checkpoint server is missing blobs"; - "block_root" => %block_root, - "hint" => "use a different URL or ask the provider to update", - "impact" => "db will be slightly corrupt until these blobs are pruned", + block_root = %block_root, + hint = "use a different URL or ask the provider to update", + impact = "db will be slightly corrupt until these blobs are pruned", + "Checkpoint server is missing blobs" ); None } @@ -501,35 +475,31 @@ where None }; - let genesis_state = genesis_state(&runtime_context, &config, log).await?; + let genesis_state = genesis_state(&runtime_context, &config).await?; info!( - context.log(), - "Loaded checkpoint block and state"; - "block_slot" => block.slot(), - "state_slot" => state.slot(), - "block_root" => ?block_root, + block_slot = %block.slot(), + state_slot = %state.slot(), + block_root = ?block_root, + "Loaded checkpoint block and state" ); let service = deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( config.eth1, - context.log().clone(), spec.clone(), &snapshot, ) { Ok(service) => { info!( - context.log(), - "Loaded deposit tree snapshot"; - "deposits loaded" => snapshot.deposit_count, + deposits_loaded = snapshot.deposit_count, + "Loaded deposit tree snapshot" ); Some(service) } Err(e) => { - warn!(context.log(), - "Unable to load deposit snapshot"; - "error" => ?e + warn!(error = ?e, + "Unable to load deposit snapshot" ); None } @@ -541,18 +511,14 @@ where } ClientGenesis::DepositContract => { info!( - context.log(), - "Waiting for eth2 genesis from eth1"; - "eth1_endpoints" => format!("{:?}", &config.eth1.endpoint), - "contract_deploy_block" => config.eth1.deposit_contract_deploy_block, - "deposit_contract" => &config.eth1.deposit_contract_address + eth1_endpoints = ?config.eth1.endpoint, + contract_deploy_block = 
config.eth1.deposit_contract_deploy_block, + deposit_contract = &config.eth1.deposit_contract_address, + "Waiting for eth2 genesis from eth1" ); - let genesis_service = Eth1GenesisService::new( - config.eth1, - context.log().clone(), - context.eth2_config().spec.clone(), - )?; + let genesis_service = + Eth1GenesisService::new(config.eth1, context.eth2_config().spec.clone())?; // If the HTTP API server is enabled, start an instance of it where it only // contains a reference to the eth1 service (all non-eth1 endpoints will fail @@ -575,7 +541,6 @@ where beacon_processor_send: None, beacon_processor_reprocess_send: None, eth1_service: Some(genesis_service.eth1_service.clone()), - log: context.log().clone(), sse_logging_components: runtime_context.sse_logging_components.clone(), }); @@ -587,10 +552,9 @@ where let (listen_addr, server) = http_api::serve(ctx, exit_future) .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; - let log_clone = context.log().clone(); let http_api_task = async move { server.await; - debug!(log_clone, "HTTP API server task ended"); + debug!("HTTP API server task ended"); }; context @@ -617,9 +581,8 @@ where // We will restart it again after we've finished setting up for genesis. while TcpListener::bind(http_listen).is_err() { warn!( - context.log(), - "Waiting for HTTP server port to open"; - "port" => http_listen + port = %http_listen, + "Waiting for HTTP server port to open" ); tokio::time::sleep(Duration::from_secs(1)).await; } @@ -738,7 +701,7 @@ where .as_ref() .ok_or("monitoring_client requires a runtime_context")? 
.service_context("monitoring_client".into()); - let monitoring_client = MonitoringHttpClient::new(config, context.log().clone())?; + let monitoring_client = MonitoringHttpClient::new(config)?; monitoring_client.auto_update( context.executor, vec![ProcessType::BeaconNode, ProcessType::System], @@ -798,7 +761,6 @@ where .beacon_processor_config .take() .ok_or("build requires a beacon_processor_config")?; - let log = runtime_context.log().clone(); let http_api_listen_addr = if self.http_api_config.enabled { let ctx = Arc::new(http_api::Context { @@ -812,7 +774,6 @@ where beacon_processor_channels.work_reprocessing_tx.clone(), ), sse_logging_components: runtime_context.sse_logging_components.clone(), - log: log.clone(), }); let exit = runtime_context.executor.exit(); @@ -820,10 +781,9 @@ where let (listen_addr, server) = http_api::serve(ctx, exit) .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; - let http_log = runtime_context.log().clone(); let http_api_task = async move { server.await; - debug!(http_log, "HTTP API server task ended"); + debug!("HTTP API server task ended"); }; runtime_context @@ -833,7 +793,7 @@ where Some(listen_addr) } else { - info!(log, "HTTP server is disabled"); + info!("HTTP server is disabled"); None }; @@ -844,7 +804,6 @@ where db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), gossipsub_registry: self.libp2p_registry.take().map(std::sync::Mutex::new), - log: log.clone(), }); let exit = runtime_context.executor.exit(); @@ -858,7 +817,7 @@ where Some(listen_addr) } else { - debug!(log, "Metrics server is disabled"); + debug!("Metrics server is disabled"); None }; @@ -874,7 +833,6 @@ where executor: beacon_processor_context.executor.clone(), current_workers: 0, config: beacon_processor_config, - log: beacon_processor_context.log().clone(), } .spawn_manager( beacon_processor_channels.beacon_processor_rx, @@ -895,12 +853,7 @@ where } let state_advance_context = 
runtime_context.service_context("state_advance".into()); - let state_advance_log = state_advance_context.log().clone(); - spawn_state_advance_timer( - state_advance_context.executor, - beacon_chain.clone(), - state_advance_log, - ); + spawn_state_advance_timer(state_advance_context.executor, beacon_chain.clone()); if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. @@ -929,9 +882,8 @@ where // node comes online. if let Err(e) = result { warn!( - log, - "Failed to update head on execution engines"; - "error" => ?e + error = ?e, + "Failed to update head on execution engines" ); } }, @@ -954,14 +906,12 @@ where let inner_chain = beacon_chain.clone(); let light_client_update_context = runtime_context.service_context("lc_update".to_string()); - let log = light_client_update_context.log().clone(); light_client_update_context.executor.spawn( async move { compute_light_client_updates( &inner_chain, light_client_server_rv, beacon_processor_channels.work_reprocessing_tx, - &log, ) .await }, @@ -1044,7 +994,6 @@ where cold_path: &Path, blobs_path: &Path, config: StoreConfig, - log: Logger, ) -> Result { let context = self .runtime_context @@ -1073,7 +1022,6 @@ where genesis_state_root, from, to, - log, ) }; @@ -1084,7 +1032,6 @@ where schema_upgrade, config, spec, - context.log().clone(), ) .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(store); @@ -1132,22 +1079,15 @@ where CachingEth1Backend::from_service(eth1_service_from_genesis) } else if config.purge_cache { - CachingEth1Backend::new(config, context.log().clone(), spec)? + CachingEth1Backend::new(config, spec)? } else { beacon_chain_builder .get_persisted_eth1_backend()? 
.map(|persisted| { - Eth1Chain::from_ssz_container( - &persisted, - config.clone(), - &context.log().clone(), - spec.clone(), - ) - .map(|chain| chain.into_backend()) + Eth1Chain::from_ssz_container(&persisted, config.clone(), spec.clone()) + .map(|chain| chain.into_backend()) }) - .unwrap_or_else(|| { - CachingEth1Backend::new(config, context.log().clone(), spec.clone()) - })? + .unwrap_or_else(|| CachingEth1Backend::new(config, spec.clone()))? }; self.eth1_service = Some(backend.core.clone()); @@ -1230,7 +1170,6 @@ where async fn genesis_state( context: &RuntimeContext, config: &ClientConfig, - log: &Logger, ) -> Result, String> { let eth2_network_config = context .eth2_network_config @@ -1240,7 +1179,6 @@ async fn genesis_state( .genesis_state::( config.genesis_state_url.as_deref(), config.genesis_state_url_timeout, - log, ) .await? .ok_or_else(|| "Genesis state is unknown".to_string()) diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs index 1eb977d421..fab284c428 100644 --- a/beacon_node/client/src/compute_light_client_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -2,8 +2,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; use futures::channel::mpsc::Receiver; use futures::StreamExt; -use slog::{error, Logger}; use tokio::sync::mpsc::Sender; +use tracing::error; // Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent // updates it is okay to drop some events in case of overloading. In normal network conditions @@ -15,7 +15,6 @@ pub async fn compute_light_client_updates( chain: &BeaconChain, mut light_client_server_rv: Receiver>, reprocess_tx: Sender, - log: &Logger, ) { // Should only receive events for recent blocks, import_block filters by blocks close to clock. 
// @@ -28,12 +27,12 @@ pub async fn compute_light_client_updates( chain .recompute_and_cache_light_client_updates(event) .unwrap_or_else(|e| { - error!(log, "error computing light_client updates {:?}", e); + error!("error computing light_client updates {:?}", e); }); let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root }; if reprocess_tx.try_send(msg).is_err() { - error!(log, "Failed to inform light client update"; "parent_root" => %parent_root) + error!(%parent_root,"Failed to inform light client update") }; } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index c735d12538..d103d48dfb 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -8,12 +8,13 @@ use beacon_chain::{ BeaconChain, BeaconChainTypes, ExecutionStatus, }; use lighthouse_network::{types::SyncState, NetworkGlobals}; -use slog::{crit, debug, error, info, warn, Logger}; +use logging::crit; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::time::sleep; +use tracing::{debug, error, info, warn}; use types::*; /// Create a warning log whenever the peer count is at or below this value. @@ -39,7 +40,6 @@ pub fn spawn_notifier( let slot_duration = Duration::from_secs(seconds_per_slot); let speedo = Mutex::new(Speedo::default()); - let log = executor.log().clone(); // Keep track of sync state and reset the speedo on specific sync state changes. // Specifically, if we switch between a sync and a backfill sync, reset the speedo. @@ -56,15 +56,14 @@ pub fn spawn_notifier( // waiting for genesis. 
Some(next_slot) if next_slot > slot_duration => { info!( - log, - "Waiting for genesis"; - "peers" => peer_count_pretty(network.connected_peers()), - "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), + peers = peer_count_pretty(network.connected_peers()), + wait_time = estimated_time_pretty(Some(next_slot.as_secs() as f64)), + "Waiting for genesis" ); - eth1_logging(&beacon_chain, &log); - bellatrix_readiness_logging(Slot::new(0), &beacon_chain, &log).await; - capella_readiness_logging(Slot::new(0), &beacon_chain, &log).await; - genesis_execution_payload_logging(&beacon_chain, &log).await; + eth1_logging(&beacon_chain); + bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await; + capella_readiness_logging(Slot::new(0), &beacon_chain).await; + genesis_execution_payload_logging(&beacon_chain).await; sleep(slot_duration).await; } _ => break, @@ -82,7 +81,7 @@ pub fn spawn_notifier( let wait = match beacon_chain.slot_clock.duration_to_next_slot() { Some(duration) => duration + slot_duration / 2, None => { - warn!(log, "Unable to read current slot"); + warn!("Unable to read current slot"); sleep(slot_duration).await; continue; } @@ -120,11 +119,7 @@ pub fn spawn_notifier( let current_slot = match beacon_chain.slot() { Ok(slot) => slot, Err(e) => { - error!( - log, - "Unable to read current slot"; - "error" => format!("{:?}", e) - ); + error!(error = ?e, "Unable to read current slot"); break; } }; @@ -168,19 +163,21 @@ pub fn spawn_notifier( ); if connected_peer_count <= WARN_PEER_COUNT { - warn!(log, "Low peer count"; "peer_count" => peer_count_pretty(connected_peer_count)); + warn!( + peer_count = peer_count_pretty(connected_peer_count), + "Low peer count" + ); } debug!( - log, - "Slot timer"; - "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_checkpoint.root), - "finalized_epoch" => finalized_checkpoint.epoch, - "head_block" => format!("{}", head_root), - "head_slot" => head_slot, - 
"current_slot" => current_slot, - "sync_state" =>format!("{}", current_sync_state) + peers = peer_count_pretty(connected_peer_count), + finalized_root = %finalized_checkpoint.root, + finalized_epoch = %finalized_checkpoint.epoch, + head_block = %head_root, + %head_slot, + %current_slot, + sync_state = %current_sync_state, + "Slot timer" ); // Log if we are backfilling. @@ -202,26 +199,31 @@ pub fn spawn_notifier( if display_speed { info!( - log, - "Downloading historical blocks"; - "distance" => distance, - "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))), + distance, + speed = sync_speed_pretty(speed), + est_time = estimated_time_pretty( + speedo.estimated_time_till_slot( + original_oldest_block_slot + .saturating_sub(beacon_chain.genesis_backfill_slot) + ) + ), + "Downloading historical blocks" ); } else { info!( - log, - "Downloading historical blocks"; - "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_oldest_block_slot.saturating_sub(beacon_chain.genesis_backfill_slot))), + distance, + est_time = estimated_time_pretty( + speedo.estimated_time_till_slot( + original_oldest_block_slot + .saturating_sub(beacon_chain.genesis_backfill_slot) + ) + ), + "Downloading historical blocks" ); } } else if !is_backfilling && last_backfill_log_slot.is_some() { last_backfill_log_slot = None; - info!( - log, - "Historical block download complete"; - ); + info!("Historical block download complete"); } // Log if we are syncing @@ -238,20 +240,20 @@ pub fn spawn_notifier( if display_speed { info!( - log, - "Syncing"; - "peers" => peer_count_pretty(connected_peer_count), - "distance" => distance, - "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + peers = peer_count_pretty(connected_peer_count), + distance, + speed = 
sync_speed_pretty(speed), + est_time = + estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + "Syncing" ); } else { info!( - log, - "Syncing"; - "peers" => peer_count_pretty(connected_peer_count), - "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + peers = peer_count_pretty(connected_peer_count), + distance, + est_time = + estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), + "Syncing" ); } } else if current_sync_state.is_synced() { @@ -267,20 +269,18 @@ pub fn spawn_notifier( Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), Ok(ExecutionStatus::Optimistic(hash)) => { warn!( - log, - "Head is optimistic"; - "info" => "chain not fully verified, \ - block and attestation production disabled until execution engine syncs", - "execution_block_hash" => ?hash, + info = "chain not fully verified, \ + block and attestation production disabled until execution engine syncs", + execution_block_hash = ?hash, + "Head is optimistic" ); format!("{} (unverified)", hash) } Ok(ExecutionStatus::Invalid(hash)) => { crit!( - log, - "Head execution payload is invalid"; - "msg" => "this scenario may be unrecoverable", - "execution_block_hash" => ?hash, + msg = "this scenario may be unrecoverable", + execution_block_hash = ?hash, + "Head execution payload is invalid" ); format!("{} (invalid)", hash) } @@ -288,35 +288,33 @@ pub fn spawn_notifier( }; info!( - log, - "Synced"; - "peers" => peer_count_pretty(connected_peer_count), - "exec_hash" => block_hash, - "finalized_root" => format!("{}", finalized_checkpoint.root), - "finalized_epoch" => finalized_checkpoint.epoch, - "epoch" => current_epoch, - "block" => block_info, - "slot" => current_slot, + peers = peer_count_pretty(connected_peer_count), + exec_hash = block_hash, + finalized_root = %finalized_checkpoint.root, + finalized_epoch = %finalized_checkpoint.epoch, + epoch = %current_epoch, + block = block_info, + slot = 
%current_slot, + "Synced" ); } else { metrics::set_gauge(&metrics::IS_SYNCED, 0); info!( - log, - "Searching for peers"; - "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_checkpoint.root), - "finalized_epoch" => finalized_checkpoint.epoch, - "head_slot" => head_slot, - "current_slot" => current_slot, + peers = peer_count_pretty(connected_peer_count), + finalized_root = %finalized_checkpoint.root, + finalized_epoch = %finalized_checkpoint.epoch, + %head_slot, + %current_slot, + "Searching for peers" ); } - eth1_logging(&beacon_chain, &log); - bellatrix_readiness_logging(current_slot, &beacon_chain, &log).await; - capella_readiness_logging(current_slot, &beacon_chain, &log).await; - deneb_readiness_logging(current_slot, &beacon_chain, &log).await; - electra_readiness_logging(current_slot, &beacon_chain, &log).await; - fulu_readiness_logging(current_slot, &beacon_chain, &log).await; + eth1_logging(&beacon_chain); + bellatrix_readiness_logging(current_slot, &beacon_chain).await; + capella_readiness_logging(current_slot, &beacon_chain).await; + deneb_readiness_logging(current_slot, &beacon_chain).await; + electra_readiness_logging(current_slot, &beacon_chain).await; + fulu_readiness_logging(current_slot, &beacon_chain).await; } }; @@ -331,7 +329,6 @@ pub fn spawn_notifier( async fn bellatrix_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, - log: &Logger, ) { let merge_completed = beacon_chain .canonical_head @@ -355,10 +352,9 @@ async fn bellatrix_readiness_logging( // Logging of the EE being offline is handled in the other readiness logging functions. 
if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { error!( - log, - "Execution endpoint required"; - "info" => "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" + info = "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html", + "Execution endpoint required" ); } return; @@ -375,12 +371,11 @@ async fn bellatrix_readiness_logging( terminal_block_hash_epoch: None, } => { info!( - log, - "Ready for Bellatrix"; - "terminal_total_difficulty" => %ttd, - "current_difficulty" => current_difficulty + terminal_total_difficulty = %ttd, + current_difficulty = current_difficulty .map(|d| d.to_string()) .unwrap_or_else(|| "??".into()), + "Ready for Bellatrix" ) } MergeConfig { @@ -389,29 +384,25 @@ async fn bellatrix_readiness_logging( terminal_block_hash_epoch: Some(terminal_block_hash_epoch), } => { info!( - log, - "Ready for Bellatrix"; - "info" => "you are using override parameters, please ensure that you \ - understand these parameters and their implications.", - "terminal_block_hash" => ?terminal_block_hash, - "terminal_block_hash_epoch" => ?terminal_block_hash_epoch, + info = "you are using override parameters, please ensure that you \ + understand these parameters and their implications.", + ?terminal_block_hash, + ?terminal_block_hash_epoch, + "Ready for Bellatrix" ) } other => error!( - log, - "Inconsistent merge configuration"; - "config" => ?other + config = ?other, + "Inconsistent merge configuration" ), }, readiness @ BellatrixReadiness::NotSynced => warn!( - log, - "Not ready Bellatrix"; - "info" => %readiness, + info = %readiness, + "Not ready Bellatrix" ), readiness @ BellatrixReadiness::NoExecutionEndpoint => warn!( - log, - "Not ready for Bellatrix"; - "info" => %readiness, + info = %readiness, + "Not ready for Bellatrix" ), } } @@ -420,7 +411,6 @@ async fn bellatrix_readiness_logging( async fn 
capella_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, - log: &Logger, ) { let capella_completed = beacon_chain .canonical_head @@ -442,10 +432,9 @@ async fn capella_readiness_logging( // Logging of the EE being offline is handled in the other readiness logging functions. if !beacon_chain.is_time_to_prepare_for_deneb(current_slot) { error!( - log, - "Execution endpoint required"; - "info" => "you need a Capella enabled execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" + info = "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html", + "Execution endpoint required" ); } return; @@ -454,24 +443,21 @@ async fn capella_readiness_logging( match beacon_chain.check_capella_readiness().await { CapellaReadiness::Ready => { info!( - log, - "Ready for Capella"; - "info" => "ensure the execution endpoint is updated to the latest Capella/Shanghai release" + info = "ensure the execution endpoint is updated to the latest Capella/Shanghai release", + "Ready for Capella" ) } readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { error!( - log, - "Not ready for Capella"; - "hint" => "the execution endpoint may be offline", - "info" => %readiness, + hint = "the execution endpoint may be offline", + info = %readiness, + "Not ready for Capella" ) } readiness => warn!( - log, - "Not ready for Capella"; - "hint" => "try updating the execution endpoint", - "info" => %readiness, + hint = "try updating the execution endpoint", + info = %readiness, + "Not ready for Capella" ), } } @@ -480,7 +466,6 @@ async fn capella_readiness_logging( async fn deneb_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, - log: &Logger, ) { let deneb_completed = beacon_chain .canonical_head @@ -500,9 +485,8 @@ async fn deneb_readiness_logging( if deneb_completed && !has_execution_layer { error!( - log, - "Execution endpoint 
required"; - "info" => "you need a Deneb enabled execution engine to validate blocks." + info = "you need a Deneb enabled execution engine to validate blocks.", + "Execution endpoint required" ); return; } @@ -510,24 +494,22 @@ async fn deneb_readiness_logging( match beacon_chain.check_deneb_readiness().await { DenebReadiness::Ready => { info!( - log, - "Ready for Deneb"; - "info" => "ensure the execution endpoint is updated to the latest Deneb/Cancun release" + info = + "ensure the execution endpoint is updated to the latest Deneb/Cancun release", + "Ready for Deneb" ) } readiness @ DenebReadiness::ExchangeCapabilitiesFailed { error: _ } => { error!( - log, - "Not ready for Deneb"; - "hint" => "the execution endpoint may be offline", - "info" => %readiness, + hint = "the execution endpoint may be offline", + info = %readiness, + "Not ready for Deneb" ) } readiness => warn!( - log, - "Not ready for Deneb"; - "hint" => "try updating the execution endpoint", - "info" => %readiness, + hint = "try updating the execution endpoint", + info = %readiness, + "Not ready for Deneb" ), } } @@ -535,7 +517,6 @@ async fn deneb_readiness_logging( async fn electra_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, - log: &Logger, ) { let electra_completed = beacon_chain .canonical_head @@ -556,9 +537,8 @@ async fn electra_readiness_logging( if electra_completed && !has_execution_layer { // When adding a new fork, add a check for the next fork readiness here. error!( - log, - "Execution endpoint required"; - "info" => "you need a Electra enabled execution engine to validate blocks." 
+ info = "you need a Electra enabled execution engine to validate blocks.", + "Execution endpoint required" ); return; } @@ -566,24 +546,22 @@ async fn electra_readiness_logging( match beacon_chain.check_electra_readiness().await { ElectraReadiness::Ready => { info!( - log, - "Ready for Electra"; - "info" => "ensure the execution endpoint is updated to the latest Electra/Prague release" + info = + "ensure the execution endpoint is updated to the latest Electra/Prague release", + "Ready for Electra" ) } readiness @ ElectraReadiness::ExchangeCapabilitiesFailed { error: _ } => { error!( - log, - "Not ready for Electra"; - "hint" => "the execution endpoint may be offline", - "info" => %readiness, + hint = "the execution endpoint may be offline", + info = %readiness, + "Not ready for Electra" ) } readiness => warn!( - log, - "Not ready for Electra"; - "hint" => "try updating the execution endpoint", - "info" => %readiness, + hint = "try updating the execution endpoint", + info = %readiness, + "Not ready for Electra" ), } } @@ -592,7 +570,6 @@ async fn electra_readiness_logging( async fn fulu_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, - log: &Logger, ) { let fulu_completed = beacon_chain .canonical_head @@ -612,9 +589,8 @@ async fn fulu_readiness_logging( if fulu_completed && !has_execution_layer { error!( - log, - "Execution endpoint required"; - "info" => "you need a Fulu enabled execution engine to validate blocks." 
+ info = "you need a Fulu enabled execution engine to validate blocks.", + "Execution endpoint required" ); return; } @@ -622,102 +598,86 @@ async fn fulu_readiness_logging( match beacon_chain.check_fulu_readiness().await { FuluReadiness::Ready => { info!( - log, - "Ready for Fulu"; - "info" => "ensure the execution endpoint is updated to the latest Fulu release" + info = "ensure the execution endpoint is updated to the latest Fulu release", + "Ready for Fulu" ) } readiness @ FuluReadiness::ExchangeCapabilitiesFailed { error: _ } => { error!( - log, - "Not ready for Fulu"; - "hint" => "the execution endpoint may be offline", - "info" => %readiness, + hint = "the execution endpoint may be offline", + info = %readiness, + "Not ready for Fulu" ) } readiness => warn!( - log, - "Not ready for Fulu"; - "hint" => "try updating the execution endpoint", - "info" => %readiness, + hint = "try updating the execution endpoint", + info = %readiness, + "Not ready for Fulu" ), } } -async fn genesis_execution_payload_logging( - beacon_chain: &BeaconChain, - log: &Logger, -) { +async fn genesis_execution_payload_logging(beacon_chain: &BeaconChain) { match beacon_chain .check_genesis_execution_payload_is_correct() .await { Ok(GenesisExecutionPayloadStatus::Correct(block_hash)) => { info!( - log, - "Execution enabled from genesis"; - "genesis_payload_block_hash" => ?block_hash, + genesis_payload_block_hash = ?block_hash, + "Execution enabled from genesis" ); } Ok(GenesisExecutionPayloadStatus::BlockHashMismatch { got, expected }) => { error!( - log, - "Genesis payload block hash mismatch"; - "info" => "genesis is misconfigured and likely to fail", - "consensus_node_block_hash" => ?expected, - "execution_node_block_hash" => ?got, + info = "genesis is misconfigured and likely to fail", + consensus_node_block_hash = ?expected, + execution_node_block_hash = ?got, + "Genesis payload block hash mismatch" ); } Ok(GenesisExecutionPayloadStatus::TransactionsRootMismatch { got, expected }) => { 
error!( - log, - "Genesis payload transactions root mismatch"; - "info" => "genesis is misconfigured and likely to fail", - "consensus_node_transactions_root" => ?expected, - "execution_node_transactions_root" => ?got, + info = "genesis is misconfigured and likely to fail", + consensus_node_transactions_root = ?expected, + execution_node_transactions_root = ?got, + "Genesis payload transactions root mismatch" ); } Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, expected }) => { error!( - log, - "Genesis payload withdrawals root mismatch"; - "info" => "genesis is misconfigured and likely to fail", - "consensus_node_withdrawals_root" => ?expected, - "execution_node_withdrawals_root" => ?got, + info = "genesis is misconfigured and likely to fail", + consensus_node_withdrawals_root = ?expected, + execution_node_withdrawals_root = ?got, + "Genesis payload withdrawals root mismatch" ); } Ok(GenesisExecutionPayloadStatus::OtherMismatch) => { error!( - log, - "Genesis payload header mismatch"; - "info" => "genesis is misconfigured and likely to fail", - "detail" => "see debug logs for payload headers" + info = "genesis is misconfigured and likely to fail", + detail = "see debug logs for payload headers", + "Genesis payload header mismatch" ); } Ok(GenesisExecutionPayloadStatus::Irrelevant) => { - info!( - log, - "Execution is not enabled from genesis"; - ); + info!("Execution is not enabled from genesis"); } Ok(GenesisExecutionPayloadStatus::AlreadyHappened) => { warn!( - log, - "Unable to check genesis which has already occurred"; - "info" => "this is probably a race condition or a bug" + info = "this is probably a race condition or a bug", + "Unable to check genesis which has already occurred" ); } Err(e) => { error!( - log, - "Unable to check genesis execution payload"; - "error" => ?e + error = ?e, + "Unable to check genesis execution payload" ); } } } -fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { +fn eth1_logging(beacon_chain: 
&BeaconChain) { let current_slot_opt = beacon_chain.slot().ok(); // Perform some logging about the eth1 chain @@ -733,13 +693,12 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger &beacon_chain.spec, ) { debug!( - log, - "Eth1 cache sync status"; - "eth1_head_block" => status.head_block_number, - "latest_cached_block_number" => status.latest_cached_block_number, - "latest_cached_timestamp" => status.latest_cached_block_timestamp, - "voting_target_timestamp" => status.voting_target_timestamp, - "ready" => status.lighthouse_is_cached_and_ready + eth1_head_block = status.head_block_number, + latest_cached_block_number = status.latest_cached_block_number, + latest_cached_timestamp = status.latest_cached_block_timestamp, + voting_target_timestamp = status.voting_target_timestamp, + ready = status.lighthouse_is_cached_and_ready, + "Eth1 cache sync status" ); if !status.lighthouse_is_cached_and_ready { @@ -755,16 +714,12 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger .unwrap_or_else(|| "initializing deposits".to_string()); warn!( - log, - "Syncing deposit contract block cache"; - "est_blocks_remaining" => distance, + est_blocks_remaining = distance, + "Syncing deposit contract block cache" ); } } else { - error!( - log, - "Unable to determine deposit contract sync status"; - ); + error!("Unable to determine deposit contract sync status"); } } } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 8ccd50aad8..fa08364251 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -8,7 +8,6 @@ edition = { workspace = true } environment = { workspace = true } eth1_test_rig = { workspace = true } serde_yaml = { workspace = true } -sloggers = { workspace = true } [dependencies] eth2 = { workspace = true } @@ -22,10 +21,10 @@ metrics = { workspace = true } parking_lot = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } -slog = { workspace = true } state_processing = { workspace = 
true } superstruct = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 71ab98a6a2..6b10bd2215 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -13,13 +13,13 @@ use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::ops::{Range, RangeInclusive}; use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{interval_at, Duration, Instant}; +use tracing::{debug, error, info, trace, warn}; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. @@ -58,22 +58,16 @@ type EndpointState = Result<(), EndpointError>; /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. 
-async fn endpoint_state( - endpoint: &HttpJsonRpc, - config_chain_id: &Eth1Id, - log: &Logger, -) -> EndpointState { +async fn endpoint_state(endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id) -> EndpointState { let error_connecting = |e: String| { debug!( - log, - "eth1 endpoint error"; - "endpoint" => %endpoint, - "error" => &e, + %endpoint, + error = &e, + "eth1 endpoint error" ); warn!( - log, - "Error connecting to eth1 node endpoint"; - "endpoint" => %endpoint, + %endpoint, + "Error connecting to eth1 node endpoint" ); EndpointError::RequestFailed(e) }; @@ -86,19 +80,17 @@ async fn endpoint_state( // Handle the special case if chain_id == Eth1Id::Custom(0) { warn!( - log, - "Remote execution node is not synced"; - "endpoint" => %endpoint, + %endpoint, + "Remote execution node is not synced" ); return Err(EndpointError::FarBehind); } if &chain_id != config_chain_id { warn!( - log, - "Invalid execution chain ID. Please switch to correct chain ID on endpoint"; - "endpoint" => %endpoint, - "expected" => ?config_chain_id, - "received" => ?chain_id, + %endpoint, + expected = ?config_chain_id, + received = ?chain_id, + "Invalid execution chain ID. Please switch to correct chain ID on endpoint" ); Err(EndpointError::WrongChainId) } else { @@ -134,10 +126,9 @@ async fn get_remote_head_and_new_block_ranges( .unwrap_or(u64::MAX); if remote_head_block.timestamp + node_far_behind_seconds < now { warn!( - service.log, - "Execution endpoint is not synced"; - "endpoint" => %endpoint, - "last_seen_block_unix_timestamp" => remote_head_block.timestamp, + %endpoint, + last_seen_block_unix_timestamp = remote_head_block.timestamp, + "Execution endpoint is not synced" ); return Err(Error::EndpointError(EndpointError::FarBehind)); } @@ -145,9 +136,8 @@ async fn get_remote_head_and_new_block_ranges( let handle_remote_not_synced = |e| { if let Error::RemoteNotSynced { .. 
} = e { warn!( - service.log, - "Execution endpoint is not synced"; - "endpoint" => %endpoint, + %endpoint, + "Execution endpoint is not synced" ); } e @@ -392,12 +382,11 @@ pub fn endpoint_from_config(config: &Config) -> Result { #[derive(Clone)] pub struct Service { inner: Arc, - pub log: Logger, } impl Service { /// Creates a new service. Does not attempt to connect to the eth1 node. - pub fn new(config: Config, log: Logger, spec: Arc) -> Result { + pub fn new(config: Config, spec: Arc) -> Result { Ok(Self { inner: Arc::new(Inner { block_cache: <_>::default(), @@ -410,7 +399,6 @@ impl Service { config: RwLock::new(config), spec, }), - log, }) } @@ -425,7 +413,6 @@ impl Service { /// Creates a new service, initializing the deposit tree from a snapshot. pub fn from_deposit_snapshot( config: Config, - log: Logger, spec: Arc, deposit_snapshot: &DepositTreeSnapshot, ) -> Result { @@ -444,7 +431,6 @@ impl Service { config: RwLock::new(config), spec, }), - log, }) } @@ -464,16 +450,10 @@ impl Service { } /// Recover the deposit and block caches from encoded bytes. 
- pub fn from_bytes( - bytes: &[u8], - config: Config, - log: Logger, - spec: Arc, - ) -> Result { + pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { let inner = Inner::from_bytes(bytes, config, spec)?; Ok(Self { inner: Arc::new(inner), - log, }) } @@ -621,11 +601,10 @@ impl Service { &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { let client = self.client(); - let log = self.log.clone(); let chain_id = self.config().chain_id.clone(); let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds; - match endpoint_state(client, &chain_id, &log).await { + match endpoint_state(client, &chain_id).await { Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1), Err(e) => { crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); @@ -655,10 +634,9 @@ impl Service { { let mut deposit_cache = self.inner.deposit_cache.write(); debug!( - self.log, - "Resetting last processed block"; - "old_block_number" => deposit_cache.last_processed_block, - "new_block_number" => deposit_cache.cache.latest_block_number(), + old_block_number = deposit_cache.last_processed_block, + new_block_number = deposit_cache.cache.latest_block_number(), + "Resetting last processed block" ); deposit_cache.last_processed_block = Some(deposit_cache.cache.latest_block_number()); @@ -668,11 +646,11 @@ impl Service { outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?; trace!( - self.log, - "Updated deposit cache"; - "cached_deposits" => self.inner.deposit_cache.read().cache.len(), - "logs_imported" => outcome.logs_imported, - "last_processed_execution_block" => self.inner.deposit_cache.read().last_processed_block, + cached_deposits = self.inner.deposit_cache.read().cache.len(), + logs_imported = outcome.logs_imported, + last_processed_execution_block = + self.inner.deposit_cache.read().last_processed_block, + "Updated deposit cache" ); Ok::<_, String>(outcome) }; @@ -684,11 +662,10 @@ impl Service { 
.map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?; trace!( - self.log, - "Updated deposit contract block cache"; - "cached_blocks" => self.inner.block_cache.read().len(), - "blocks_imported" => outcome.blocks_imported, - "head_block" => outcome.head_block_number, + cached_blocks = self.inner.block_cache.read().len(), + blocks_imported = outcome.blocks_imported, + head_block = outcome.head_block_number, + "Updated deposit contract block cache" ); Ok::<_, String>(outcome) }; @@ -727,17 +704,15 @@ impl Service { let update_result = self.update().await; match update_result { Err(e) => error!( - self.log, - "Error updating deposit contract cache"; - "retry_millis" => update_interval.as_millis(), - "error" => e, + retry_millis = update_interval.as_millis(), + error = e, + "Error updating deposit contract cache" ), Ok((deposit, block)) => debug!( - self.log, - "Updated deposit contract cache"; - "retry_millis" => update_interval.as_millis(), - "blocks" => format!("{:?}", block), - "deposits" => format!("{:?}", deposit), + retry_millis = update_interval.as_millis(), + ?block, + ?deposit, + "Updated deposit contract cache" ), }; let optional_eth1data = self.inner.to_finalize.write().take(); @@ -752,23 +727,20 @@ impl Service { if deposit_count_to_finalize > already_finalized { match self.finalize_deposits(eth1data_to_finalize) { Err(e) => warn!( - self.log, - "Failed to finalize deposit cache"; - "error" => ?e, - "info" => "this should resolve on its own" + error = ?e, + info = "this should resolve on its own", + "Failed to finalize deposit cache" ), Ok(()) => info!( - self.log, - "Successfully finalized deposit tree"; - "finalized deposit count" => deposit_count_to_finalize, + finalized_deposit_count = deposit_count_to_finalize, + "Successfully finalized deposit tree" ), } } else { debug!( - self.log, - "Deposits tree already finalized"; - "already_finalized" => already_finalized, - "deposit_count_to_finalize" => deposit_count_to_finalize, + 
%already_finalized, + %deposit_count_to_finalize, + "Deposits tree already finalized" ); } } @@ -889,10 +861,7 @@ impl Service { let deposit_contract_address_ref: &str = &deposit_contract_address; for block_range in block_number_chunks.into_iter() { if block_range.is_empty() { - debug!( - self.log, - "No new blocks to scan for logs"; - ); + debug!("No new blocks to scan for logs"); continue; } @@ -946,11 +915,7 @@ impl Service { Ok::<_, Error>(()) })?; - debug!( - self.log, - "Imported deposit logs chunk"; - "logs" => logs.len(), - ); + debug!(logs = logs.len(), "Imported deposit logs chunk"); cache.last_processed_block = Some(block_range.end.saturating_sub(1)); @@ -963,18 +928,16 @@ impl Service { if logs_imported > 0 { info!( - self.log, - "Imported deposit log(s)"; - "latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(), - "total" => self.deposit_cache_len(), - "new" => logs_imported + latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), + total = self.deposit_cache_len(), + new = logs_imported, + "Imported deposit log(s)" ); } else { debug!( - self.log, - "No new deposits found"; - "latest_block" => self.inner.deposit_cache.read().cache.latest_block_number(), - "total_deposits" => self.deposit_cache_len(), + latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), + total_deposits = self.deposit_cache_len(), + "No new deposits found" ); } @@ -1058,10 +1021,9 @@ impl Service { .collect::>(); debug!( - self.log, - "Downloading execution blocks"; - "first" => ?required_block_numbers.first(), - "last" => ?required_block_numbers.last(), + first = ?required_block_numbers.first(), + last = ?required_block_numbers.last(), + "Downloading execution blocks" ); // Produce a stream from the list of required block numbers and return a future that @@ -1116,19 +1078,17 @@ impl Service { if blocks_imported > 0 { debug!( - self.log, - "Imported execution block(s)"; - "latest_block_age" => latest_block_mins, - 
"latest_block" => block_cache.highest_block_number(), - "total_cached_blocks" => block_cache.len(), - "new" => %blocks_imported + latest_block_age = latest_block_mins, + latest_block = block_cache.highest_block_number(), + total_cached_blocks = block_cache.len(), + new = %blocks_imported, + "Imported execution block(s)" ); } else { debug!( - self.log, - "No new execution blocks imported"; - "latest_block" => block_cache.highest_block_number(), - "cached_blocks" => block_cache.len(), + latest_block = block_cache.highest_block_number(), + cached_blocks = block_cache.len(), + "No new execution blocks imported" ); } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index e442ce4863..48ed189259 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -4,7 +4,7 @@ use eth1::{Config, Eth1Endpoint, Service}; use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; -use logging::test_logger; +use logging::create_test_tracing_subscriber; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use std::ops::Range; @@ -19,11 +19,10 @@ use types::{ const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; pub fn new_env() -> Environment { + create_test_tracing_subscriber(); EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .test_logger() - .expect("should start null logger") .build() .expect("should build env") } @@ -100,9 +99,8 @@ mod eth1_cache { #[tokio::test] async fn simple_scenario() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - for follow_distance in 0..3 { let eth1 = new_anvil_instance() .await @@ -123,12 +121,8 @@ mod eth1_cache { }; let cache_follow_distance = config.cache_follow_distance(); - let service = Service::new( - config, - log.clone(), - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); + let 
service = + Service::new(config, Arc::new(MainnetEthSpec::default_spec())).unwrap(); // Create some blocks and then consume them, performing the test `rounds` times. for round in 0..2 { @@ -186,9 +180,8 @@ mod eth1_cache { #[tokio::test] async fn big_skip() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); @@ -208,7 +201,6 @@ mod eth1_cache { block_cache_truncation: Some(cache_len), ..Config::default() }, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -241,9 +233,8 @@ mod eth1_cache { /// cache size. #[tokio::test] async fn pruning() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); @@ -263,7 +254,6 @@ mod eth1_cache { block_cache_truncation: Some(cache_len), ..Config::default() }, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -293,9 +283,8 @@ mod eth1_cache { #[tokio::test] async fn double_update() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let n = 16; let eth1 = new_anvil_instance() @@ -314,7 +303,6 @@ mod eth1_cache { follow_distance: 0, ..Config::default() }, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -346,9 +334,8 @@ mod deposit_tree { #[tokio::test] async fn updating() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let n = 4; let eth1 = new_anvil_instance() @@ -369,7 +356,6 @@ mod deposit_tree { follow_distance: 0, ..Config::default() }, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); @@ -427,9 +413,8 @@ mod deposit_tree { #[tokio::test] async fn double_update() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let n = 8; let eth1 = new_anvil_instance() @@ -451,7 +436,6 @@ mod deposit_tree { follow_distance: 0, ..Config::default() }, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); 
@@ -689,9 +673,8 @@ mod fast { // with the deposit count and root computed from the deposit cache. #[tokio::test] async fn deposit_cache_query() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); @@ -712,7 +695,6 @@ mod fast { block_cache_truncation: None, ..Config::default() }, - log, spec.clone(), ) .unwrap(); @@ -772,9 +754,8 @@ mod persist { use super::*; #[tokio::test] async fn test_persist_caches() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); @@ -793,12 +774,8 @@ mod persist { block_cache_truncation: None, ..Config::default() }; - let service = Service::new( - config.clone(), - log.clone(), - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); + let service = + Service::new(config.clone(), Arc::new(MainnetEthSpec::default_spec())).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -840,7 +817,6 @@ mod persist { let recovered_service = Service::from_bytes( ð1_bytes, config, - log, Arc::new(MainnetEthSpec::default_spec()), ) .unwrap(); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 7eb7b4a15e..580eac3c88 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -36,7 +36,6 @@ sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } ssz_types = { workspace = true } state_processing = { workspace = true } @@ -46,6 +45,7 @@ task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace 
= true } triehash = "0.8.4" diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 75d0b872ce..b9e030703d 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -6,7 +6,6 @@ use crate::engine_api::{ }; use crate::{ClientVersionV1, HttpJsonRpc}; use lru::LruCache; -use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::num::NonZeroUsize; use std::sync::Arc; @@ -14,6 +13,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; +use tracing::{debug, error, info, warn}; use types::non_zero_usize::new_non_zero_usize; use types::ExecutionBlockHash; @@ -128,19 +128,17 @@ pub struct Engine { state: RwLock, latest_forkchoice_state: RwLock>, executor: TaskExecutor, - log: Logger, } impl Engine { /// Creates a new, offline engine. - pub fn new(api: HttpJsonRpc, executor: TaskExecutor, log: &Logger) -> Self { + pub fn new(api: HttpJsonRpc, executor: TaskExecutor) -> Self { Self { api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: Default::default(), latest_forkchoice_state: Default::default(), executor, - log: log.clone(), } } @@ -167,7 +165,6 @@ impl Engine { &self, forkchoice_state: ForkchoiceState, payload_attributes: Option, - log: &Logger, ) -> Result { let response = self .api @@ -180,11 +177,7 @@ impl Engine { { self.payload_id_cache.lock().await.put(key, payload_id); } else { - debug!( - log, - "Engine returned unexpected payload_id"; - "payload_id" => ?payload_id - ); + debug!(?payload_id, "Engine returned unexpected payload_id"); } } @@ -205,33 +198,24 @@ impl Engine { if let Some(forkchoice_state) = latest_forkchoice_state { if forkchoice_state.head_block_hash == ExecutionBlockHash::zero() { debug!( - self.log, - "No need to call forkchoiceUpdated"; - "msg" => "head does not have execution enabled", + msg = "head does 
not have execution enabled", + "No need to call forkchoiceUpdated" ); return; } - info!( - self.log, - "Issuing forkchoiceUpdated"; - "forkchoice_state" => ?forkchoice_state, - ); + info!(?forkchoice_state, "Issuing forkchoiceUpdated"); // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await { debug!( - self.log, - "Failed to issue latest head to engine"; - "error" => ?e, + error = ?e, + "Failed to issue latest head to engine" ); } } else { - debug!( - self.log, - "No head, not sending to engine"; - ); + debug!("No head, not sending to engine"); } } @@ -252,18 +236,12 @@ impl Engine { Ok(()) => { let mut state = self.state.write().await; if **state != EngineStateInternal::Synced { - info!( - self.log, - "Execution engine online"; - ); + info!("Execution engine online"); // Send the node our latest forkchoice_state. self.send_latest_forkchoice_state().await; } else { - debug!( - self.log, - "Execution engine online"; - ); + debug!("Execution engine online"); } state.update(EngineStateInternal::Synced); (**state, ResponseCacheAction::Update) @@ -275,9 +253,8 @@ impl Engine { } Err(EngineApiError::Auth(err)) => { error!( - self.log, - "Failed jwt authorization"; - "error" => ?err, + error = ?err, + "Failed jwt authorization" ); let mut state = self.state.write().await; @@ -286,9 +263,8 @@ impl Engine { } Err(e) => { error!( - self.log, - "Error during execution engine upcheck"; - "error" => ?e, + error = ?e, + "Error during execution engine upcheck" ); let mut state = self.state.write().await; @@ -308,9 +284,9 @@ impl Engine { .get_engine_capabilities(Some(CACHED_RESPONSE_AGE_LIMIT)) .await { - warn!(self.log, - "Error during exchange capabilities"; - "error" => ?e, + warn!( + error = ?e, + "Error during exchange capabilities" ) } else { // no point in running this if there was an error fetching the capabilities @@ -326,11 
+302,7 @@ impl Engine { } } - debug!( - self.log, - "Execution engine upcheck complete"; - "state" => ?state, - ); + debug!(?state, "Execution engine upcheck complete"); } /// Returns the execution engine capabilities resulting from a call to @@ -395,11 +367,7 @@ impl Engine { Ok(result) } Err(error) => { - warn!( - self.log, - "Execution engine call failed"; - "error" => ?error, - ); + warn!(?error, "Execution engine call failed"); // The node just returned an error, run an upcheck so we can update the endpoint // state. diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cde6cc6f48..6644e46a0d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -21,12 +21,12 @@ use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedRespo use ethers_core::types::Transaction as EthersTransaction; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; +use logging::crit; use lru::LruCache; use payload_status::process_payload_status; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; use std::fmt; @@ -43,6 +43,7 @@ use tokio::{ time::sleep, }; use tokio_stream::wrappers::WatchStream; +use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::builder_bid::BuilderBid; @@ -422,7 +423,6 @@ struct Inner { proposers: RwLock>, executor: TaskExecutor, payload_cache: PayloadCache, - log: Logger, /// Track whether the last `newPayload` call errored. /// /// This is used *only* in the informational sync status endpoint, so that a VC using this @@ -466,7 +466,7 @@ pub struct ExecutionLayer { impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
- pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { + pub fn from_config(config: Config, executor: TaskExecutor) -> Result { let Config { execution_endpoint: url, builder_url, @@ -500,7 +500,7 @@ impl ExecutionLayer { .map_err(Error::InvalidJWTSecret) } else { // Create a new file and write a randomly generated secret to it if file does not exist - warn!(log, "No JWT found on disk. Generating"; "path" => %secret_file.display()); + warn!(path = %secret_file.display(),"No JWT found on disk. Generating"); std::fs::File::options() .write(true) .create_new(true) @@ -517,10 +517,10 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); - debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); + debug!(endpoint = %execution_url, jwt_path = ?secret_file.as_path(),"Loaded execution endpoint"); let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) .map_err(Error::ApiError)?; - Engine::new(api, executor.clone(), &log) + Engine::new(api, executor.clone()) }; let inner = Inner { @@ -533,7 +533,6 @@ impl ExecutionLayer { execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, payload_cache: PayloadCache::default(), - log, last_new_payload_errored: RwLock::new(false), }; @@ -580,11 +579,10 @@ impl ExecutionLayer { ) .map_err(Error::Builder)?; info!( - self.log(), - "Using external block builder"; - "builder_url" => ?builder_url, - "local_user_agent" => builder_client.get_user_agent(), - "ssz_disabled" => disable_ssz + ?builder_url, + local_user_agent = builder_client.get_user_agent(), + ssz_disabled = disable_ssz, + "Using external block builder" ); self.inner.builder.swap(Some(Arc::new(builder_client))); Ok(()) @@ -655,10 +653,6 @@ impl ExecutionLayer { &self.inner.proposers } - fn log(&self) -> &Logger { - &self.inner.log - } - pub async fn execution_engine_forkchoice_lock(&self) -> 
MutexGuard<'_, ()> { self.inner.execution_engine_forkchoice_lock.lock().await } @@ -716,16 +710,15 @@ impl ExecutionLayer { .await .map_err(|e| { error!( - el.log(), - "Failed to clean proposer preparation cache"; - "error" => format!("{:?}", e) + error = ?e, + "Failed to clean proposer preparation cache" ) }) .unwrap_or(()), - None => error!(el.log(), "Failed to get current epoch from slot clock"), + None => error!("Failed to get current epoch from slot clock"), } } else { - error!(el.log(), "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot and retry. sleep(slot_clock.slot_duration()).await; } @@ -865,12 +858,11 @@ impl ExecutionLayer { } else { // If there is no user-provided fee recipient, use a junk value and complain loudly. crit!( - self.log(), - "Fee recipient unknown"; - "msg" => "the suggested_fee_recipient was unknown during block production. \ + msg = "the suggested_fee_recipient was unknown during block production. \ a junk address was used, rewards were lost! \ check the --suggested-fee-recipient flag and VC configuration.", - "proposer_index" => ?proposer_index + ?proposer_index, + "Fee recipient unknown" ); Address::from_slice(&DEFAULT_SUGGESTED_FEE_RECIPIENT) @@ -987,11 +979,10 @@ impl ExecutionLayer { let parent_hash = payload_parameters.parent_hash; info!( - self.log(), - "Requesting blinded header from connected builder"; - "slot" => ?slot, - "pubkey" => ?pubkey, - "parent_hash" => ?parent_hash, + ?slot, + ?pubkey, + ?parent_hash, + "Requesting blinded header from connected builder" ); // Wait for the builder *and* local EL to produce a payload (or return an error). 
@@ -1012,20 +1003,19 @@ impl ExecutionLayer { ); info!( - self.log(), - "Requested blinded execution payload"; - "relay_fee_recipient" => match &relay_result { + relay_fee_recipient = match &relay_result { Ok(Some(r)) => format!("{:?}", r.data.message.header().fee_recipient()), Ok(None) => "empty response".to_string(), Err(_) => "request failed".to_string(), }, - "relay_response_ms" => relay_duration.as_millis(), - "local_fee_recipient" => match &local_result { + relay_response_ms = relay_duration.as_millis(), + local_fee_recipient = match &local_result { Ok(get_payload_response) => format!("{:?}", get_payload_response.fee_recipient()), - Err(_) => "request failed".to_string() + Err(_) => "request failed".to_string(), }, - "local_response_ms" => local_duration.as_millis(), - "parent_hash" => ?parent_hash, + local_response_ms = local_duration.as_millis(), + ?parent_hash, + "Requested blinded execution payload" ); (relay_result, local_result) @@ -1052,24 +1042,21 @@ impl ExecutionLayer { // chain is unhealthy, gotta use local payload match builder_params.chain_health { ChainHealth::Unhealthy(condition) => info!( - self.log(), - "Chain is unhealthy, using local payload"; - "info" => "this helps protect the network. the --builder-fallback flags \ - can adjust the expected health conditions.", - "failed_condition" => ?condition + info = "this helps protect the network. the --builder-fallback flags \ + can adjust the expected health conditions.", + failed_condition = ?condition, + "Chain is unhealthy, using local payload" ), // Intentional no-op, so we never attempt builder API proposals pre-merge. ChainHealth::PreMerge => (), ChainHealth::Optimistic => info!( - self.log(), - "Chain is optimistic; can't build payload"; - "info" => "the local execution engine is syncing and the builder network \ - cannot safely be used - unable to propose block" - ), - ChainHealth::Healthy => crit!( - self.log(), - "got healthy but also not healthy.. this shouldn't happen!" 
+ info = "the local execution engine is syncing and the builder network \ + cannot safely be used - unable to propose block", + "Chain is optimistic; can't build payload" ), + ChainHealth::Healthy => { + crit!("got healthy but also not healthy.. this shouldn't happen!") + } } return self .get_full_payload_caching(payload_parameters) @@ -1086,12 +1073,11 @@ impl ExecutionLayer { match (relay_result, local_result) { (Err(e), Ok(local)) => { warn!( - self.log(), - "Builder error when requesting payload"; - "info" => "falling back to local execution client", - "relay_error" => ?e, - "local_block_hash" => ?local.block_hash(), - "parent_hash" => ?parent_hash, + info = "falling back to local execution client", + relay_error = ?e, + local_block_hash = ?local.block_hash(), + ?parent_hash, + "Builder error when requesting payload" ); Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( local.try_into()?, @@ -1099,11 +1085,10 @@ impl ExecutionLayer { } (Ok(None), Ok(local)) => { info!( - self.log(), - "Builder did not return a payload"; - "info" => "falling back to local execution client", - "local_block_hash" => ?local.block_hash(), - "parent_hash" => ?parent_hash, + info = "falling back to local execution client", + local_block_hash=?local.block_hash(), + ?parent_hash, + "Builder did not return a payload" ); Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( local.try_into()?, @@ -1111,24 +1096,22 @@ impl ExecutionLayer { } (Err(relay_error), Err(local_error)) => { crit!( - self.log(), - "Unable to produce execution payload"; - "info" => "the local EL and builder both failed - unable to propose block", - "relay_error" => ?relay_error, - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, + info = "the local EL and builder both failed - unable to propose block", + ?relay_error, + ?local_error, + ?parent_hash, + "Unable to produce execution payload" ); Err(Error::CannotProduceHeader) } (Ok(None), Err(local_error)) => { crit!( - self.log(), - 
"Unable to produce execution payload"; - "info" => "the local EL failed and the builder returned nothing - \ - the block proposal will be missed", - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, + info = "the local EL failed and the builder returned nothing - \ + the block proposal will be missed", + ?local_error, + ?parent_hash, + "Unable to produce execution payload" ); Err(Error::CannotProduceHeader) @@ -1137,11 +1120,10 @@ impl ExecutionLayer { let header = &relay.data.message.header(); info!( - self.log(), - "Received local and builder payloads"; - "relay_block_hash" => ?header.block_hash(), - "local_block_hash" => ?local.block_hash(), - "parent_hash" => ?parent_hash, + relay_block_hash = ?header.block_hash(), + local_block_hash=?local.block_hash(), + ?parent_hash, + "Received local and builder payloads" ); // check relay payload validity @@ -1154,12 +1136,11 @@ impl ExecutionLayer { &[reason.as_ref().as_ref()], ); warn!( - self.log(), - "Builder returned invalid payload"; - "info" => "using local payload", - "reason" => %reason, - "relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, + info = "using local payload", + %reason, + relay_block_hash = ?header.block_hash(), + ?parent_hash, + "Builder returned invalid payload" ); return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( local.try_into()?, @@ -1178,12 +1159,11 @@ impl ExecutionLayer { if local_value >= boosted_relay_value { info!( - self.log(), - "Local block is more profitable than relay block"; - "local_block_value" => %local_value, - "relay_value" => %relay_value, - "boosted_relay_value" => %boosted_relay_value, - "builder_boost_factor" => ?builder_boost_factor, + %local_value, + %relay_value, + %boosted_relay_value, + ?builder_boost_factor, + "Local block is more profitable than relay block" ); return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( local.try_into()?, @@ -1192,10 +1172,9 @@ impl ExecutionLayer { if 
local.should_override_builder().unwrap_or(false) { info!( - self.log(), - "Using local payload because execution engine suggested we ignore builder payload"; - "local_block_value" => %local_value, - "relay_value" => %relay_value + %local_value, + %relay_value, + "Using local payload because execution engine suggested we ignore builder payload" ); return Ok(ProvenancedPayload::Local(BlockProposalContentsType::Full( local.try_into()?, @@ -1203,12 +1182,11 @@ impl ExecutionLayer { } info!( - self.log(), - "Relay block is more profitable than local block"; - "local_block_value" => %local_value, - "relay_value" => %relay_value, - "boosted_relay_value" => %boosted_relay_value, - "builder_boost_factor" => ?builder_boost_factor + %local_value, + %relay_value, + %boosted_relay_value, + ?builder_boost_factor, + "Relay block is more profitable than local block" ); Ok(ProvenancedPayload::try_from(relay.data.message)?) @@ -1217,11 +1195,10 @@ impl ExecutionLayer { let header = &relay.data.message.header(); info!( - self.log(), - "Received builder payload with local error"; - "relay_block_hash" => ?header.block_hash(), - "local_error" => ?local_error, - "parent_hash" => ?parent_hash, + relay_block_hash = ?header.block_hash(), + ?local_error, + ?parent_hash, + "Received builder payload with local error" ); match verify_builder_bid(&relay, payload_parameters, None, spec) { @@ -1232,12 +1209,11 @@ impl ExecutionLayer { &[reason.as_ref().as_ref()], ); crit!( - self.log(), - "Builder returned invalid payload"; - "info" => "no local payload either - unable to propose block", - "reason" => %reason, - "relay_block_hash" => ?header.block_hash(), - "parent_hash" => ?parent_hash, + info = "no local payload either - unable to propose block", + %reason, + relay_block_hash = ?header.block_hash(), + ?parent_hash, + "Builder returned invalid payload" ); Err(Error::CannotProduceHeader) } @@ -1304,7 +1280,6 @@ impl ExecutionLayer { .notify_forkchoice_updated( fork_choice_state, 
Some(payload_attributes.clone()), - self.log(), ) .await?; @@ -1312,12 +1287,11 @@ impl ExecutionLayer { Some(payload_id) => payload_id, None => { error!( - self.log(), - "Exec engine unable to produce payload"; - "msg" => "No payload ID, the engine is likely syncing. \ - This has the potential to cause a missed block proposal.", - "status" => ?response.payload_status - ); + msg = "No payload ID, the engine is likely syncing. \ + This has the potential to cause a missed block proposal.", + status = ?response.payload_status, + "Exec engine unable to produce payload" + ); return Err(ApiError::PayloadIdUnavailable); } } @@ -1325,36 +1299,44 @@ impl ExecutionLayer { let payload_response = async { debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), - "prev_randao" => ?payload_attributes.prev_randao(), - "timestamp" => payload_attributes.timestamp(), - "parent_hash" => ?parent_hash, + suggested_fee_recipient = ?payload_attributes.suggested_fee_recipient(), + prev_randao = ?payload_attributes.prev_randao(), + timestamp = payload_attributes.timestamp(), + ?parent_hash, + "Issuing engine_getPayload" ); let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_PAYLOAD], ); engine.api.get_payload::(current_fork, payload_id).await - }.await?; + } + .await?; - if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { + if payload_response.execution_payload_ref().fee_recipient() + != payload_attributes.suggested_fee_recipient() + { error!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ + msg = "The fee recipient returned from the Execution Engine differs \ from the suggested_fee_recipient set on the beacon node. This could \ indicate that fees are being diverted to another address. 
Please \ ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", - "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), - "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), + fee_recipient = ?payload_response.execution_payload_ref().fee_recipient(), + suggested_fee_recipient = ?payload_attributes.suggested_fee_recipient(), + "Inconsistent fee recipient" ); } - if cache_fn(self, (payload_response.execution_payload_ref(), payload_response.blobs_bundle().ok())).is_some() { + if cache_fn( + self, + ( + payload_response.execution_payload_ref(), + payload_response.blobs_bundle().ok(), + ), + ) + .is_some() + { warn!( - self.log(), "Duplicate payload cached, this might indicate redundant proposal \ attempts." ); @@ -1394,18 +1376,17 @@ impl ExecutionLayer { &["new_payload", status_str], ); debug!( - self.log(), - "Processed engine_newPayload"; - "status" => status_str, - "parent_hash" => ?parent_hash, - "block_hash" => ?block_hash, - "block_number" => block_number, - "response_time_ms" => timer.elapsed().as_millis() + status = status_str, + ?parent_hash, + ?block_hash, + block_number, + response_time_ms = timer.elapsed().as_millis(), + "Processed engine_newPayload" ); } *self.inner.last_new_payload_errored.write().await = result.is_err(); - process_payload_status(block_hash, result, self.log()) + process_payload_status(block_hash, result) .map_err(Box::new) .map_err(Error::EngineError) } @@ -1462,12 +1443,11 @@ impl ExecutionLayer { let proposer = self.proposers().read().await.get(&proposers_key).cloned()?; debug!( - self.log(), - "Beacon proposer found"; - "payload_attributes" => ?proposer.payload_attributes, - "head_block_root" => ?head_block_root, - "slot" => current_slot, - "validator_index" => proposer.validator_index, + payload_attributes = ?proposer.payload_attributes, + ?head_block_root, + slot = %current_slot, + validator_index = proposer.validator_index, + 
"Beacon proposer found" ); Some(proposer.payload_attributes) @@ -1488,13 +1468,12 @@ impl ExecutionLayer { ); debug!( - self.log(), - "Issuing engine_forkchoiceUpdated"; - "finalized_block_hash" => ?finalized_block_hash, - "justified_block_hash" => ?justified_block_hash, - "head_block_hash" => ?head_block_hash, - "head_block_root" => ?head_block_root, - "current_slot" => current_slot, + ?finalized_block_hash, + ?justified_block_hash, + ?head_block_hash, + ?head_block_root, + ?current_slot, + "Issuing engine_forkchoiceUpdated" ); let next_slot = current_slot + 1; @@ -1510,12 +1489,7 @@ impl ExecutionLayer { lookahead, ); } else { - debug!( - self.log(), - "Late payload attributes"; - "timestamp" => ?timestamp, - "now" => ?now, - ) + debug!(?timestamp, ?now, "Late payload attributes") } } } @@ -1534,7 +1508,7 @@ impl ExecutionLayer { .engine() .request(|engine| async move { engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) + .notify_forkchoice_updated(forkchoice_state, payload_attributes) .await }) .await; @@ -1549,7 +1523,6 @@ impl ExecutionLayer { process_payload_status( head_block_hash, result.map(|response| response.payload_status), - self.log(), ) .map_err(Box::new) .map_err(Error::EngineError) @@ -1646,11 +1619,10 @@ impl ExecutionLayer { if let Some(hash) = &hash_opt { info!( - self.log(), - "Found terminal block hash"; - "terminal_block_hash_override" => ?spec.terminal_block_hash, - "terminal_total_difficulty" => ?spec.terminal_total_difficulty, - "block_hash" => ?hash, + terminal_block_hash_override = ?spec.terminal_block_hash, + terminal_total_difficulty = ?spec.terminal_total_difficulty, + block_hash = ?hash, + "Found terminal block hash" ); } @@ -1907,21 +1879,16 @@ impl ExecutionLayer { block_root: Hash256, block: &SignedBlindedBeaconBlock, ) -> Result, Error> { - debug!( - self.log(), - "Sending block to builder"; - "root" => ?block_root, - ); + debug!(?block_root, "Sending block to builder"); if let Some(builder) = 
self.builder() { let (payload_result, duration) = timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { let ssz_enabled = builder.is_ssz_available(); debug!( - self.log(), - "Calling submit_blinded_block on builder"; - "block_root" => ?block_root, - "ssz" => ssz_enabled + ?block_root, + ssz = ssz_enabled, + "Calling submit_blinded_block on builder" ); if ssz_enabled { builder @@ -1946,13 +1913,12 @@ impl ExecutionLayer { ); let payload = unblinded_response.payload_ref(); info!( - self.log(), - "Builder successfully revealed payload"; - "relay_response_ms" => duration.as_millis(), - "block_root" => ?block_root, - "fee_recipient" => ?payload.fee_recipient(), - "block_hash" => ?payload.block_hash(), - "parent_hash" => ?payload.parent_hash() + relay_response_ms = duration.as_millis(), + ?block_root, + fee_recipient = ?payload.fee_recipient(), + block_hash = ?payload.block_hash(), + parent_hash = ?payload.parent_hash(), + "Builder successfully revealed payload" ) } Err(e) => { @@ -1961,17 +1927,16 @@ impl ExecutionLayer { &[metrics::FAILURE], ); warn!( - self.log(), - "Builder failed to reveal payload"; - "info" => "this is common behaviour for some builders and may not indicate an issue", - "error" => ?e, - "relay_response_ms" => duration.as_millis(), - "block_root" => ?block_root, - "parent_hash" => ?block + info = "this is common behaviour for some builders and may not indicate an issue", + error = ?e, + relay_response_ms = duration.as_millis(), + ?block_root, + parent_hash = ?block .message() .execution_payload() .map(|payload| format!("{}", payload.parent_hash())) - .unwrap_or_else(|_| "unknown".to_string()) + .unwrap_or_else(|_| "unknown".to_string()), + "Builder failed to reveal payload" ) } } diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index cf0be8ed0d..bbfd30239d 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ 
-1,6 +1,6 @@ use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; use crate::engines::EngineError; -use slog::{warn, Logger}; +use tracing::warn; use types::ExecutionBlockHash; /// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. @@ -26,15 +26,10 @@ pub enum PayloadStatus { pub fn process_payload_status( head_block_hash: ExecutionBlockHash, status: Result, - log: &Logger, ) -> Result { match status { Err(error) => { - warn!( - log, - "Error whilst processing payload status"; - "error" => ?error, - ); + warn!(?error, "Error whilst processing payload status"); Err(error) } Ok(response) => match &response.status { @@ -66,10 +61,9 @@ pub fn process_payload_status( // warning here. if response.latest_valid_hash.is_some() { warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status + msg = "expected a null latest_valid_hash", + status = ?response.status, + "Malformed response from execution engine" ) } @@ -82,10 +76,9 @@ pub fn process_payload_status( // warning here. if response.latest_valid_hash.is_some() { warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status + msg = "expected a null latest_valid_hash", + status = ?response.status, + "Malformed response from execution engine" ) } @@ -96,10 +89,9 @@ pub fn process_payload_status( // warning here. 
if response.latest_valid_hash.is_some() { warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status + msg = "expected a null latest_valid_hash", + status = ?response.status, + "Malformed response from execution engine" ) } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index f07ee7ac6f..fba34121a7 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -13,7 +13,6 @@ use eth2::{ use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use slog::{debug, error, info, warn, Logger}; use ssz::Encode; use std::collections::HashMap; use std::fmt::Debug; @@ -24,6 +23,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tokio_stream::StreamExt; +use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, @@ -309,7 +309,6 @@ pub struct MockBuilder { max_bid: bool, /// A cache that stores the proposers index for a given epoch proposers_cache: Arc>>>, - log: Logger, } impl MockBuilder { @@ -331,8 +330,7 @@ impl MockBuilder { ..Default::default() }; - let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + let el = ExecutionLayer::from_config(config, executor.clone()).unwrap(); let builder = MockBuilder::new( el, @@ -342,7 +340,6 @@ impl MockBuilder { false, spec, None, - executor.log().clone(), ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; @@ -359,16 +356,12 @@ impl MockBuilder { max_bid: bool, spec: Arc, sk: Option<&[u8]>, - log: Logger, ) -> Self { let builder_sk = if let Some(sk_bytes) = sk { match SecretKey::deserialize(sk_bytes) { Ok(sk) => sk, Err(_) => { - 
error!( - log, - "Invalid sk_bytes provided, generating random secret key" - ); + error!("Invalid sk_bytes provided, generating random secret key"); SecretKey::random() } } @@ -390,7 +383,6 @@ impl MockBuilder { apply_operations, max_bid, genesis_time: None, - log, } } @@ -425,18 +417,13 @@ impl MockBuilder { &self, registrations: Vec, ) -> Result<(), String> { - info!( - self.log, - "Registering validators"; - "count" => registrations.len(), - ); + info!(count = registrations.len(), "Registering validators"); for registration in registrations { if !registration.verify_signature(&self.spec) { error!( - self.log, - "Failed to register validator"; - "error" => "invalid signature", - "validator" => %registration.message.pubkey + error = "invalid signature", + validator = %registration.message.pubkey, + "Failed to register validator" ); return Err("invalid signature".to_string()); } @@ -472,9 +459,8 @@ impl MockBuilder { } }; info!( - self.log, - "Submitting blinded beacon block to builder"; - "block_hash" => %root + block_hash = %root, + "Submitting blinded beacon block to builder" ); let payload = self .el @@ -486,10 +472,9 @@ impl MockBuilder { .try_into_full_block(Some(payload.clone())) .ok_or("Internal error, just provided a payload")?; debug!( - self.log, - "Got full payload, sending to local beacon node for propagation"; - "txs_count" => payload.transactions().len(), - "blob_count" => blobs.as_ref().map(|b| b.commitments.len()) + txs_count = payload.transactions().len(), + blob_count = blobs.as_ref().map(|b| b.commitments.len()), + "Got full payload, sending to local beacon node for propagation" ); let publish_block_request = PublishBlockRequest::new( Arc::new(full_block), @@ -508,7 +493,7 @@ impl MockBuilder { parent_hash: ExecutionBlockHash, pubkey: PublicKeyBytes, ) -> Result, String> { - info!(self.log, "In get_header"); + info!("In get_header"); // Check if the pubkey has registered with the builder if required if self.validate_pubkey && 
!self.val_registration_cache.read().contains_key(&pubkey) { return Err("validator not registered with builder".to_string()); @@ -521,15 +506,12 @@ impl MockBuilder { let payload_parameters = match payload_parameters { Some(params) => params, None => { - warn!( - self.log, - "Payload params not cached for parent_hash {}", parent_hash - ); + warn!("Payload params not cached for parent_hash {}", parent_hash); self.get_payload_params(slot, None, pubkey, None).await? } }; - info!(self.log, "Got payload params"); + info!("Got payload params"); let fork = self.fork_name_at_slot(slot); let payload_response_type = self @@ -545,7 +527,7 @@ impl MockBuilder { .await .map_err(|e| format!("couldn't get payload {:?}", e))?; - info!(self.log, "Got payload message, fork {}", fork); + info!("Got payload message, fork {}", fork); let mut message = match payload_response_type { crate::GetPayloadResponseType::Full(payload_response) => { @@ -616,10 +598,10 @@ impl MockBuilder { }; if self.apply_operations { - info!(self.log, "Applying operations"); + info!("Applying operations"); self.apply_operations(&mut message); } - info!(self.log, "Signing builder message"); + info!("Signing builder message"); let mut signature = message.sign_builder_message(&self.builder_sk, &self.spec); @@ -627,7 +609,7 @@ impl MockBuilder { signature = Signature::empty(); }; let signed_bid = SignedBuilderBid { message, signature }; - info!(self.log, "Builder bid {:?}", &signed_bid.message.value()); + info!("Builder bid {:?}", &signed_bid.message.value()); Ok(signed_bid) } @@ -648,10 +630,7 @@ impl MockBuilder { /// Prepare the execution layer for payload creation every slot for the correct /// proposer index pub async fn prepare_execution_layer(&self) -> Result<(), String> { - info!( - self.log, - "Starting a task to prepare the execution layer"; - ); + info!("Starting a task to prepare the execution layer"); let mut head_event_stream = self .beacon_client .get_events::(&[EventTopic::Head]) @@ -662,9 +641,8 @@ 
impl MockBuilder { match event { EventKind::Head(head) => { debug!( - self.log, - "Got a new head event"; - "block_hash" => %head.block + block_hash = %head.block, + "Got a new head event" ); let next_slot = head.slot + 1; // Find the next proposer index from the cached data or through a beacon api call @@ -712,9 +690,8 @@ impl MockBuilder { } e => { warn!( - self.log, - "Got an unexpected event"; - "event" => %e.topic_name() + event = %e.topic_name(), + "Got an unexpected event" ); } } @@ -812,7 +789,6 @@ impl MockBuilder { ), None => { warn!( - self.log, "Validator not registered {}, using default fee recipient and gas limits", pubkey ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index f45bfda9ff..cbe5e3ae98 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -76,8 +76,7 @@ impl MockExecutionLayer { suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() }; - let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + let el = ExecutionLayer::from_config(config, executor.clone()).unwrap(); Self { server, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 75ff435886..17441a15fb 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -9,11 +9,11 @@ use bytes::Bytes; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; -use logging::test_logger; + +use logging::create_test_tracing_subscriber; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; -use slog::{info, Logger}; use std::collections::HashMap; use std::convert::Infallible; use std::future::Future; @@ -21,6 +21,7 @@ use 
std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; +use tracing::info; use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; @@ -133,6 +134,7 @@ impl MockServer { spec: Arc, kzg: Option>, ) -> Self { + create_test_tracing_subscriber(); let MockExecutionConfig { jwt_key, terminal_difficulty, @@ -161,7 +163,6 @@ impl MockServer { let ctx: Arc> = Arc::new(Context { config: server_config, jwt_key, - log: test_logger(), last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), previous_request: <_>::default(), @@ -533,7 +534,7 @@ impl warp::reject::Reject for AuthError {} pub struct Context { pub config: Config, pub jwt_key: JwtKey, - pub log: Logger, + pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, @@ -671,7 +672,6 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result<(SocketAddr, impl Future), Error> { let config = &ctx.config; - let log = ctx.log.clone(); let inner_ctx = ctx.clone(); let ctx_filter = warp::any().map(move || inner_ctx.clone()); @@ -751,9 +751,8 @@ pub fn serve( )?; info!( - log, - "Metrics HTTP server started"; - "listen_address" => listening_socket.to_string(), + listen_address = listening_socket.to_string(), + "Metrics HTTP server started" ); Ok((listening_socket, server)) diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index eeca393947..6ba8998a01 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } [dev-dependencies] eth1_test_rig = { workspace = true } +logging = { workspace = true } sensitive_url = { workspace = true } [dependencies] @@ -17,8 +18,8 @@ futures = { workspace = true } int_to_bytes = { workspace = true } merkle_proof = { workspace = true } rayon = { 
workspace = true } -slog = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 6e8f38627c..dede96512c 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -2,7 +2,6 @@ pub use crate::common::genesis_deposits; pub use eth1::Config as Eth1Config; use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; -use slog::{debug, error, info, trace, Logger}; use state_processing::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, per_block_processing::process_operations::apply_deposit, process_activations, @@ -13,6 +12,7 @@ use std::sync::{ }; use std::time::Duration; use tokio::time::sleep; +use tracing::{debug, error, info, trace}; use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, FixedBytesExtended, Hash256}; /// The number of blocks that are pulled per request whilst waiting for genesis. @@ -43,7 +43,7 @@ impl Eth1GenesisService { /// Creates a new service. Does not attempt to connect to the Eth1 node. /// /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, log: Logger, spec: Arc) -> Result { + pub fn new(config: Eth1Config, spec: Arc) -> Result { let config = Eth1Config { // Truncating the block cache makes searching for genesis more // complicated. 
@@ -65,7 +65,7 @@ impl Eth1GenesisService { }; Ok(Self { - eth1_service: Eth1Service::new(config, log, spec) + eth1_service: Eth1Service::new(config, spec) .map_err(|e| format!("Failed to create eth1 service: {:?}", e))?, stats: Arc::new(Statistics { highest_processed_block: AtomicU64::new(0), @@ -103,15 +103,11 @@ impl Eth1GenesisService { ) -> Result, String> { let eth1_service = &self.eth1_service; let spec = eth1_service.chain_spec(); - let log = ð1_service.log; let mut sync_blocks = false; let mut highest_processed_block = None; - info!( - log, - "Importing eth1 deposit logs"; - ); + info!("Importing eth1 deposit logs"); loop { let update_result = eth1_service @@ -120,11 +116,7 @@ impl Eth1GenesisService { .map_err(|e| format!("{:?}", e)); if let Err(e) = update_result { - error!( - log, - "Failed to update eth1 deposit cache"; - "error" => e - ) + error!(error = e, "Failed to update eth1 deposit cache") } self.stats @@ -135,19 +127,15 @@ impl Eth1GenesisService { if let Some(viable_eth1_block) = self .first_candidate_eth1_block(spec.min_genesis_active_validator_count as usize) { - info!( - log, - "Importing eth1 blocks"; - ); + info!("Importing eth1 blocks"); self.eth1_service.set_lowest_cached_block(viable_eth1_block); sync_blocks = true } else { info!( - log, - "Waiting for more deposits"; - "min_genesis_active_validators" => spec.min_genesis_active_validator_count, - "total_deposits" => eth1_service.deposit_cache_len(), - "valid_deposits" => eth1_service.get_raw_valid_signature_count(), + min_genesis_active_validators = spec.min_genesis_active_validator_count, + total_deposits = eth1_service.deposit_cache_len(), + valid_deposits = eth1_service.get_raw_valid_signature_count(), + "Waiting for more deposits" ); sleep(update_interval).await; @@ -160,19 +148,17 @@ impl Eth1GenesisService { let blocks_imported = match eth1_service.update_block_cache(None).await { Ok(outcome) => { debug!( - log, - "Imported eth1 blocks"; - "latest_block_timestamp" => 
eth1_service.latest_block_timestamp(), - "cache_head" => eth1_service.highest_safe_block(), - "count" => outcome.blocks_imported, + latest_block_timestamp = eth1_service.latest_block_timestamp(), + cache_head = eth1_service.highest_safe_block(), + count = outcome.blocks_imported, + "Imported eth1 blocks" ); outcome.blocks_imported } Err(e) => { error!( - log, - "Failed to update eth1 block cache"; - "error" => format!("{:?}", e) + error = ?e, + "Failed to update eth1 block cache" ); 0 } @@ -183,13 +169,12 @@ impl Eth1GenesisService { self.scan_new_blocks::(&mut highest_processed_block, spec)? { info!( - log, - "Genesis ceremony complete"; - "genesis_validators" => genesis_state + genesis_validators = genesis_state .get_active_validator_indices(E::genesis_epoch(), spec) .map_err(|e| format!("Genesis validators error: {:?}", e))? .len(), - "genesis_time" => genesis_state.genesis_time(), + genesis_time = genesis_state.genesis_time(), + "Genesis ceremony complete" ); break Ok(genesis_state); } @@ -207,21 +192,19 @@ impl Eth1GenesisService { // Indicate that we are awaiting adequate active validators. 
if (active_validator_count as u64) < spec.min_genesis_active_validator_count { info!( - log, - "Waiting for more validators"; - "min_genesis_active_validators" => spec.min_genesis_active_validator_count, - "active_validators" => active_validator_count, - "total_deposits" => total_deposit_count, - "valid_deposits" => eth1_service.get_valid_signature_count().unwrap_or(0), + min_genesis_active_validators = spec.min_genesis_active_validator_count, + active_validators = active_validator_count, + total_deposits = total_deposit_count, + valid_deposits = eth1_service.get_valid_signature_count().unwrap_or(0), + "Waiting for more validators" ); } } else { info!( - log, - "Waiting for adequate eth1 timestamp"; - "genesis_delay" => spec.genesis_delay, - "genesis_time" => spec.min_genesis_time, - "latest_eth1_timestamp" => latest_timestamp, + genesis_delay = spec.genesis_delay, + genesis_time = spec.min_genesis_time, + latest_eth1_timestamp = latest_timestamp, + "Waiting for adequate eth1 timestamp" ); } @@ -253,7 +236,6 @@ impl Eth1GenesisService { spec: &ChainSpec, ) -> Result>, String> { let eth1_service = &self.eth1_service; - let log = ð1_service.log; for block in eth1_service.blocks().read().iter() { // It's possible that the block and deposit caches aren't synced. Ignore any blocks @@ -286,12 +268,11 @@ impl Eth1GenesisService { // Ignore any block with an insufficient timestamp. if !timestamp_can_trigger_genesis(block.timestamp, spec)? 
{ trace!( - log, - "Insufficient block timestamp"; - "genesis_delay" => spec.genesis_delay, - "min_genesis_time" => spec.min_genesis_time, - "eth1_block_timestamp" => block.timestamp, - "eth1_block_number" => block.number, + genesis_delay = spec.genesis_delay, + min_genesis_time = spec.min_genesis_time, + eth1_block_timestamp = block.timestamp, + eth1_block_number = block.number, + "Insufficient block timestamp" ); continue; } @@ -301,12 +282,11 @@ impl Eth1GenesisService { .unwrap_or(0); if (valid_signature_count as u64) < spec.min_genesis_active_validator_count { trace!( - log, - "Insufficient valid signatures"; - "genesis_delay" => spec.genesis_delay, - "valid_signature_count" => valid_signature_count, - "min_validator_count" => spec.min_genesis_active_validator_count, - "eth1_block_number" => block.number, + genesis_delay = spec.genesis_delay, + valid_signature_count = valid_signature_count, + min_validator_count = spec.min_genesis_active_validator_count, + eth1_block_number = block.number, + "Insufficient valid signatures" ); continue; } @@ -333,11 +313,11 @@ impl Eth1GenesisService { return Ok(Some(genesis_state)); } else { trace!( - log, - "Insufficient active validators"; - "min_genesis_active_validator_count" => format!("{}", spec.min_genesis_active_validator_count), - "active_validators" => active_validator_count, - "eth1_block_number" => block.number, + min_genesis_active_validator_count = + format!("{}", spec.min_genesis_active_validator_count), + active_validators = active_validator_count, + eth1_block_number = block.number, + "Insufficient active validators" ); } } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 6cc7517aa4..b5710e50fd 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -3,6 +3,7 @@ use environment::{Environment, EnvironmentBuilder}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; use 
genesis::{Eth1Config, Eth1GenesisService}; +use logging::create_test_tracing_subscriber; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; use std::sync::Arc; @@ -12,11 +13,10 @@ use types::{ }; pub fn new_env() -> Environment { + create_test_tracing_subscriber(); EnvironmentBuilder::minimal() .multi_threaded_tokio_runtime() .expect("should start tokio runtime") - .test_logger() - .expect("should start null logger") .build() .expect("should build env") } @@ -24,7 +24,6 @@ pub fn new_env() -> Environment { #[test] fn basic() { let env = new_env(); - let log = env.core_context().log().clone(); let mut spec = (*env.eth2_config().spec).clone(); spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = 8; @@ -55,7 +54,6 @@ fn basic() { block_cache_truncation: None, ..Eth1Config::default() }, - log, spec.clone(), ) .unwrap(); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 2fb3ec06bf..a4352f1c3d 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -33,7 +33,6 @@ safe_arith = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } state_processing = { workspace = true } store = { workspace = true } @@ -42,6 +41,7 @@ system_health = { path = "../../common/system_health" } task_executor = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } warp = { workspace = true } @@ -49,7 +49,6 @@ warp_utils = { workspace = true } [dev-dependencies] genesis = { workspace = true } -logging = { workspace = true } proto_array = { workspace = true } serde_json = { workspace = true } diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 0cc878bb48..fbb16e9540 100644 --- 
a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -1,10 +1,10 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; use lru::LruCache; -use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::num::NonZeroUsize; use std::sync::Arc; +use tracing::{debug, warn}; use types::beacon_block::BlindedBeaconBlock; use types::non_zero_usize::new_non_zero_usize; use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error}; @@ -15,7 +15,6 @@ const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); pub fn get_block_rewards( query: BlockRewardsQuery, chain: Arc>, - log: Logger, ) -> Result, warp::Rejection> { let start_slot = query.start_slot; let end_slot = query.end_slot; @@ -81,12 +80,7 @@ pub fn get_block_rewards( .map_err(unhandled_error)?; if block_replayer.state_root_miss() { - warn!( - log, - "Block reward state root miss"; - "start_slot" => start_slot, - "end_slot" => end_slot, - ); + warn!(%start_slot, %end_slot, "Block reward state root miss"); } drop(block_replayer); @@ -98,7 +92,6 @@ pub fn get_block_rewards( pub fn compute_block_rewards( blocks: Vec>, chain: Arc>, - log: Logger, ) -> Result, warp::Rejection> { let mut block_rewards = Vec::with_capacity(blocks.len()); let mut state_cache = LruCache::new(STATE_CACHE_SIZE); @@ -110,18 +103,16 @@ pub fn compute_block_rewards( // Check LRU cache for a constructed state from a previous iteration. 
let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) { debug!( - log, - "Re-using cached state for block rewards"; - "parent_root" => ?parent_root, - "slot" => block.slot(), + ?parent_root, + slot = %block.slot(), + "Re-using cached state for block rewards" ); state } else { debug!( - log, - "Fetching state for block rewards"; - "parent_root" => ?parent_root, - "slot" => block.slot() + ?parent_root, + slot = %block.slot(), + "Fetching state for block rewards" ); let parent_block = chain .get_blinded_block(&parent_root) @@ -152,10 +143,9 @@ pub fn compute_block_rewards( if block_replayer.state_root_miss() { warn!( - log, - "Block reward state root miss"; - "parent_slot" => parent_block.slot(), - "slot" => block.slot(), + parent_slot = %parent_block.slot(), + slot = %block.slot(), + "Block reward state root miss" ); } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0cddb7fa02..b6ad8da128 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -55,7 +55,7 @@ use health_metrics::observe::Observe; use lighthouse_network::rpc::methods::MetaData; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; -use logging::SSELoggingComponents; +use logging::{crit, SSELoggingComponents}; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; @@ -64,7 +64,6 @@ pub use publish_blocks::{ }; use serde::{Deserialize, Serialize}; use serde_json::Value; -use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; @@ -84,6 +83,7 @@ use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, StreamExt, }; +use tracing::{debug, error, info, warn}; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, 
AttestationShufflingId, AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, @@ -132,7 +132,6 @@ pub struct Context { pub beacon_processor_reprocess_send: Option>, pub eth1_service: Option, pub sse_logging_components: Option, - pub log: Logger, } /// Configuration for the HTTP server. @@ -186,40 +185,6 @@ impl From for Error { } } -/// Creates a `warp` logging wrapper which we use to create `slog` logs. -pub fn slog_logging( - log: Logger, -) -> warp::filters::log::Log { - warp::log::custom(move |info| { - match info.status() { - status - if status == StatusCode::OK - || status == StatusCode::NOT_FOUND - || status == StatusCode::PARTIAL_CONTENT => - { - debug!( - log, - "Processed HTTP API request"; - "elapsed" => format!("{:?}", info.elapsed()), - "status" => status.to_string(), - "path" => info.path(), - "method" => info.method().to_string(), - ); - } - status => { - warn!( - log, - "Error processing HTTP API request"; - "elapsed" => format!("{:?}", info.elapsed()), - "status" => status.to_string(), - "path" => info.path(), - "method" => info.method().to_string(), - ); - } - }; - }) -} - /// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging, /// per say). pub fn prometheus_metrics() -> warp::filters::log::Log { @@ -307,7 +272,6 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result { let config = ctx.config.clone(); - let log = ctx.log.clone(); // Configure CORS. let cors_builder = { @@ -324,7 +288,7 @@ pub fn serve( // Sanity check. if !config.enabled { - crit!(log, "Cannot start disabled HTTP server"); + crit!("Cannot start disabled HTTP server"); return Err(Error::Other( "A disabled server should not be started".to_string(), )); @@ -485,10 +449,6 @@ pub fn serve( } }); - // Create a `warp` filter that provides access to the logger. 
- let inner_ctx = ctx.clone(); - let log_filter = warp::any().map(move || inner_ctx.log.clone()); - let inner_components = ctx.sse_logging_components.clone(); let sse_component_filter = warp::any().map(move || inner_components.clone()); @@ -1284,21 +1244,18 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, - log, BroadcastValidation::default(), duplicate_block_status_code, network_globals, @@ -1318,15 +1275,13 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |block_bytes: Bytes, consensus_version: ForkName, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, @@ -1340,7 +1295,6 @@ pub fn serve( ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, - log, BroadcastValidation::default(), duplicate_block_status_code, network_globals, @@ -1360,22 +1314,19 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { 
task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, - log, validation_level.broadcast_validation, duplicate_block_status_code, network_globals, @@ -1396,7 +1347,6 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, block_bytes: Bytes, @@ -1404,8 +1354,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, @@ -1419,7 +1368,6 @@ pub fn serve( ProvenancedBlock::local_from_publish_request(block_contents), chain, &network_tx, - log, validation_level.broadcast_validation, duplicate_block_status_code, network_globals, @@ -1443,20 +1391,17 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |block_contents: Arc>, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( block_contents, chain, &network_tx, - log, BroadcastValidation::default(), duplicate_block_status_code, network_globals, @@ -1476,14 +1421,12 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |block_bytes: Bytes, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let 
block = SignedBlindedBeaconBlock::::from_ssz_bytes( &block_bytes, @@ -1497,7 +1440,6 @@ pub fn serve( block, chain, &network_tx, - log, BroadcastValidation::default(), duplicate_block_status_code, network_globals, @@ -1517,21 +1459,18 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, blinded_block: Arc>, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( blinded_block, chain, &network_tx, - log, validation_level.broadcast_validation, duplicate_block_status_code, network_globals, @@ -1551,15 +1490,13 @@ pub fn serve( .and(chain_filter.clone()) .and(network_tx_filter.clone()) .and(network_globals.clone()) - .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, block_bytes: Bytes, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, - network_globals: Arc>, - log: Logger| { + network_globals: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBlindedBeaconBlock::::from_ssz_bytes( &block_bytes, @@ -1573,7 +1510,6 @@ pub fn serve( block, chain, &network_tx, - log, validation_level.broadcast_validation, duplicate_block_status_code, network_globals, @@ -1843,14 +1779,12 @@ pub fn serve( .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .and(reprocess_send_filter.clone()) - .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, network_tx: UnboundedSender>, - reprocess_tx: Option>, - log: Logger| async move { + reprocess_tx: Option>| async move { let attestations = attestations.into_iter().map(Either::Left).collect(); let result = crate::publish_attestations::publish_attestations( 
task_spawner, @@ -1858,7 +1792,6 @@ pub fn serve( attestations, network_tx, reprocess_tx, - log, ) .await .map(|()| warp::reply::json(&())); @@ -1874,25 +1807,22 @@ pub fn serve( .and(optional_consensus_version_header_filter) .and(network_tx_filter.clone()) .and(reprocess_send_filter.clone()) - .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, payload: Value, fork_name: Option, network_tx: UnboundedSender>, - reprocess_tx: Option>, - log: Logger| async move { + reprocess_tx: Option>| async move { let attestations = match crate::publish_attestations::deserialize_attestation_payload::( - payload, fork_name, &log, + payload, fork_name, ) { Ok(attestations) => attestations, Err(err) => { warn!( - log, - "Unable to deserialize attestation POST request"; - "error" => ?err + error = ?err, + "Unable to deserialize attestation POST request" ); return warp::reply::with_status( warp::reply::json( @@ -1910,7 +1840,6 @@ pub fn serve( attestations, network_tx, reprocess_tx, - log, ) .await .map(|()| warp::reply::json(&())); @@ -2185,16 +2114,14 @@ pub fn serve( .and(warp::path::end()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) - .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, signatures: Vec, - network_tx: UnboundedSender>, - log: Logger| { + network_tx: UnboundedSender>| { task_spawner.blocking_json_task(Priority::P0, move || { sync_committees::process_sync_committee_signatures( - signatures, network_tx, &chain, log, + signatures, network_tx, &chain, )?; Ok(api_types::GenericResponse::from(())) }) @@ -2222,13 +2149,11 @@ pub fn serve( .and(warp::path::end()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) - .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, address_changes: Vec, - network_tx: UnboundedSender>, - log: Logger| { + network_tx: UnboundedSender>| { task_spawner.blocking_json_task(Priority::P0, move || { let mut failures = vec![]; @@ -2245,11 +2170,12 @@ pub fn 
serve( .to_execution_address; // New to P2P *and* op pool, gossip immediately if post-Capella. - let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) { - ReceivedPreCapella::No - } else { - ReceivedPreCapella::Yes - }; + let received_pre_capella = + if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; if matches!(received_pre_capella, ReceivedPreCapella::No) { publish_pubsub_message( &network_tx, @@ -2260,32 +2186,29 @@ pub fn serve( } // Import to op pool (may return `false` if there's a race). - let imported = - chain.import_bls_to_execution_change(verified_address_change, received_pre_capella); + let imported = chain.import_bls_to_execution_change( + verified_address_change, + received_pre_capella, + ); info!( - log, - "Processed BLS to execution change"; - "validator_index" => validator_index, - "address" => ?address, - "published" => matches!(received_pre_capella, ReceivedPreCapella::No), - "imported" => imported, + %validator_index, + ?address, + published = + matches!(received_pre_capella, ReceivedPreCapella::No), + imported, + "Processed BLS to execution change" ); } Ok(ObservationOutcome::AlreadyKnown) => { - debug!( - log, - "BLS to execution change already known"; - "validator_index" => validator_index, - ); + debug!(%validator_index, "BLS to execution change already known"); } Err(e) => { warn!( - log, - "Invalid BLS to execution change"; - "validator_index" => validator_index, - "reason" => ?e, - "source" => "HTTP", + validator_index, + reason = ?e, + source = "HTTP", + "Invalid BLS to execution change" ); failures.push(api_types::Failure::new( index, @@ -2658,17 +2581,15 @@ pub fn serve( .and(block_id_or_err) .and(warp::path::end()) .and(warp_utils::json::json()) - .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, block_id: BlockId, - validators: Vec, - log: Logger| { + validators: Vec| { 
task_spawner.blocking_json_task(Priority::P1, move || { let (rewards, execution_optimistic, finalized) = sync_committee_rewards::compute_sync_committee_rewards( - chain, block_id, validators, log, + chain, block_id, validators, )?; Ok(api_types::GenericResponse::from(rewards)).map(|resp| { @@ -2755,14 +2676,12 @@ pub fn serve( .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .then( |endpoint_version: EndpointVersion, state_id: StateId, accept_header: Option, task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { + chain: Arc>| { task_spawner.blocking_response_task(Priority::P1, move || match accept_header { Some(api_types::Accept::Ssz) => { // We can ignore the optimistic status for the "fork" since it's a @@ -2777,10 +2696,9 @@ pub fn serve( let response_bytes = state.as_ssz_bytes(); drop(timer); debug!( - log, - "HTTP state load"; - "total_time_ms" => t.elapsed().as_millis(), - "target_slot" => state.slot() + total_time_ms = t.elapsed().as_millis(), + target_slot = %state.slot(), + "HTTP state load" ); Response::builder() @@ -3248,16 +3166,14 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .then( |epoch: Epoch, not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { + chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain, &log) + proposer_duties::proposer_duties(epoch, &chain) }) }, ); @@ -3277,7 +3193,6 @@ pub fn serve( .and(warp::query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .then( |endpoint_version: EndpointVersion, slot: Slot, @@ -3285,14 +3200,9 @@ pub fn serve( not_synced_filter: Result<(), Rejection>, query: api_types::ValidatorBlocksQuery, task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { 
+ chain: Arc>| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - debug!( - log, - "Block production request from HTTP API"; - "slot" => slot - ); + debug!(?slot, "Block production request from HTTP API"); not_synced_filter?; @@ -3499,7 +3409,6 @@ pub fn serve( .and(chain_filter.clone()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) - .and(log_filter.clone()) .then( // V1 and V2 are identical except V2 has a consensus version header in the request. // We only require this header for SSZ deserialization, which isn't supported for @@ -3509,7 +3418,7 @@ pub fn serve( task_spawner: TaskSpawner, chain: Arc>, aggregates: Vec>, - network_tx: UnboundedSender>, log: Logger| { + network_tx: UnboundedSender>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; let seen_timestamp = timestamp_now(); @@ -3556,13 +3465,13 @@ pub fn serve( // aggregate has been successfully published by some other node. Err(AttnError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { - error!(log, - "Failure verifying aggregate and proofs"; - "error" => format!("{:?}", e), - "request_index" => index, - "aggregator_index" => aggregate.message().aggregator_index(), - "attestation_index" => aggregate.message().aggregate().committee_index(), - "attestation_slot" => aggregate.message().aggregate().data().slot, + error!( + error = ?e, + request_index = index, + aggregator_index = aggregate.message().aggregator_index(), + attestation_index = aggregate.message().aggregate().committee_index(), + attestation_slot = %aggregate.message().aggregate().data().slot, + "Failure verifying aggregate and proofs" ); failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); } @@ -3577,22 +3486,21 @@ pub fn serve( // Import aggregate attestations for (index, verified_aggregate) in verified_aggregates { if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { - error!(log, - "Failure applying verified aggregate 
attestation to fork choice"; - "error" => format!("{:?}", e), - "request_index" => index, - "aggregator_index" => verified_aggregate.aggregate().message().aggregator_index(), - "attestation_index" => verified_aggregate.attestation().committee_index(), - "attestation_slot" => verified_aggregate.attestation().data().slot, + error!( + error = ?e, + request_index = index, + aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), + attestation_index = verified_aggregate.attestation().committee_index(), + attestation_slot = %verified_aggregate.attestation().data().slot, + "Failure applying verified aggregate attestation to fork choice" ); failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); } if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { warn!( - log, - "Could not add verified aggregate attestation to the inclusion pool"; - "error" => ?e, - "request_index" => index, + error = ?e, + request_index = index, + "Could not add verified aggregate attestation to the inclusion pool" ); failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); } @@ -3618,21 +3526,18 @@ pub fn serve( .and(chain_filter.clone()) .and(warp_utils::json::json()) .and(network_tx_filter) - .and(log_filter.clone()) .then( |not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>, contributions: Vec>, - network_tx: UnboundedSender>, - log: Logger| { + network_tx: UnboundedSender>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; sync_committees::process_signed_contribution_and_proofs( contributions, network_tx, &chain, - log, )?; Ok(api_types::GenericResponse::from(())) }) @@ -3648,13 +3553,11 @@ pub fn serve( .and(validator_subscription_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .then( |subscriptions: Vec, validator_subscription_tx: Sender, task_spawner: TaskSpawner, - chain: Arc>, - log: Logger| { 
+ chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { let subscriptions: std::collections::BTreeSet<_> = subscriptions .iter() @@ -3675,10 +3578,9 @@ pub fn serve( ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; if let Err(e) = validator_subscription_tx.try_send(message) { warn!( - log, - "Unable to process committee subscriptions"; - "info" => "the host may be overloaded or resource-constrained", - "error" => ?e, + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process committee subscriptions" ); return Err(warp_utils::reject::custom_server_error( "unable to queue subscription, host may be overloaded or shutting down" @@ -3699,13 +3601,11 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .and(warp_utils::json::json()) .then( |not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>, - log: Logger, preparation_data: Vec| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { not_synced_filter?; @@ -3719,9 +3619,8 @@ pub fn serve( let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( - log, - "Received proposer preparation data"; - "count" => preparation_data.len(), + count = preparation_data.len(), + "Received proposer preparation data" ); execution_layer @@ -3753,12 +3652,10 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .and(warp_utils::json::json()) .then( |task_spawner: TaskSpawner, chain: Arc>, - log: Logger, register_val_data: Vec| async { let (tx, rx) = oneshot::channel(); @@ -3777,9 +3674,8 @@ pub fn serve( let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( - log, - "Received register validator request"; - "count" => register_val_data.len(), + count = register_val_data.len(), + "Received register validator request" 
); let head_snapshot = chain.head_snapshot(); @@ -3854,9 +3750,8 @@ pub fn serve( })?; info!( - log, - "Forwarding register validator request to connected builder"; - "count" => filtered_registration_data.len(), + count = filtered_registration_data.len(), + "Forwarding register validator request to connected builder" ); // It's a waste of a `BeaconProcessor` worker to just @@ -3881,10 +3776,9 @@ pub fn serve( .map(|resp| warp::reply::json(&resp).into_response()) .map_err(|e| { warn!( - log, - "Relay error when registering validator(s)"; - "num_registrations" => filtered_registration_data.len(), - "error" => ?e + num_registrations = filtered_registration_data.len(), + error = ?e, + "Relay error when registering validator(s)" ); // Forward the HTTP status code if we are able to, otherwise fall back // to a server error. @@ -3938,13 +3832,11 @@ pub fn serve( .and(validator_subscription_tx_filter) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) .then( |subscriptions: Vec, validator_subscription_tx: Sender, task_spawner: TaskSpawner, chain: Arc>, - log: Logger | { task_spawner.blocking_json_task(Priority::P0, move || { for subscription in subscriptions { @@ -3958,10 +3850,9 @@ pub fn serve( }; if let Err(e) = validator_subscription_tx.try_send(message) { warn!( - log, - "Unable to process sync subscriptions"; - "info" => "the host may be overloaded or resource-constrained", - "error" => ?e + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process sync subscriptions" ); return Err(warp_utils::reject::custom_server_error( "unable to queue subscription, host may be overloaded or shutting down".to_string(), @@ -4431,10 +4322,9 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) - .then(|query, task_spawner: TaskSpawner, chain, log| { + .then(|query, task_spawner: TaskSpawner, chain| { 
task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::get_block_rewards(query, chain, log) + block_rewards::get_block_rewards(query, chain) }) }); @@ -4446,14 +4336,11 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(log_filter.clone()) - .then( - |blocks, task_spawner: TaskSpawner, chain, log| { - task_spawner.blocking_json_task(Priority::P1, move || { - block_rewards::compute_block_rewards(blocks, chain, log) - }) - }, - ); + .then(|blocks, task_spawner: TaskSpawner, chain| { + task_spawner.blocking_json_task(Priority::P1, move || { + block_rewards::compute_block_rewards(blocks, chain) + }) + }); // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") @@ -4631,7 +4518,9 @@ pub fn serve( match msg { Ok(data) => { // Serialize to json - match data.to_json_string() { + match serde_json::to_string(&data) + .map_err(|e| format!("{:?}", e)) + { // Send the json as a Server Side Event Ok(json) => Ok(Event::default().data(json)), Err(e) => { @@ -4779,7 +4668,6 @@ pub fn serve( ), ) .recover(warp_utils::reject::handle_rejection) - .with(slog_logging(log.clone())) .with(prometheus_metrics()) // Add a `Server` header. 
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) @@ -4797,7 +4685,7 @@ pub fn serve( shutdown.await; })?; - info!(log, "HTTP API is being served over TLS";); + info!("HTTP API is being served over TLS"); (socket, Box::pin(server)) } @@ -4811,9 +4699,8 @@ pub fn serve( }; info!( - log, - "HTTP API started"; - "listen_address" => %http_server.0, + listen_address = %http_server.0, + "HTTP API started" ); Ok(http_server) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index c4945df9d7..971571f487 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -7,9 +7,9 @@ use beacon_chain::{ }; use eth2::types::{self as api_types}; use safe_arith::SafeArith; -use slog::{debug, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; +use tracing::debug; use types::{Epoch, EthSpec, Hash256, Slot}; /// The struct that is returned to the requesting HTTP client. @@ -19,7 +19,6 @@ type ApiDuties = api_types::DutiesResponse>; pub fn proposer_duties( request_epoch: Epoch, chain: &BeaconChain, - log: &Logger, ) -> Result { let current_epoch = chain .slot_clock @@ -52,11 +51,7 @@ pub fn proposer_duties( if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? 
{ Ok(duties) } else { - debug!( - log, - "Proposer cache miss"; - "request_epoch" => request_epoch, - ); + debug!(%request_epoch, "Proposer cache miss"); compute_and_cache_proposer_duties(request_epoch, chain) } } else if request_epoch diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 10d13e09a5..cd5e912bdf 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -45,7 +45,6 @@ use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use serde_json::Value; -use slog::{debug, error, warn, Logger}; use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; @@ -53,6 +52,7 @@ use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; +use tracing::{debug, error, warn}; use types::{Attestation, EthSpec, ForkName, SingleAttestation}; // Error variants are only used in `Debug` and considered `dead_code` by the compiler. 
@@ -80,14 +80,10 @@ enum PublishAttestationResult { pub fn deserialize_attestation_payload( payload: Value, fork_name: Option, - log: &Logger, ) -> Result, SingleAttestation>>, Error> { if fork_name.is_some_and(|fork_name| fork_name.electra_enabled()) || fork_name.is_none() { if fork_name.is_none() { - warn!( - log, - "No Consensus Version header specified."; - ); + warn!("No Consensus Version header specified."); } Ok(serde_json::from_value::>(payload) @@ -111,7 +107,6 @@ fn verify_and_publish_attestation( either_attestation: &Either, SingleAttestation>, seen_timestamp: Duration, network_tx: &UnboundedSender>, - log: &Logger, ) -> Result<(), Error> { let attestation = convert_to_attestation(chain, either_attestation)?; let verified_attestation = chain @@ -157,16 +152,14 @@ fn verify_and_publish_attestation( if let Err(e) = &fc_result { warn!( - log, - "Attestation invalid for fork choice"; - "err" => ?e, + err = ?e, + "Attestation invalid for fork choice" ); } if let Err(e) = &naive_aggregation_result { warn!( - log, - "Attestation invalid for aggregation"; - "err" => ?e + err = ?e, + "Attestation invalid for aggregation" ); } @@ -232,7 +225,6 @@ pub async fn publish_attestations( attestations: Vec, SingleAttestation>>, network_tx: UnboundedSender>, reprocess_send: Option>, - log: Logger, ) -> Result<(), warp::Rejection> { // Collect metadata about attestations which we'll use to report failures. We need to // move the `attestations` vec into the blocking task, so this small overhead is unavoidable. @@ -246,7 +238,6 @@ pub async fn publish_attestations( // Gossip validate and publish attestations that can be immediately processed. 
let seen_timestamp = timestamp_now(); - let inner_log = log.clone(); let mut prelim_results = task_spawner .blocking_task(Priority::P0, move || { Ok(attestations @@ -257,7 +248,6 @@ pub async fn publish_attestations( &attestation, seen_timestamp, &network_tx, - &inner_log, ) { Ok(()) => PublishAttestationResult::Success, Err(Error::Validation(AttestationError::UnknownHeadBlock { @@ -270,14 +260,12 @@ pub async fn publish_attestations( let (tx, rx) = oneshot::channel(); let reprocess_chain = chain.clone(); let reprocess_network_tx = network_tx.clone(); - let reprocess_log = inner_log.clone(); let reprocess_fn = move || { let result = verify_and_publish_attestation( &reprocess_chain, &attestation, seen_timestamp, &reprocess_network_tx, - &reprocess_log, ); // Ignore failure on the oneshot that reports the result. This // shouldn't happen unless some catastrophe befalls the waiting @@ -330,10 +318,9 @@ pub async fn publish_attestations( for (i, reprocess_result) in reprocess_indices.into_iter().zip(reprocess_results) { let Some(result_entry) = prelim_results.get_mut(i) else { error!( - log, - "Unreachable case in attestation publishing"; - "case" => "prelim out of bounds", - "request_index" => i, + case = "prelim out of bounds", + request_index = i, + "Unreachable case in attestation publishing" ); continue; }; @@ -361,39 +348,35 @@ pub async fn publish_attestations( Some(PublishAttestationResult::Failure(e)) => { if let Some((slot, committee_index)) = attestation_metadata.get(index) { error!( - log, - "Failure verifying attestation for gossip"; - "error" => ?e, - "request_index" => index, - "committee_index" => committee_index, - "attestation_slot" => slot, + error = ?e, + request_index = index, + committee_index, + attestation_slot = %slot, + "Failure verifying attestation for gossip" ); failures.push(Failure::new(index, format!("{e:?}"))); } else { error!( - log, - "Unreachable case in attestation publishing"; - "case" => "out of bounds", - "request_index" => index 
+ case = "out of bounds", + request_index = index, + "Unreachable case in attestation publishing" ); failures.push(Failure::new(index, "metadata logic error".into())); } } Some(PublishAttestationResult::Reprocessing(_)) => { error!( - log, - "Unreachable case in attestation publishing"; - "case" => "reprocessing", - "request_index" => index + case = "reprocessing", + request_index = index, + "Unreachable case in attestation publishing" ); failures.push(Failure::new(index, "reprocess logic error".into())); } None => { error!( - log, - "Unreachable case in attestation publishing"; - "case" => "result is None", - "request_index" => index + case = "result is None", + request_index = index, + "Unreachable case in attestation publishing" ); failures.push(Failure::new(index, "result logic error".into())); } @@ -402,9 +385,8 @@ pub async fn publish_attestations( if num_already_known > 0 { debug!( - log, - "Some unagg attestations already known"; - "count" => num_already_known + count = num_already_known, + "Some unagg attestations already known" ); } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 072ae5dc03..24af16680e 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -18,13 +18,13 @@ use futures::TryFutureExt; use lighthouse_network::{NetworkGlobals, PubsubMessage}; use network::NetworkMessage; use rand::prelude::SliceRandom; -use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, @@ -80,7 +80,6 @@ pub async fn publish_block>( provenanced_block: ProvenancedBlock, chain: Arc>, network_tx: &UnboundedSender>, - log: Logger, 
validation_level: BroadcastValidation, duplicate_status_code: StatusCode, network_globals: Arc>, @@ -99,12 +98,12 @@ pub async fn publish_block>( "builder" }; let block = unverified_block.inner_block(); - debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); + + debug!(slot = %block.slot(), "Signed block received in HTTP API"); /* actually publish a block */ let publish_block_p2p = move |block: Arc>, sender, - log, seen_timestamp| -> Result<(), BlockError> { let publish_timestamp = timestamp_now(); @@ -119,10 +118,9 @@ pub async fn publish_block>( ); info!( - log, - "Signed block published to network via HTTP API"; - "slot" => block.slot(), - "publish_delay_ms" => publish_delay.as_millis(), + slot = %block.slot(), + publish_delay_ms = publish_delay.as_millis(), + "Signed block published to network via HTTP API" ); crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) @@ -136,7 +134,7 @@ pub async fn publish_block>( let sender_clone = network_tx.clone(); let build_sidecar_task_handle = - spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs, log.clone())?; + spawn_build_data_sidecar_task(chain.clone(), block.clone(), unverified_blobs)?; // Gossip verify the block and blobs/data columns separately. 
let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); @@ -151,19 +149,13 @@ pub async fn publish_block>( if BroadcastValidation::Gossip == validation_level && should_publish_block { if let Some(block_publishing_delay) = block_publishing_delay_for_testing { debug!( - log, - "Publishing block with artificial delay"; - "block_publishing_delay" => ?block_publishing_delay + ?block_publishing_delay, + "Publishing block with artificial delay" ); tokio::time::sleep(block_publishing_delay).await; } - publish_block_p2p( - block.clone(), - sender_clone.clone(), - log.clone(), - seen_timestamp, - ) - .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; + publish_block_p2p(block.clone(), sender_clone.clone(), seen_timestamp) + .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; } let publish_fn_completed = Arc::new(AtomicBool::new(false)); @@ -175,15 +167,13 @@ pub async fn publish_block>( BroadcastValidation::Consensus => publish_block_p2p( block_to_publish.clone(), sender_clone.clone(), - log.clone(), seen_timestamp, )?, BroadcastValidation::ConsensusAndEquivocation => { - check_slashable(&chain, block_root, &block_to_publish, &log)?; + check_slashable(&chain, block_root, &block_to_publish)?; publish_block_p2p( block_to_publish.clone(), sender_clone.clone(), - log.clone(), seen_timestamp, )?; } @@ -206,11 +196,7 @@ pub async fn publish_block>( return if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) } else { - error!( - log, - "Invalid blob provided to HTTP API"; - "reason" => &msg - ); + error!(reason = &msg, "Invalid blob provided to HTTP API"); Err(warp_utils::reject::custom_bad_request(msg)) }; } @@ -227,9 +213,8 @@ pub async fn publish_block>( let delay = data_column_publishing_delay.saturating_sub(block_publishing_delay); if !delay.is_zero() { debug!( - log, - "Publishing data columns with artificial delay"; - 
"data_column_publishing_delay" => ?data_column_publishing_delay + ?data_column_publishing_delay, + "Publishing data columns with artificial delay" ); tokio::time::sleep(delay).await; } @@ -254,9 +239,8 @@ pub async fn publish_block>( Err(warp_utils::reject::broadcast_without_import(msg)) } else { error!( - log, - "Invalid data column during block publication"; - "reason" => &msg + reason = &msg, + "Invalid data column during block publication" ); Err(warp_utils::reject::custom_bad_request(msg)) }; @@ -280,7 +264,6 @@ pub async fn publish_block>( is_locally_built_block, seen_timestamp, &chain, - &log, ) .await } @@ -293,7 +276,6 @@ pub async fn publish_block>( is_locally_built_block, seen_timestamp, &chain, - &log, ) .await } else { @@ -313,10 +295,9 @@ pub async fn publish_block>( } Err(BlockError::DuplicateImportStatusUnknown(root)) => { debug!( - log, - "Block previously seen"; - "block_root" => ?root, - "slot" => block.slot(), + block_root = ?root, + slot = %block.slot(), + "Block previously seen" ); let import_result = Box::pin(chain.process_block( block_root, @@ -333,16 +314,14 @@ pub async fn publish_block>( is_locally_built_block, seen_timestamp, &chain, - &log, ) .await } Err(e) => { warn!( - log, - "Not publishing block - not gossip verified"; - "slot" => slot, - "error" => %e + %slot, + error = %e, + "Not publishing block - not gossip verified" ); Err(warp_utils::reject::custom_bad_request(e.to_string())) } @@ -365,7 +344,6 @@ fn spawn_build_data_sidecar_task( chain: Arc>, block: Arc>>, proofs_and_blobs: UnverifiedBlobs, - log: Logger, ) -> Result>, Rejection> { chain .clone() @@ -380,12 +358,12 @@ fn spawn_build_data_sidecar_task( if !peer_das_enabled { // Pre-PeerDAS: construct blob sidecars for the network. 
let gossip_verified_blobs = - build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs, &log)?; + build_gossip_verified_blobs(&chain, &block, blobs, kzg_proofs)?; Ok((gossip_verified_blobs, vec![])) } else { // Post PeerDAS: construct data columns. let gossip_verified_data_columns = - build_gossip_verified_data_columns(&chain, &block, blobs, &log)?; + build_gossip_verified_data_columns(&chain, &block, blobs)?; Ok((vec![], gossip_verified_data_columns)) } }, @@ -404,16 +382,14 @@ fn build_gossip_verified_data_columns( chain: &BeaconChain, block: &SignedBeaconBlock>, blobs: BlobsList, - log: &Logger, ) -> Result>>, Rejection> { let slot = block.slot(); let data_column_sidecars = build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| { error!( - log, - "Invalid data column - not publishing block"; - "error" => ?e, - "slot" => slot + error = ?e, + %slot, + "Invalid data column - not publishing block" ); warp_utils::reject::custom_bad_request(format!("{e:?}")) })?; @@ -434,21 +410,19 @@ fn build_gossip_verified_data_columns( // or some of the other data columns if the block & data columns are only // partially published by the other publisher. 
debug!( - log, - "Data column for publication already known"; - "column_index" => column_index, - "slot" => slot, - "proposer" => proposer, + column_index, + %slot, + proposer, + "Data column for publication already known" ); Ok(None) } Err(e) => { error!( - log, - "Data column for publication is gossip-invalid"; - "column_index" => column_index, - "slot" => slot, - "error" => ?e, + column_index, + %slot, + error = ?e, + "Data column for publication is gossip-invalid" ); Err(warp_utils::reject::custom_bad_request(format!("{e:?}"))) } @@ -464,7 +438,6 @@ fn build_gossip_verified_blobs( block: &SignedBeaconBlock>, blobs: BlobsList, kzg_proofs: KzgProofs, - log: &Logger, ) -> Result>>, Rejection> { let slot = block.slot(); let gossip_verified_blobs = kzg_proofs @@ -479,11 +452,10 @@ fn build_gossip_verified_blobs( .map(Arc::new) .map_err(|e| { error!( - log, - "Invalid blob - not publishing block"; - "error" => ?e, - "blob_index" => i, - "slot" => slot, + error = ?e, + blob_index = i, + %slot, + "Invalid blob - not publishing block" ); warp_utils::reject::custom_bad_request(format!("{e:?}")) })?; @@ -499,21 +471,19 @@ fn build_gossip_verified_blobs( // or some of the other blobs if the block & blobs are only partially published // by the other publisher. 
debug!( - log, - "Blob for publication already known"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "proposer" => proposer, + blob_index = blob_sidecar.index, + %slot, + proposer, + "Blob for publication already known" ); Ok(None) } Err(e) => { error!( - log, - "Blob for publication is gossip-invalid"; - "blob_index" => blob_sidecar.index, - "slot" => slot, - "error" => ?e, + blob_index = blob_sidecar.index, + %slot, + error = ?e, + "Blob for publication is gossip-invalid" ); Err(warp_utils::reject::custom_bad_request(e.to_string())) } @@ -524,6 +494,15 @@ fn build_gossip_verified_blobs( Ok(gossip_verified_blobs) } +fn publish_blob_sidecars( + sender_clone: &UnboundedSender>, + blob: &GossipVerifiedBlob, +) -> Result<(), BlockError> { + let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); + crate::publish_pubsub_message(sender_clone, pubsub_message) + .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) +} + fn publish_column_sidecars( sender_clone: &UnboundedSender>, data_column_sidecars: &[Option>], @@ -554,15 +533,6 @@ fn publish_column_sidecars( .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) } -fn publish_blob_sidecars( - sender_clone: &UnboundedSender>, - blob: &GossipVerifiedBlob, -) -> Result<(), BlockError> { - let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); - crate::publish_pubsub_message(sender_clone, pubsub_message) - .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) -} - async fn post_block_import_logging_and_response( result: Result, validation_level: BroadcastValidation, @@ -570,7 +540,6 @@ async fn post_block_import_logging_and_response( is_locally_built_block: bool, seen_timestamp: Duration, chain: &Arc>, - log: &Logger, ) -> Result { match result { // The `DuplicateFullyImported` case here captures the case where the block finishes @@ -582,12 +551,11 @@ async fn 
post_block_import_logging_and_response( | Err(BlockError::DuplicateFullyImported(root)) => { let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); info!( - log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => %root, - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), + block_delay = ?delay, + root = %root, + proposer_index = block.message().proposer_index(), + slot = %block.slot(), + "Valid block from HTTP API" ); // Notify the validator monitor. @@ -606,7 +574,7 @@ async fn post_block_import_logging_and_response( // blocks built with builders we consider the broadcast time to be // when the blinded block is published to the builder. if is_locally_built_block { - late_block_logging(chain, seen_timestamp, block.message(), root, "local", log) + late_block_logging(chain, seen_timestamp, block.message(), root, "local") } Ok(warp::reply().into_response()) } @@ -615,11 +583,7 @@ async fn post_block_import_logging_and_response( if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(msg)) } else { - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); + error!(reason = &msg, "Invalid block provided to HTTP API"); Err(warp_utils::reject::custom_bad_request(msg)) } } @@ -636,9 +600,8 @@ async fn post_block_import_logging_and_response( Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) } else { error!( - log, - "Invalid block provided to HTTP API"; - "reason" => ?e, + reason = ?e, + "Invalid block provided to HTTP API" ); Err(warp_utils::reject::custom_bad_request(format!( "Invalid block: {e}" @@ -654,20 +617,17 @@ pub async fn publish_blinded_block( blinded_block: Arc>, chain: Arc>, network_tx: &UnboundedSender>, - log: Logger, validation_level: BroadcastValidation, duplicate_status_code: StatusCode, network_globals: Arc>, ) -> Result { let block_root = blinded_block.canonical_root(); - let full_block = - 
reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; + let full_block = reconstruct_block(chain.clone(), block_root, blinded_block).await?; publish_block::( Some(block_root), full_block, chain, network_tx, - log, validation_level, duplicate_status_code, network_globals, @@ -682,7 +642,6 @@ pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, block: Arc>, - log: Logger, ) -> Result>>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { @@ -706,7 +665,7 @@ pub async fn reconstruct_block( } else if let Some(cached_payload) = el.get_payload_by_root(&payload_header.tree_hash_root()) { - info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash()); + info!(block_hash = ?cached_payload.block_hash(), "Reconstructing a full block using a local payload"); ProvenancedPayload::Local(cached_payload) // Otherwise, this means we are attempting a blind block proposal. 
} else { @@ -721,7 +680,6 @@ pub async fn reconstruct_block( block.message(), block_root, "builder", - &log, ); let full_payload = el @@ -733,7 +691,7 @@ pub async fn reconstruct_block( e )) })?; - info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash()); + info!(block_hash = ?full_payload.block_hash(), "Successfully published a block to the builder network"); ProvenancedPayload::Builder(full_payload) }; @@ -775,7 +733,6 @@ fn late_block_logging>( block: BeaconBlockRef, root: Hash256, provenance: &str, - log: &Logger, ) { let delay = get_block_delay_ms(seen_timestamp, block, &chain.slot_clock); @@ -794,23 +751,21 @@ fn late_block_logging>( let delayed_threshold = too_late_threshold / 2; if delay >= too_late_threshold { error!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "provenance" => provenance, - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, + msg = "system may be overloaded, block likely to be orphaned", + provenance, + delay_ms = delay.as_millis(), + slot = %block.slot(), + ?root, + "Block was broadcast too late" ) } else if delay >= delayed_threshold { error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "provenance" => provenance, - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, + msg = "system may be overloaded, block may be orphaned", + provenance, + delay_ms = delay.as_millis(), + slot = %block.slot(), + ?root, + "Block broadcast was delayed" ) } } @@ -820,7 +775,6 @@ fn check_slashable( chain_clone: &BeaconChain, block_root: Hash256, block_clone: &SignedBeaconBlock>, - log_clone: &Logger, ) -> Result<(), BlockError> { let slashable_cache = chain_clone.observed_slashable.read(); if slashable_cache @@ -832,9 +786,8 @@ fn check_slashable( .map_err(|e| BlockError::BeaconChainError(e.into()))? 
{ warn!( - log_clone, - "Not publishing equivocating block"; - "slot" => block_clone.slot() + slot = %block_clone.slot(), + "Not publishing equivocating block" ); return Err(BlockError::Slashable); } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index ec63372406..e5a9d9daea 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -2,9 +2,9 @@ use crate::{BlockId, ExecutionOptimistic}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::SyncCommitteeReward; use eth2::types::ValidatorId; -use slog::{debug, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; +use tracing::debug; use types::{BeaconState, SignedBlindedBeaconBlock}; use warp_utils::reject::{custom_not_found, unhandled_error}; @@ -12,7 +12,6 @@ pub fn compute_sync_committee_rewards( chain: Arc>, block_id: BlockId, validators: Vec, - log: Logger, ) -> Result<(Option>, ExecutionOptimistic, bool), warp::Rejection> { let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; @@ -23,7 +22,7 @@ pub fn compute_sync_committee_rewards( .map_err(unhandled_error)?; let data = if reward_payload.is_empty() { - debug!(log, "compute_sync_committee_rewards returned empty"); + debug!("compute_sync_committee_rewards returned empty"); None } else if validators.is_empty() { Some(reward_payload) diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index da9f9b7a06..9ca1a2401a 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -11,11 +11,11 @@ use beacon_chain::{ use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use std::cmp::max; use std::collections::HashMap; use 
tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, error, warn}; use types::{ slot_data::SlotData, BeaconStateError, Epoch, EthSpec, SignedContributionAndProof, SyncCommitteeMessage, SyncDuty, SyncSubnetId, @@ -178,7 +178,6 @@ pub fn process_sync_committee_signatures( sync_committee_signatures: Vec, network_tx: UnboundedSender>, chain: &BeaconChain, - log: Logger, ) -> Result<(), warp::reject::Rejection> { let mut failures = vec![]; @@ -192,10 +191,9 @@ pub fn process_sync_committee_signatures( Ok(positions) => positions, Err(e) => { error!( - log, - "Unable to compute subnet positions for sync message"; - "error" => ?e, - "slot" => sync_committee_signature.slot, + error = ?e, + slot = %sync_committee_signature.slot, + "Unable to compute subnet positions for sync message" ); failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e))); continue; @@ -248,22 +246,20 @@ pub fn process_sync_committee_signatures( new_root, }) => { debug!( - log, - "Ignoring already-known sync message"; - "new_root" => ?new_root, - "prev_root" => ?prev_root, - "slot" => slot, - "validator_index" => validator_index, + ?new_root, + ?prev_root, + %slot, + validator_index, + "Ignoring already-known sync message" ); } Err(e) => { error!( - log, - "Failure verifying sync committee signature for gossip"; - "error" => ?e, - "request_index" => i, - "slot" => sync_committee_signature.slot, - "validator_index" => sync_committee_signature.validator_index, + error = ?e, + request_index = i, + slot = %sync_committee_signature.slot, + validator_index = sync_committee_signature.validator_index, + "Failure verifying sync committee signature for gossip" ); failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e))); } @@ -273,11 +269,10 @@ pub fn process_sync_committee_signatures( if let Some(verified) = verified_for_pool { if let Err(e) = chain.add_to_naive_sync_aggregation_pool(verified) { error!( - log, - "Unable to add sync committee signature to pool"; - "error" 
=> ?e, - "slot" => sync_committee_signature.slot, - "validator_index" => sync_committee_signature.validator_index, + error = ?e, + slot = %sync_committee_signature.slot, + validator_index = sync_committee_signature.validator_index, + "Unable to add sync committee signature to pool" ); } } @@ -312,7 +307,6 @@ pub fn process_signed_contribution_and_proofs( signed_contribution_and_proofs: Vec>, network_tx: UnboundedSender>, chain: &BeaconChain, - log: Logger, ) -> Result<(), warp::reject::Rejection> { let mut verified_contributions = Vec::with_capacity(signed_contribution_and_proofs.len()); let mut failures = vec![]; @@ -362,13 +356,12 @@ pub fn process_signed_contribution_and_proofs( Err(SyncVerificationError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { error!( - log, - "Failure verifying signed contribution and proof"; - "error" => ?e, - "request_index" => index, - "aggregator_index" => aggregator_index, - "subcommittee_index" => subcommittee_index, - "contribution_slot" => contribution_slot, + error = ?e, + request_index = index, + aggregator_index = aggregator_index, + subcommittee_index = subcommittee_index, + contribution_slot = %contribution_slot, + "Failure verifying signed contribution and proof" ); failures.push(api_types::Failure::new( index, @@ -382,10 +375,9 @@ pub fn process_signed_contribution_and_proofs( for (index, verified_contribution) in verified_contributions { if let Err(e) = chain.add_contribution_to_block_inclusion_pool(verified_contribution) { warn!( - log, - "Could not add verified sync contribution to the inclusion pool"; - "error" => ?e, - "request_index" => index, + error = ?e, + request_index = index, + "Could not add verified sync contribution to the inclusion pool" ); failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); } diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index c692ec999e..f78a361dad 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ 
b/beacon_node/http_api/src/test_utils.rs @@ -19,10 +19,8 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkConfig, NetworkGlobals, PeerId, PeerManager, }; -use logging::test_logger; use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; -use slog::Logger; use std::future::Future; use std::net::SocketAddr; use std::sync::Arc; @@ -75,7 +73,6 @@ impl InteractiveTester { ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) - .logger(test_logger()) .mock_execution_layer(); harness_builder = if let Some(initializer) = initializer { @@ -102,13 +99,7 @@ impl InteractiveTester { listening_socket, network_rx, .. - } = create_api_server_with_config( - harness.chain.clone(), - config, - &harness.runtime, - harness.logger().clone(), - ) - .await; + } = create_api_server_with_config(harness.chain.clone(), config, &harness.runtime).await; tokio::spawn(server); @@ -134,16 +125,14 @@ impl InteractiveTester { pub async fn create_api_server( chain: Arc>, test_runtime: &TestRuntime, - log: Logger, ) -> ApiServer> { - create_api_server_with_config(chain, Config::default(), test_runtime, log).await + create_api_server_with_config(chain, Config::default(), test_runtime).await } pub async fn create_api_server_with_config( chain: Arc>, http_config: Config, test_runtime: &TestRuntime, - log: Logger, ) -> ApiServer> { // Use port 0 to allocate a new unused port. let port = 0; @@ -174,14 +163,13 @@ pub async fn create_api_server_with_config( meta_data, vec![], false, - &log, network_config, chain.spec.clone(), )); // Only a peer manager can add peers, so we create a dummy manager. 
let config = lighthouse_network::peer_manager::config::Config::default(); - let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap(); + let mut pm = PeerManager::new(config, network_globals.clone()).unwrap(); // add a peer let peer_id = PeerId::random(); @@ -200,8 +188,7 @@ pub async fn create_api_server_with_config( })); *network_globals.sync_state.write() = SyncState::Synced; - let eth1_service = - eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); + let eth1_service = eth1::Service::new(eth1::Config::default(), chain.spec.clone()).unwrap(); let beacon_processor_config = BeaconProcessorConfig { // The number of workers must be greater than one. Tests which use the @@ -225,7 +212,6 @@ pub async fn create_api_server_with_config( executor: test_runtime.task_executor.clone(), current_workers: 0, config: beacon_processor_config, - log: log.clone(), } .spawn_manager( beacon_processor_rx, @@ -258,7 +244,6 @@ pub async fn create_api_server_with_config( beacon_processor_reprocess_send: Some(reprocess_send), eth1_service: Some(eth1_service), sse_logging_components: None, - log, }); let (listening_socket, server) = diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 1baa71699c..b888439238 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -331,7 +331,6 @@ pub async fn consensus_partial_pass_only_consensus() { let validator_count = 64; let num_initial: u64 = 31; let tester = InteractiveTester::::new(None, validator_count).await; - let test_logger = tester.harness.logger().clone(); // Create some chain depth. 
tester.harness.advance_slot(); @@ -379,7 +378,6 @@ pub async fn consensus_partial_pass_only_consensus() { ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain.clone(), &channel.0, - test_logger, validation_level, StatusCode::ACCEPTED, network_globals, @@ -624,7 +622,6 @@ pub async fn equivocation_consensus_late_equivocation() { let validator_count = 64; let num_initial: u64 = 31; let tester = InteractiveTester::::new(None, validator_count).await; - let test_logger = tester.harness.logger().clone(); // Create some chain depth. tester.harness.advance_slot(); @@ -671,7 +668,6 @@ pub async fn equivocation_consensus_late_equivocation() { ProvenancedBlock::local(gossip_block_b.unwrap(), blobs_b), tester.harness.chain, &channel.0, - test_logger, validation_level, StatusCode::ACCEPTED, network_globals, @@ -1236,7 +1232,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let validator_count = 64; let num_initial: u64 = 31; let tester = InteractiveTester::::new(None, validator_count).await; - let test_logger = tester.harness.logger().clone(); // Create some chain depth. 
tester.harness.advance_slot(); @@ -1276,7 +1271,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain.clone(), block_a.canonical_root(), Arc::new(block_a), - test_logger.clone(), ) .await .unwrap(); @@ -1284,7 +1278,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain.clone(), block_b.canonical_root(), block_b.clone(), - test_logger.clone(), ) .await .unwrap(); @@ -1310,7 +1303,6 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { block_b, tester.harness.chain, &channel.0, - test_logger, validation_level, StatusCode::ACCEPTED, network_globals, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index f7dbedc9ca..2020130d18 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -26,7 +26,6 @@ use http_api::{ BlockId, StateId, }; use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; -use logging::test_logger; use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; @@ -135,7 +134,6 @@ impl ApiTester { reconstruct_historic_states: config.retain_historic_states, ..ChainConfig::default() }) - .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -277,8 +275,6 @@ impl ApiTester { "precondition: justification" ); - let log = test_logger(); - let ApiServer { ctx, server, @@ -286,7 +282,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), &harness.runtime, log).await; + } = create_api_server(chain.clone(), &harness.runtime).await; harness.runtime.task_executor.spawn(server, "api_server"); @@ -375,7 +371,6 @@ impl ApiTester { let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); - let log = test_logger(); let ApiServer { ctx, @@ -384,7 +379,7 @@ 
impl ApiTester { network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), &harness.runtime, log).await; + } = create_api_server(chain.clone(), &harness.runtime).await; harness.runtime.task_executor.spawn(server, "api_server"); diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index 9ad073439d..e12053ac43 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -10,12 +10,13 @@ beacon_chain = { workspace = true } health_metrics = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } +logging = { workspace = true } malloc_utils = { workspace = true } metrics = { workspace = true } serde = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } store = { workspace = true } +tracing = { workspace = true } warp = { workspace = true } warp_utils = { workspace = true } diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index 2895506c3b..6cbb485d71 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -6,12 +6,13 @@ mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::prometheus_client::registry::Registry; use lighthouse_version::version_with_platform; +use logging::crit; use serde::{Deserialize, Serialize}; -use slog::{crit, info, Logger}; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::sync::Arc; +use tracing::info; use warp::{http::Response, Filter}; #[derive(Debug)] @@ -41,7 +42,6 @@ pub struct Context { pub db_path: Option, pub freezer_db_path: Option, pub gossipsub_registry: Option>, - pub log: Logger, } /// Configuration for the HTTP server. 
@@ -86,7 +86,6 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result<(SocketAddr, impl Future), Error> { let config = &ctx.config; - let log = ctx.log.clone(); // Configure CORS. let cors_builder = { @@ -103,7 +102,7 @@ pub fn serve( // Sanity check. if !config.enabled { - crit!(log, "Cannot start disabled metrics HTTP server"); + crit!("Cannot start disabled metrics HTTP server"); return Err(Error::Other( "A disabled metrics server should not be started".to_string(), )); @@ -144,9 +143,8 @@ pub fn serve( )?; info!( - log, - "Metrics HTTP server started"; - "listen_address" => listening_socket.to_string(), + listen_address = listening_socket.to_string(), + "Metrics HTTP server started" ); Ok((listening_socket, server)) diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index d903e233fb..2de2fd96f8 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,6 @@ use beacon_chain::test_utils::EphemeralHarnessType; use http_metrics::Config; -use logging::test_logger; +use logging::create_test_tracing_subscriber; use reqwest::header::HeaderValue; use reqwest::StatusCode; use std::net::{IpAddr, Ipv4Addr}; @@ -12,9 +12,8 @@ type Context = http_metrics::Context>; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn returns_200_ok() { + create_test_tracing_subscriber(); async { - let log = test_logger(); - let context = Arc::new(Context { config: Config { enabled: true, @@ -27,7 +26,6 @@ async fn returns_200_ok() { db_path: None, freezer_db_path: None, gossipsub_registry: None, - log, }); let ctx = context.clone(); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 325c266195..d60bfc3735 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -17,12 +17,13 @@ ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } fnv 
= { workspace = true } futures = { workspace = true } -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", tag = "sigp-gossipsub-0.1" } +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", branch = "sigp-gossipsub" } hex = { workspace = true } itertools = { workspace = true } libp2p-mplex = "0.43" lighthouse_version = { workspace = true } local-ip-address = "0.6" +logging = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } metrics = { workspace = true } @@ -32,7 +33,6 @@ rand = { workspace = true } regex = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } -slog = { workspace = true } smallvec = { workspace = true } snap = { workspace = true } ssz_types = { workspace = true } @@ -43,6 +43,8 @@ tiny-keccak = "2" tokio = { workspace = true } tokio-io-timeout = "1" tokio-util = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } unsigned-varint = { version = "0.8", features = ["codec"] } unused_port = { workspace = true } @@ -57,8 +59,6 @@ async-channel = { workspace = true } logging = { workspace = true } quickcheck = { workspace = true } quickcheck_macros = { workspace = true } -slog-async = { workspace = true } -slog-term = { workspace = true } tempfile = { workspace = true } [features] diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 8067711954..e70c8047e0 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -9,13 +9,13 @@ use crate::NetworkConfig; use alloy_rlp::bytes::Bytes; use libp2p::identity::Keypair; use lighthouse_version::{client_name, version}; -use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; use std::fs::File; use std::io::prelude::*; use std::path::Path; use 
std::str::FromStr; +use tracing::{debug, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; @@ -99,20 +99,19 @@ pub fn use_or_load_enr( enr_key: &CombinedKey, local_enr: &mut Enr, config: &NetworkConfig, - log: &slog::Logger, ) -> Result<(), String> { let enr_f = config.network_dir.join(ENR_FILENAME); if let Ok(mut enr_file) = File::open(enr_f.clone()) { let mut enr_string = String::new(); match enr_file.read_to_string(&mut enr_string) { - Err(_) => debug!(log, "Could not read ENR from file"), + Err(_) => debug!("Could not read ENR from file"), Ok(_) => { match Enr::from_str(&enr_string) { Ok(disk_enr) => { // if the same node id, then we may need to update our sequence number if local_enr.node_id() == disk_enr.node_id() { if compare_enr(local_enr, &disk_enr) { - debug!(log, "ENR loaded from disk"; "file" => ?enr_f); + debug!(file = ?enr_f,"ENR loaded from disk"); // the stored ENR has the same configuration, use it *local_enr = disk_enr; return Ok(()); @@ -125,18 +124,18 @@ pub fn use_or_load_enr( local_enr.set_seq(new_seq_no, enr_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; - debug!(log, "ENR sequence number increased"; "seq" => new_seq_no); + debug!(seq = new_seq_no, "ENR sequence number increased"); } } Err(e) => { - warn!(log, "ENR from file could not be decoded"; "error" => ?e); + warn!(error = ?e,"ENR from file could not be decoded"); } } } } } - save_enr_to_disk(&config.network_dir, local_enr, log); + save_enr_to_disk(&config.network_dir, local_enr); Ok(()) } @@ -150,7 +149,6 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, - log: &slog::Logger, spec: &ChainSpec, ) -> Result { // Build the local ENR. 
@@ -159,7 +157,7 @@ pub fn build_or_load_enr( let enr_key = CombinedKey::from_libp2p(local_key)?; let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; - use_or_load_enr(&enr_key, &mut local_enr, config, log)?; + use_or_load_enr(&enr_key, &mut local_enr, config)?; Ok(local_enr) } @@ -314,18 +312,19 @@ pub fn load_enr_from_disk(dir: &Path) -> Result { } /// Saves an ENR to disk -pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { +pub fn save_enr_to_disk(dir: &Path, enr: &Enr) { let _ = std::fs::create_dir_all(dir); match File::create(dir.join(Path::new(ENR_FILENAME))) .and_then(|mut f| f.write_all(enr.to_base64().as_bytes())) { Ok(_) => { - debug!(log, "ENR written to disk"); + debug!("ENR written to disk"); } Err(e) => { warn!( - log, - "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => %e + file = format!("{:?}{:?}",dir, ENR_FILENAME), + error = %e, + "Could not write ENR to file" ); } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 33c7775ae2..ad54c6b8b1 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -31,8 +31,8 @@ pub use libp2p::{ SubstreamProtocol, ToSwarm, }, }; +use logging::crit; use lru::LruCache; -use slog::{crit, debug, error, info, trace, warn}; use ssz::Encode; use std::num::NonZeroUsize; use std::{ @@ -45,6 +45,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::mpsc; +use tracing::{debug, error, info, trace, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; mod subnet_predicate; @@ -192,8 +193,6 @@ pub struct Discovery { /// Specifies whether various port numbers should be updated after the discovery service has been started update_ports: UpdatePorts, - /// Logger for the discovery behaviour. 
- log: slog::Logger, spec: Arc, } @@ -203,11 +202,8 @@ impl Discovery { local_key: Keypair, config: &NetworkConfig, network_globals: Arc>, - log: &slog::Logger, spec: &ChainSpec, ) -> Result { - let log = log.clone(); - let enr_dir = match config.network_dir.to_str() { Some(path) => String::from(path), None => String::from(""), @@ -216,9 +212,11 @@ impl Discovery { let local_enr = network_globals.local_enr.read().clone(); let local_node_id = local_enr.node_id(); - info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), - "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6(), - "quic4" => ?local_enr.quic4(), "quic6" => ?local_enr.quic6() + info!( + enr = local_enr.to_base64(), seq = local_enr.seq(), id = %local_enr.node_id(), + ip4 = ?local_enr.ip4(), udp4= ?local_enr.udp4(), tcp4 = ?local_enr.tcp4(), tcp6 = ?local_enr.tcp6(), udp6 = ?local_enr.udp6(), + quic4 = ?local_enr.quic4(), quic6 = ?local_enr.quic6(), + "ENR Initialised" ); // convert the keypair into an ENR key @@ -234,22 +232,20 @@ impl Discovery { continue; } debug!( - log, - "Adding node to routing table"; - "node_id" => %bootnode_enr.node_id(), - "peer_id" => %bootnode_enr.peer_id(), - "ip" => ?bootnode_enr.ip4(), - "udp" => ?bootnode_enr.udp4(), - "tcp" => ?bootnode_enr.tcp4(), - "quic" => ?bootnode_enr.quic4() + node_id = %bootnode_enr.node_id(), + peer_id = %bootnode_enr.peer_id(), + ip = ?bootnode_enr.ip4(), + udp = ?bootnode_enr.udp4(), + tcp = ?bootnode_enr.tcp4(), + quic = bootnode_enr.quic4(), + "Adding node to routing table" ); let repr = bootnode_enr.to_string(); let _ = discv5.add_enr(bootnode_enr).map_err(|e| { error!( - log, - "Could not add peer to the local routing table"; - "addr" => repr, - "error" => e.to_string(), + addr = repr, + error = e.to_string(), + "Could not add peer to the local routing table" ) }); } @@ -257,14 +253,14 @@ impl 
Discovery { // Start the discv5 service and obtain an event stream let event_stream = if !config.disable_discovery { discv5.start().map_err(|e| e.to_string()).await?; - debug!(log, "Discovery service started"); + debug!("Discovery service started"); EventStream::Awaiting(Box::pin(discv5.event_stream())) } else { EventStream::InActive }; if !config.boot_nodes_multiaddr.is_empty() { - info!(log, "Contacting Multiaddr boot-nodes for their ENR"); + info!("Contacting Multiaddr boot-nodes for their ENR"); } // get futures for requesting the Enrs associated to these multiaddr and wait for their @@ -286,26 +282,28 @@ impl Discovery { match result { Ok(enr) => { debug!( - log, - "Adding node to routing table"; - "node_id" => %enr.node_id(), - "peer_id" => %enr.peer_id(), - "ip" => ?enr.ip4(), - "udp" => ?enr.udp4(), - "tcp" => ?enr.tcp4(), - "quic" => ?enr.quic4() + node_id = %enr.node_id(), + peer_id = %enr.peer_id(), + ip4 = ?enr.ip4(), + udp4 = ?enr.udp4(), + tcp4 = ?enr.tcp4(), + quic4 = ?enr.quic4(), + "Adding node to routing table" ); let _ = discv5.add_enr(enr).map_err(|e| { error!( - log, - "Could not add peer to the local routing table"; - "addr" => original_addr.to_string(), - "error" => e.to_string(), + addr = original_addr.to_string(), + error = e.to_string(), + "Could not add peer to the local routing table" ) }); } Err(e) => { - error!(log, "Error getting mapping to ENR"; "multiaddr" => original_addr.to_string(), "error" => e.to_string()) + error!( + multiaddr = original_addr.to_string(), + error = e.to_string(), + "Error getting mapping to ENR" + ) } } } @@ -327,7 +325,6 @@ impl Discovery { event_stream, started: !config.disable_discovery, update_ports, - log, enr_dir, spec: Arc::new(spec.clone()), }) @@ -358,7 +355,7 @@ impl Discovery { } // Immediately start a FindNode query let target_peers = std::cmp::min(FIND_NODE_QUERY_CLOSEST_PEERS, target_peers); - debug!(self.log, "Starting a peer discovery request"; "target_peers" => target_peers ); + 
debug!(target_peers, "Starting a peer discovery request"); self.find_peer_active = true; self.start_query(QueryType::FindPeers, target_peers, |_| true); } @@ -370,9 +367,8 @@ impl Discovery { return; } trace!( - self.log, - "Starting discovery query for subnets"; - "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() + subnets = ?subnets_to_discover.iter().map(|s| s.subnet).collect::>(), + "Starting discovery query for subnets" ); for subnet in subnets_to_discover { self.add_subnet_query(subnet.subnet, subnet.min_ttl, 0); @@ -386,9 +382,8 @@ impl Discovery { if let Err(e) = self.discv5.add_enr(enr) { debug!( - self.log, - "Could not add peer to the local routing table"; - "error" => %e + error = %e, + "Could not add peer to the local routing table" ) } } @@ -427,7 +422,7 @@ impl Discovery { // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk - enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); Ok(true) } @@ -463,7 +458,7 @@ impl Discovery { // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk - enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); Ok(true) } @@ -475,7 +470,7 @@ impl Discovery { const IS_TCP: bool = false; if self.discv5.update_local_enr_socket(socket_addr, IS_TCP) { // persist modified enr to disk - enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); } *self.network_globals.local_enr.write() = self.discv5.local_enr(); Ok(()) @@ -561,7 +556,7 @@ impl Discovery { *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk - 
enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); Ok(()) } @@ -575,10 +570,11 @@ impl Discovery { format!("{:?}", enr_fork_id.next_fork_epoch) }; - info!(self.log, "Updating the ENR fork version"; - "fork_digest" => ?enr_fork_id.fork_digest, - "next_fork_version" => ?enr_fork_id.next_fork_version, - "next_fork_epoch" => next_fork_epoch_log, + info!( + fork_digest = ?enr_fork_id.fork_digest, + next_fork_version = ?enr_fork_id.next_fork_version, + next_fork_epoch = next_fork_epoch_log, + "Updating the ENR fork version" ); let _ = self @@ -586,9 +582,8 @@ impl Discovery { .enr_insert::(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes().into()) .map_err(|e| { warn!( - self.log, - "Could not update eth2 ENR field"; - "error" => ?e + error = ?e, + "Could not update eth2 ENR field" ) }); @@ -596,7 +591,7 @@ impl Discovery { *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk - enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); } // Bans a peer and it's associated seen IP addresses. @@ -642,10 +637,7 @@ impl Discovery { fn add_subnet_query(&mut self, subnet: Subnet, min_ttl: Option, retries: usize) { // remove the entry and complete the query if greater than the maximum search count if retries > MAX_DISCOVERY_RETRY { - debug!( - self.log, - "Subnet peer discovery did not find sufficient peers. Reached max retry limit" - ); + debug!("Subnet peer discovery did not find sufficient peers. Reached max retry limit"); return; } @@ -666,7 +658,7 @@ impl Discovery { } if !found { // update the metrics and insert into the queue. 
- trace!(self.log, "Queuing subnet query"; "subnet" => ?subnet, "retries" => retries); + trace!(?subnet, retries, "Queuing subnet query"); self.queued_queries.push_back(SubnetQuery { subnet, min_ttl, @@ -737,19 +729,21 @@ impl Discovery { .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { - debug!(self.log, "Discovery ignored"; - "reason" => "Already connected to desired peers", - "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, + debug!( + reason = "Already connected to desired peers", + connected_peers_on_subnet = peers_on_subnet, + target_subnet_peers = TARGET_SUBNET_PEERS, + "Discovery ignored" ); return false; } let target_peers = TARGET_SUBNET_PEERS.saturating_sub(peers_on_subnet); - trace!(self.log, "Discovery query started for subnet"; - "subnet_query" => ?subnet_query, - "connected_peers_on_subnet" => peers_on_subnet, - "peers_to_find" => target_peers, + trace!( + ?subnet_query, + connected_peers_on_subnet = peers_on_subnet, + peers_to_find = target_peers, + "Discovery query started for subnet" ); filtered_subnets.push(subnet_query.subnet); @@ -760,13 +754,11 @@ impl Discovery { // Only start a discovery query if we have a subnet to look for. 
if !filtered_subnet_queries.is_empty() { // build the subnet predicate as a combination of the eth2_fork_predicate and the subnet predicate - let subnet_predicate = - subnet_predicate::(filtered_subnets, &self.log, self.spec.clone()); + let subnet_predicate = subnet_predicate::(filtered_subnets, self.spec.clone()); debug!( - self.log, - "Starting grouped subnet query"; - "subnets" => ?filtered_subnet_queries, + subnets = ?filtered_subnet_queries, + "Starting grouped subnet query" ); self.start_query( QueryType::Subnet(filtered_subnet_queries), @@ -790,7 +782,7 @@ impl Discovery { let enr_fork_id = match self.local_enr().eth2() { Ok(v) => v, Err(e) => { - crit!(self.log, "Local ENR has no fork id"; "error" => e); + crit!(error = e, "Local ENR has no fork id"); return; } }; @@ -831,10 +823,10 @@ impl Discovery { self.find_peer_active = false; match query.result { Ok(r) if r.is_empty() => { - debug!(self.log, "Discovery query yielded no results."); + debug!("Discovery query yielded no results."); } Ok(r) => { - debug!(self.log, "Discovery query completed"; "peers_found" => r.len()); + debug!(peers_found = r.len(), "Discovery query completed"); let results = r .into_iter() .map(|enr| { @@ -846,7 +838,7 @@ impl Discovery { return Some(results); } Err(e) => { - warn!(self.log, "Discovery query failed"; "error" => %e); + warn!(error = %e, "Discovery query failed"); } } } @@ -855,13 +847,20 @@ impl Discovery { queries.iter().map(|query| query.subnet).collect(); match query.result { Ok(r) if r.is_empty() => { - debug!(self.log, "Grouped subnet discovery query yielded no results."; "subnets_searched_for" => ?subnets_searched_for); + debug!( + ?subnets_searched_for, + "Grouped subnet discovery query yielded no results." 
+ ); queries.iter().for_each(|query| { self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); }) } Ok(r) => { - debug!(self.log, "Peer grouped subnet discovery request completed"; "peers_found" => r.len(), "subnets_searched_for" => ?subnets_searched_for); + debug!( + peers_found = r.len(), + ?subnets_searched_for, + "Peer grouped subnet discovery request completed" + ); let mut mapped_results = HashMap::new(); @@ -888,11 +887,8 @@ impl Discovery { self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); // Check the specific subnet against the enr - let subnet_predicate = subnet_predicate::( - vec![query.subnet], - &self.log, - self.spec.clone(), - ); + let subnet_predicate = + subnet_predicate::(vec![query.subnet], self.spec.clone()); r.clone() .into_iter() @@ -941,7 +937,7 @@ impl Discovery { } } Err(e) => { - warn!(self.log,"Grouped subnet discovery query failed"; "subnets_searched_for" => ?subnets_searched_for, "error" => %e); + warn!(?subnets_searched_for, error = %e,"Grouped subnet discovery query failed"); } } } @@ -1020,11 +1016,11 @@ impl NetworkBehaviour for Discovery { if let Poll::Ready(event_stream) = fut.poll_unpin(cx) { match event_stream { Ok(stream) => { - debug!(self.log, "Discv5 event stream ready"); + debug!("Discv5 event stream ready"); self.event_stream = EventStream::Present(stream); } Err(e) => { - slog::crit!(self.log, "Discv5 event stream failed"; "error" => %e); + crit!(error = %e, "Discv5 event stream failed"); self.event_stream = EventStream::InActive; } } @@ -1042,15 +1038,15 @@ impl NetworkBehaviour for Discovery { // log these to see if we are unnecessarily dropping discovered peers /* if enr.eth2() == self.local_enr().eth2() { - trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); + trace!( "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } else { // this is 
temporary warning for debugging the DHT - warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); + warn!( "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); } */ } discv5::Event::SocketUpdated(socket_addr) => { - info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port()); + info!(ip = %socket_addr.ip(), udp_port = %socket_addr.port(),"Address updated"); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); // Discv5 will have updated our local ENR. We save the updated version // to disk. @@ -1062,7 +1058,7 @@ impl NetworkBehaviour for Discovery { self.discv5.update_local_enr_socket(socket_addr, true); } let enr = self.discv5.local_enr(); - enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr); // update network globals *self.network_globals.local_enr.write() = enr; // A new UDP socket has been detected. 
@@ -1086,7 +1082,11 @@ impl NetworkBehaviour for Discovery { let addr = ev.addr; let listener_id = ev.listener_id; - trace!(self.log, "Received NewListenAddr event from swarm"; "listener_id" => ?listener_id, "addr" => ?addr); + trace!( + ?listener_id, + ?addr, + "Received NewListenAddr event from swarm" + ); let mut addr_iter = addr.iter(); @@ -1094,7 +1094,7 @@ impl NetworkBehaviour for Discovery { Some(Protocol::Ip4(_)) => match (addr_iter.next(), addr_iter.next()) { (Some(Protocol::Tcp(port)), None) => { if !self.update_ports.tcp4 { - debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + debug!(multiaddr = ?addr, "Skipping ENR update"); return; } @@ -1102,21 +1102,21 @@ impl NetworkBehaviour for Discovery { } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic4 { - debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + debug!(?addr, "Skipping ENR update"); return; } self.update_enr_quic_port(port, false) } _ => { - debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + debug!(?addr, "Encountered unacceptable multiaddr for listening (unsupported transport)"); return; } }, Some(Protocol::Ip6(_)) => match (addr_iter.next(), addr_iter.next()) { (Some(Protocol::Tcp(port)), None) => { if !self.update_ports.tcp6 { - debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + debug!(?addr, "Skipping ENR update"); return; } @@ -1124,19 +1124,22 @@ impl NetworkBehaviour for Discovery { } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic6 { - debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + debug!(?addr, "Skipping ENR update"); return; } self.update_enr_quic_port(port, true) } _ => { - debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + debug!(?addr, "Encountered unacceptable multiaddr for listening (unsupported transport)"); return; } }, _ => { - 
debug!(self.log, "Encountered unacceptable multiaddr for listening (no IP)"; "addr" => ?addr); + debug!( + ?addr, + "Encountered unacceptable multiaddr for listening (no IP)" + ); return; } }; @@ -1145,10 +1148,10 @@ impl NetworkBehaviour for Discovery { match attempt_enr_update { Ok(true) => { - info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) + info!(enr = local_enr.to_base64(), seq = local_enr.seq(), id = %local_enr.node_id(), ip4 = ?local_enr.ip4(), udp4 = ?local_enr.udp4(), tcp4 = ?local_enr.tcp4(), tcp6 = ?local_enr.tcp6(), udp6 = ?local_enr.udp6(),"Updated local ENR") } Ok(false) => {} // Nothing to do, ENR already configured - Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), + Err(e) => warn!(error = ?e,"Failed to update ENR"), } } _ => { @@ -1171,7 +1174,7 @@ impl Discovery { return; } // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id, "error" => %ClearDialError(error)); + debug!(%peer_id, error = %ClearDialError(error),"Marking peer disconnected in DHT"); self.disconnect_peer(&peer_id); } DialError::LocalPeerId { .. } @@ -1179,7 +1182,7 @@ impl Discovery { | DialError::Transport(_) | DialError::WrongPeerId { .. 
} => { // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id, "error" => %ClearDialError(error)); + debug!(%peer_id, error = %ClearDialError(error),"Marking peer disconnected in DHT"); self.disconnect_peer(&peer_id); } DialError::DialPeerConditionFalse(_) | DialError::Aborted => {} @@ -1193,23 +1196,10 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV2}; use libp2p::identity::secp256k1; - use slog::{o, Drain}; use types::{BitVector, MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; - pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - async fn build_discovery() -> Discovery { let spec = Arc::new(ChainSpec::default()); let keypair = secp256k1::Keypair::generate(); @@ -1218,7 +1208,6 @@ mod tests { let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); - let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { @@ -1228,12 +1217,11 @@ mod tests { }), vec![], false, - &log, config.clone(), spec.clone(), ); let keypair = keypair.into(); - Discovery::new(keypair, &config, Arc::new(globals), &log, &spec) + Discovery::new(keypair, &config, Arc::new(globals), &spec) .await .unwrap() } diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 400a0c2d56..735ef5b0f2 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ 
b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -1,22 +1,19 @@ //! The subnet predicate used for searching for a particular subnet. use super::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; -use slog::trace; use std::ops::Deref; +use tracing::trace; use types::data_column_custody_group::compute_subnets_for_node; use types::ChainSpec; /// Returns the predicate for a given subnet. pub fn subnet_predicate( subnets: Vec, - log: &slog::Logger, spec: Arc, ) -> impl Fn(&Enr) -> bool + Send where E: EthSpec, { - let log_clone = log.clone(); - move |enr: &Enr| { let attestation_bitfield: EnrAttestationBitfield = match enr.attestation_bitfield::() { @@ -48,9 +45,8 @@ where if !predicate { trace!( - log_clone, - "Peer found but not on any of the desired subnets"; - "peer_id" => %enr.peer_id() + peer_id = %enr.peer_id(), + "Peer found but not on any of the desired subnets" ); } predicate diff --git a/beacon_node/lighthouse_network/src/listen_addr.rs b/beacon_node/lighthouse_network/src/listen_addr.rs index 53f7d9daca..3b0ff98b34 100644 --- a/beacon_node/lighthouse_network/src/listen_addr.rs +++ b/beacon_node/lighthouse_network/src/listen_addr.rs @@ -104,25 +104,3 @@ impl ListenAddress { }) } } - -impl slog::KV for ListenAddress { - fn serialize( - &self, - _record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - if let Some(v4_addr) = self.v4() { - serializer.emit_arguments("ip4_address", &format_args!("{}", v4_addr.addr))?; - serializer.emit_u16("disc4_port", v4_addr.disc_port)?; - serializer.emit_u16("quic4_port", v4_addr.quic_port)?; - serializer.emit_u16("tcp4_port", v4_addr.tcp_port)?; - } - if let Some(v6_addr) = self.v6() { - serializer.emit_arguments("ip6_address", &format_args!("{}", v6_addr.addr))?; - serializer.emit_u16("disc6_port", v6_addr.disc_port)?; - serializer.emit_u16("quic6_port", v6_addr.quic_port)?; - serializer.emit_u16("tcp6_port", v6_addr.tcp_port)?; - } - 
slog::Result::Ok(()) - } -} diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 07c4be7959..8c642ec91f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -11,12 +11,12 @@ use libp2p::identify::Info as IdentifyInfo; use lru_cache::LRUTimeCache; use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; -use slog::{debug, error, trace, warn}; use smallvec::SmallVec; use std::{ sync::Arc, time::{Duration, Instant}, }; +use tracing::{debug, error, trace, warn}; use types::{DataColumnSubnetId, EthSpec, SyncSubnetId}; pub use libp2p::core::Multiaddr; @@ -114,8 +114,6 @@ pub struct PeerManager { metrics_enabled: bool, /// Keeps track of whether the QUIC protocol is enabled or not. quic_enabled: bool, - /// The logger associated with the `PeerManager`. - log: slog::Logger, } /// The events that the `PeerManager` outputs (requests). 
@@ -150,7 +148,6 @@ impl PeerManager { pub fn new( cfg: config::Config, network_globals: Arc>, - log: &slog::Logger, ) -> Result { let config::Config { discovery_enabled, @@ -195,7 +192,6 @@ impl PeerManager { discovery_enabled, metrics_enabled, quic_enabled, - log: log.clone(), }) } @@ -209,7 +205,7 @@ impl PeerManager { pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { // Update the sync status if required if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - debug!(self.log, "Sending goodbye to peer"; "peer_id" => %peer_id, "reason" => %reason, "score" => %info.score()); + debug!(%peer_id, %reason, score = %info.score(), "Sending goodbye to peer"); if matches!(reason, GoodbyeReason::IrrelevantNetwork) { info.update_sync_status(SyncStatus::IrrelevantPeer); } @@ -369,7 +365,7 @@ impl PeerManager { .update_min_ttl(&peer_id, min_ttl); } if self.dial_peer(enr) { - debug!(self.log, "Added discovered ENR peer to dial queue"; "peer_id" => %peer_id); + debug!(%peer_id, "Added discovered ENR peer to dial queue"); to_dial_peers += 1; } } @@ -382,7 +378,10 @@ impl PeerManager { // reach out target. To prevent the infinite loop, if a query returns no useful peers, we // will cancel the recursiveness and wait for the heartbeat to trigger another query latter. 
if results_count > 0 && to_dial_peers == 0 { - debug!(self.log, "Skipping recursive discovery query after finding no useful results"; "results" => results_count); + debug!( + results = results_count, + "Skipping recursive discovery query after finding no useful results" + ); metrics::inc_counter(&metrics::DISCOVERY_NO_USEFUL_ENRS); } else { // Queue another discovery if we need to @@ -481,16 +480,21 @@ impl PeerManager { if previous_kind != peer_info.client().kind || *peer_info.listening_addresses() != previous_listening_addresses { - debug!(self.log, "Identified Peer"; "peer" => %peer_id, - "protocol_version" => &info.protocol_version, - "agent_version" => &info.agent_version, - "listening_addresses" => ?info.listen_addrs, - "observed_address" => ?info.observed_addr, - "protocols" => ?info.protocols + debug!( + %peer_id, + protocol_version = &info.protocol_version, + agent_version = &info.agent_version, + listening_addresses = ?info.listen_addrs, + observed_address = ?info.observed_addr, + protocols = ?info.protocols, + "Identified Peer" ); } } else { - error!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string()); + error!( + peer_id = peer_id.to_string(), + "Received an Identify response from an unknown peer" + ); } } @@ -506,8 +510,7 @@ impl PeerManager { ) { let client = self.network_globals.client(peer_id); let score = self.network_globals.peers.read().score(peer_id); - debug!(self.log, "RPC Error"; "protocol" => %protocol, "err" => %err, "client" => %client, - "peer_id" => %peer_id, "score" => %score, "direction" => ?direction); + debug!(%protocol, %err, %client, %peer_id, %score, ?direction, "RPC Error"); metrics::inc_counter_vec( &metrics::TOTAL_RPC_ERRORS_PER_CLIENT, &[ @@ -524,7 +527,7 @@ impl PeerManager { PeerAction::MidToleranceError } RPCError::InternalError(e) => { - debug!(self.log, "Internal RPC Error"; "error" => %e, "peer_id" => %peer_id); + debug!(error = %e, %peer_id, "Internal RPC Error"); return; } 
RPCError::HandlerRejected => PeerAction::Fatal, @@ -617,7 +620,7 @@ impl PeerManager { RPCError::StreamTimeout => match direction { ConnectionDirection::Incoming => { // There was a timeout responding to a peer. - debug!(self.log, "Timed out responding to RPC Request"; "peer_id" => %peer_id); + debug!(%peer_id, "Timed out responding to RPC Request"); return; } ConnectionDirection::Outgoing => match protocol { @@ -656,7 +659,7 @@ impl PeerManager { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a ping // reset the to-ping timer for this peer - trace!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq); + trace!(%peer_id, seq_no = seq, "Received a ping request"); match peer_info.connection_direction() { Some(ConnectionDirection::Incoming) => { self.inbound_ping_peers.insert(*peer_id); @@ -665,26 +668,23 @@ impl PeerManager { self.outbound_ping_peers.insert(*peer_id); } None => { - warn!(self.log, "Received a ping from a peer with an unknown connection direction"; "peer_id" => %peer_id); + warn!(%peer_id, "Received a ping from a peer with an unknown connection direction"); } } // if the sequence number is unknown send an update the meta data of the peer. 
if let Some(meta_data) = &peer_info.meta_data() { if *meta_data.seq_number() < seq { - trace!(self.log, "Requesting new metadata from peer"; - "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "ping_seq_no" => seq); + trace!(%peer_id, known_seq_no = meta_data.seq_number(), ping_seq_no = seq, "Requesting new metadata from peer"); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { // if we don't know the meta-data, request it - debug!(self.log, "Requesting first metadata from peer"; - "peer_id" => %peer_id); + debug!(%peer_id, "Requesting first metadata from peer"); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { - error!(self.log, "Received a PING from an unknown peer"; - "peer_id" => %peer_id); + error!(%peer_id, "Received a PING from an unknown peer"); } } @@ -696,18 +696,16 @@ impl PeerManager { // if the sequence number is unknown send update the meta data of the peer. if let Some(meta_data) = &peer_info.meta_data() { if *meta_data.seq_number() < seq { - trace!(self.log, "Requesting new metadata from peer"; - "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "pong_seq_no" => seq); + trace!(%peer_id, known_seq_no = meta_data.seq_number(), pong_seq_no = seq, "Requesting new metadata from peer"); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { // if we don't know the meta-data, request it - trace!(self.log, "Requesting first metadata from peer"; - "peer_id" => %peer_id); + trace!(%peer_id, "Requesting first metadata from peer"); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { - error!(self.log, "Received a PONG from an unknown peer"; "peer_id" => %peer_id); + error!(%peer_id, "Received a PONG from an unknown peer"); } } @@ -718,18 +716,15 @@ impl PeerManager { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data() { if *known_meta_data.seq_number() < *meta_data.seq_number() { - 
trace!(self.log, "Updating peer's metadata"; - "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); + trace!(%peer_id, known_seq_no = known_meta_data.seq_number(), new_seq_no = meta_data.seq_number(), "Updating peer's metadata"); } else { - trace!(self.log, "Received old metadata"; - "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); + trace!(%peer_id, known_seq_no = known_meta_data.seq_number(), new_seq_no = meta_data.seq_number(), "Received old metadata"); // Updating metadata even in this case to prevent storing // incorrect `attnets/syncnets` for a peer } } else { // we have no meta-data for this peer, update - debug!(self.log, "Obtained peer's metadata"; - "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number()); + debug!(%peer_id, new_seq_no = meta_data.seq_number(), "Obtained peer's metadata"); } let custody_group_count_opt = meta_data.custody_group_count().copied().ok(); @@ -749,10 +744,9 @@ impl PeerManager { .cloned() .unwrap_or_else(|| { warn!( - self.log, - "Custody group not found in subnet mapping"; - "custody_index" => custody_index, - "peer_id" => %peer_id + %custody_index, + %peer_id, + "Custody group not found in subnet mapping" ); vec![] }) @@ -761,11 +755,12 @@ impl PeerManager { peer_info.set_custody_subnets(custody_subnets); } Err(err) => { - debug!(self.log, "Unable to compute peer custody groups from metadata"; - "info" => "Sending goodbye to peer", - "peer_id" => %peer_id, - "custody_group_count" => custody_group_count, - "error" => ?err, + debug!( + info = "Sending goodbye to peer", + peer_id = %peer_id, + custody_group_count, + error = ?err, + "Unable to compute peer custody groups from metadata" ); invalid_meta_data = true; } @@ -773,8 +768,7 @@ impl PeerManager { } } } else { - error!(self.log, "Received METADATA from an unknown peer"; - "peer_id" => %peer_id); + error!(%peer_id, "Received METADATA from an 
unknown peer"); } // Disconnect peers with invalid metadata and find other peers instead. @@ -866,7 +860,7 @@ impl PeerManager { let mut peerdb = self.network_globals.peers.write(); if peerdb.ban_status(peer_id).is_some() { // don't connect if the peer is banned - error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); + error!(%peer_id, "Connection has been allowed to a banned peer"); } match connection { @@ -934,9 +928,8 @@ impl PeerManager { // request the subnet query from discovery if !subnets_to_discover.is_empty() { debug!( - self.log, - "Making subnet queries for maintaining sync committee peers"; - "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() + subnets = ?subnets_to_discover.iter().map(|s| s.subnet).collect::>(), + "Making subnet queries for maintaining sync committee peers" ); self.events .push(PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover)); @@ -965,7 +958,13 @@ impl PeerManager { if wanted_peers != 0 { // We need more peers, re-queue a discovery lookup. 
- debug!(self.log, "Starting a new peer discovery query"; "connected" => peer_count, "target" => self.target_peers, "outbound" => outbound_only_peer_count, "wanted" => wanted_peers); + debug!( + connected = peer_count, + target = self.target_peers, + outbound = outbound_only_peer_count, + wanted = wanted_peers, + "Starting a new peer discovery query" + ); self.events .push(PeerManagerEvent::DiscoverPeers(wanted_peers)); } @@ -1491,21 +1490,8 @@ enum ConnectingType { mod tests { use super::*; use crate::NetworkConfig; - use slog::{o, Drain}; use types::MainnetEthSpec as E; - pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - async fn build_peer_manager(target_peer_count: usize) -> PeerManager { build_peer_manager_with_trusted_peers(vec![], target_peer_count).await } @@ -1523,10 +1509,9 @@ mod tests { target_peers: target_peer_count, ..Default::default() }); - let log = build_log(slog::Level::Debug, false); let spec = Arc::new(E::default_spec()); - let globals = NetworkGlobals::new_test_globals(trusted_peers, &log, network_config, spec); - PeerManager::new(config, Arc::new(globals), &log).unwrap() + let globals = NetworkGlobals::new_test_globals(trusted_peers, network_config, spec); + PeerManager::new(config, Arc::new(globals)).unwrap() } #[tokio::test] diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index ee2a746142..1ad55ce5c4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -13,7 +13,7 @@ use 
libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, ToSwarm}; pub use metrics::{set_gauge_vec, NAT_OPEN}; -use slog::{debug, error, trace}; +use tracing::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; @@ -54,7 +54,10 @@ impl NetworkBehaviour for PeerManager { self.events.push(PeerManagerEvent::Ping(peer_id)); } Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for inbound peers to ping"; "error" => e.to_string()) + error!( + error = e.to_string(), + "Failed to check for inbound peers to ping" + ) } Poll::Ready(None) | Poll::Pending => break, } @@ -67,7 +70,10 @@ impl NetworkBehaviour for PeerManager { self.events.push(PeerManagerEvent::Ping(peer_id)); } Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for outbound peers to ping"; "error" => e.to_string()) + error!( + error = e.to_string(), + "Failed to check for outbound peers to ping" + ) } Poll::Ready(None) | Poll::Pending => break, } @@ -84,7 +90,7 @@ impl NetworkBehaviour for PeerManager { self.events.push(PeerManagerEvent::Status(peer_id)) } Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for peers to ping"; "error" => e.to_string()) + error!(error = e.to_string(), "Failed to check for peers to ping") } Poll::Ready(None) | Poll::Pending => break, } @@ -109,7 +115,7 @@ impl NetworkBehaviour for PeerManager { ] .concat(); - debug!(self.log, "Dialing peer"; "peer_id"=> %enr.peer_id(), "multiaddrs" => ?multiaddrs); + debug!(peer_id = %enr.peer_id(), ?multiaddrs, "Dialing peer"); return Poll::Ready(ToSwarm::Dial { opts: DialOpts::peer_id(enr.peer_id()) .condition(PeerCondition::Disconnected) @@ -141,7 +147,7 @@ impl NetworkBehaviour for PeerManager { error, connection_id: _, }) => { - debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); + debug!(?peer_id, error = 
%ClearDialError(error),"Failed to dial peer"); self.on_dial_failure(peer_id); } _ => { @@ -186,7 +192,7 @@ impl NetworkBehaviour for PeerManager { _local_addr: &libp2p::Multiaddr, remote_addr: &libp2p::Multiaddr, ) -> Result, ConnectionDenied> { - trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); + trace!(%peer_id, multiaddr = %remote_addr, "Inbound connection"); // We already checked if the peer was banned on `handle_pending_inbound_connection`. if self.ban_status(&peer_id).is_some() { return Err(ConnectionDenied::new( @@ -227,9 +233,9 @@ impl NetworkBehaviour for PeerManager { _role_override: libp2p::core::Endpoint, _port_use: PortUse, ) -> Result, libp2p::swarm::ConnectionDenied> { - trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); + trace!(%peer_id, multiaddr = %addr,"Outbound connection"); if let Some(cause) = self.ban_status(&peer_id) { - error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + error!(%peer_id, "Connected a banned peer. Rejecting connection"); return Err(ConnectionDenied::new(cause)); } @@ -258,9 +264,11 @@ impl PeerManager { endpoint: &ConnectedPoint, _other_established: usize, ) { - debug!(self.log, "Connection established"; "peer_id" => %peer_id, - "multiaddr" => %endpoint.get_remote_address(), - "connection" => ?endpoint.to_endpoint() + debug!( + multiaddr = %endpoint.get_remote_address(), + connection = ?endpoint.to_endpoint(), + %peer_id, + "Connection established" ); // Update the prometheus metrics @@ -309,7 +317,7 @@ impl PeerManager { // Inform the application. 
self.events .push(PeerManagerEvent::PeerDisconnected(peer_id)); - debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); + debug!(%peer_id,"Peer disconnected"); } // NOTE: It may be the case that a rejected node, due to too many peers is disconnected diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 8e5d6121e0..4a0388058b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -2,9 +2,9 @@ use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; use itertools::Itertools; +use logging::crit; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use score::{PeerAction, ReportSource, Score, ScoreState}; -use slog::{crit, debug, error, trace, warn}; use std::net::IpAddr; use std::time::Instant; use std::{cmp::Ordering, fmt::Display}; @@ -13,6 +13,7 @@ use std::{ fmt::Formatter, }; use sync_status::SyncStatus; +use tracing::{debug, error, trace, warn}; use types::data_column_custody_group::compute_subnets_for_node; use types::{ChainSpec, DataColumnSubnetId, EthSpec}; @@ -44,19 +45,16 @@ pub struct PeerDB { banned_peers_count: BannedPeersCount, /// Specifies if peer scoring is disabled. 
disable_peer_scoring: bool, - /// PeerDB's logger - log: slog::Logger, } impl PeerDB { - pub fn new(trusted_peers: Vec, disable_peer_scoring: bool, log: &slog::Logger) -> Self { + pub fn new(trusted_peers: Vec, disable_peer_scoring: bool) -> Self { // Initialize the peers hashmap with trusted peers let peers = trusted_peers .into_iter() .map(|peer_id| (peer_id, PeerInfo::trusted_peer_info())) .collect(); Self { - log: log.clone(), disconnected_peers: 0, banned_peers_count: BannedPeersCount::default(), disable_peer_scoring, @@ -385,15 +383,15 @@ impl PeerDB { // Update scores info.score_update(); - match Self::handle_score_transition(previous_state, peer_id, info, &self.log) { + match Self::handle_score_transition(previous_state, peer_id, info) { // A peer should not be able to be banned from a score update. ScoreTransitionResult::Banned => { - error!(self.log, "Peer has been banned in an update"; "peer_id" => %peer_id) + error!(%peer_id, "Peer has been banned in an update"); } // A peer should not be able to transition to a disconnected state from a healthy // state in a score update. 
ScoreTransitionResult::Disconnected => { - error!(self.log, "Peer has been disconnected in an update"; "peer_id" => %peer_id) + error!(%peer_id, "Peer has been disconnected in an update"); } ScoreTransitionResult::Unbanned => { peers_to_unban.push(*peer_id); @@ -466,7 +464,7 @@ impl PeerDB { actions.push(( *peer_id, - Self::handle_score_transition(previous_state, peer_id, info, &self.log), + Self::handle_score_transition(previous_state, peer_id, info), )); } @@ -537,15 +535,13 @@ impl PeerDB { &metrics::PEER_ACTION_EVENTS_PER_CLIENT, &[info.client().kind.as_ref(), action.as_ref(), source.into()], ); - let result = - Self::handle_score_transition(previous_state, peer_id, info, &self.log); + let result = Self::handle_score_transition(previous_state, peer_id, info); if previous_state == info.score_state() { debug!( - self.log, - "Peer score adjusted"; - "msg" => %msg, - "peer_id" => %peer_id, - "score" => %info.score() + %msg, + %peer_id, + score = %info.score(), + "Peer score adjusted" ); } match result { @@ -567,10 +563,9 @@ impl PeerDB { ScoreTransitionResult::NoAction => ScoreUpdateResult::NoAction, ScoreTransitionResult::Unbanned => { error!( - self.log, - "Report peer action lead to an unbanning"; - "msg" => %msg, - "peer_id" => %peer_id + %msg, + %peer_id, + "Report peer action lead to an unbanning" ); ScoreUpdateResult::NoAction } @@ -578,10 +573,9 @@ impl PeerDB { } None => { debug!( - self.log, - "Reporting a peer that doesn't exist"; - "msg" => %msg, - "peer_id" =>%peer_id + %msg, + %peer_id, + "Reporting a peer that doesn't exist" ); ScoreUpdateResult::NoAction } @@ -601,7 +595,7 @@ impl PeerDB { .checked_duration_since(Instant::now()) .map(|duration| duration.as_secs()) .unwrap_or_else(|| 0); - debug!(self.log, "Updating the time a peer is required for"; "peer_id" => %peer_id, "future_min_ttl_secs" => min_ttl_secs); + debug!(%peer_id, future_min_ttl_secs = min_ttl_secs, "Updating the time a peer is required for"); } } @@ -625,12 +619,14 @@ impl PeerDB { 
/// min_ttl than what's given. // VISIBILITY: The behaviour is able to adjust subscriptions. pub(crate) fn extend_peers_on_subnet(&mut self, subnet: &Subnet, min_ttl: Instant) { - let log = &self.log; - self.peers.iter_mut() + self.peers + .iter_mut() .filter(move |(_, info)| { - info.is_connected() && info.on_subnet_metadata(subnet) && info.on_subnet_gossipsub(subnet) + info.is_connected() + && info.on_subnet_metadata(subnet) + && info.on_subnet_gossipsub(subnet) }) - .for_each(|(peer_id,info)| { + .for_each(|(peer_id, info)| { if info.min_ttl().is_none() || Some(&min_ttl) > info.min_ttl() { info.set_min_ttl(min_ttl); } @@ -638,7 +634,7 @@ impl PeerDB { .checked_duration_since(Instant::now()) .map(|duration| duration.as_secs()) .unwrap_or_else(|| 0); - trace!(log, "Updating minimum duration a peer is required for"; "peer_id" => %peer_id, "min_ttl" => min_ttl_secs); + trace!(%peer_id, min_ttl_secs, "Updating minimum duration a peer is required for"); }); } @@ -740,7 +736,6 @@ impl PeerDB { peer_id: &PeerId, new_state: NewConnectionState, ) -> Option { - let log_ref = &self.log; let info = self.peers.entry(*peer_id).or_insert_with(|| { // If we are not creating a new connection (or dropping a current inbound connection) log a warning indicating we are updating a // connection state for an unknown peer. @@ -752,8 +747,7 @@ impl PeerDB { | NewConnectionState::Disconnected { .. } // Dialing a peer that responds by a different ID can be immediately // disconnected without having being stored in the db before ) { - warn!(log_ref, "Updating state of unknown peer"; - "peer_id" => %peer_id, "new_state" => ?new_state); + warn!(%peer_id, ?new_state, "Updating state of unknown peer"); } if self.disable_peer_scoring { PeerInfo::trusted_peer_info() @@ -768,7 +762,7 @@ impl PeerDB { ScoreState::Banned => {} _ => { // If score isn't low enough to ban, this function has been called incorrectly. 
- error!(self.log, "Banning a peer with a good score"; "peer_id" => %peer_id); + error!(%peer_id, "Banning a peer with a good score"); info.apply_peer_action_to_score(score::PeerAction::Fatal); } } @@ -799,13 +793,13 @@ impl PeerDB { self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } PeerConnectionStatus::Banned { .. } => { - error!(self.log, "Accepted a connection from a banned peer"; "peer_id" => %peer_id); + error!(%peer_id, "Accepted a connection from a banned peer"); // TODO: check if this happens and report the unban back self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); } PeerConnectionStatus::Disconnecting { .. } => { - warn!(self.log, "Connected to a disconnecting peer"; "peer_id" => %peer_id) + warn!(%peer_id, "Connected to a disconnecting peer"); } PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } @@ -827,7 +821,7 @@ impl PeerDB { (old_state, NewConnectionState::Dialing { enr }) => { match old_state { PeerConnectionStatus::Banned { .. } => { - warn!(self.log, "Dialing a banned peer"; "peer_id" => %peer_id); + warn!(%peer_id, "Dialing a banned peer"); self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); } @@ -835,13 +829,13 @@ impl PeerDB { self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } PeerConnectionStatus::Connected { .. } => { - warn!(self.log, "Dialing an already connected peer"; "peer_id" => %peer_id) + warn!(%peer_id, "Dialing an already connected peer"); } PeerConnectionStatus::Dialing { .. } => { - warn!(self.log, "Dialing an already dialing peer"; "peer_id" => %peer_id) + warn!(%peer_id, "Dialing an already dialing peer"); } PeerConnectionStatus::Disconnecting { .. 
} => { - warn!(self.log, "Dialing a disconnecting peer"; "peer_id" => %peer_id) + warn!(%peer_id, "Dialing a disconnecting peer"); } PeerConnectionStatus::Unknown => {} // default behaviour } @@ -851,7 +845,7 @@ impl PeerDB { } if let Err(e) = info.set_dialing_peer() { - error!(self.log, "{}", e; "peer_id" => %peer_id); + error!(%peer_id, e); } } @@ -907,7 +901,7 @@ impl PeerDB { * Handles the transition to a disconnecting state */ (PeerConnectionStatus::Banned { .. }, NewConnectionState::Disconnecting { to_ban }) => { - error!(self.log, "Disconnecting from a banned peer"; "peer_id" => %peer_id); + error!(%peer_id, "Disconnecting from a banned peer"); info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); } ( @@ -951,13 +945,13 @@ impl PeerDB { (PeerConnectionStatus::Disconnecting { .. }, NewConnectionState::Banned) => { // NOTE: This can occur due a rapid downscore of a peer. It goes through the // disconnection phase and straight into banning in a short time-frame. - debug!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id); + debug!(%peer_id, "Banning peer that is currently disconnecting"); // Ban the peer once the disconnection process completes. info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban: true }); return Some(BanOperation::PeerDisconnecting); } (PeerConnectionStatus::Banned { .. 
}, NewConnectionState::Banned) => { - error!(log_ref, "Banning already banned peer"; "peer_id" => %peer_id); + error!(%peer_id, "Banning already banned peer"); let known_banned_ips = self.banned_peers_count.banned_ips(); let banned_ips = info .seen_ip_addresses() @@ -975,7 +969,7 @@ impl PeerDB { } (PeerConnectionStatus::Unknown, NewConnectionState::Banned) => { // shift the peer straight to banned - warn!(log_ref, "Banning a peer of unknown connection state"; "peer_id" => %peer_id); + warn!(%peer_id, "Banning a peer of unknown connection state"); self.banned_peers_count .add_banned_peer(info.seen_ip_addresses()); info.set_connection_status(PeerConnectionStatus::Banned { @@ -996,15 +990,15 @@ impl PeerDB { */ (old_state, NewConnectionState::Unbanned) => { if matches!(info.score_state(), ScoreState::Banned) { - error!(self.log, "Unbanning a banned peer"; "peer_id" => %peer_id); + error!(%peer_id, "Unbanning a banned peer"); } match old_state { PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } => { - error!(self.log, "Unbanning a connected peer"; "peer_id" => %peer_id); + error!(%peer_id, "Unbanning a connected peer"); } PeerConnectionStatus::Disconnected { .. } | PeerConnectionStatus::Disconnecting { .. } => { - debug!(self.log, "Unbanning disconnected or disconnecting peer"; "peer_id" => %peer_id); + debug!(%peer_id, "Unbanning disconnected or disconnecting peer"); } // These are odd but fine. PeerConnectionStatus::Dialing { .. } => {} // Also odd but acceptable PeerConnectionStatus::Banned { since } => { @@ -1073,15 +1067,12 @@ impl PeerDB { Some((*id, unbanned_ips)) } else { // If there is no minimum, this is a coding error. - crit!( - self.log, - "banned_peers > MAX_BANNED_PEERS despite no banned peers in db!" 
- ); + crit!("banned_peers > MAX_BANNED_PEERS despite no banned peers in db!"); // reset banned_peers this will also exit the loop self.banned_peers_count = BannedPeersCount::default(); None } { - debug!(self.log, "Removing old banned peer"; "peer_id" => %to_drop); + debug!(peer_id = %to_drop, "Removing old banned peer"); self.peers.remove(&to_drop); unbanned_peers.push((to_drop, unbanned_ips)) } @@ -1100,7 +1091,11 @@ impl PeerDB { .min_by_key(|(_, age)| *age) .map(|(id, _)| *id) { - debug!(self.log, "Removing old disconnected peer"; "peer_id" => %to_drop, "disconnected_size" => self.disconnected_peers.saturating_sub(1)); + debug!( + peer_id = %to_drop, + disconnected_size = self.disconnected_peers.saturating_sub(1), + "Removing old disconnected peer" + ); self.peers.remove(&to_drop); } // If there is no minimum, this is a coding error. For safety we decrease @@ -1117,15 +1112,19 @@ impl PeerDB { previous_state: ScoreState, peer_id: &PeerId, info: &PeerInfo, - log: &slog::Logger, ) -> ScoreTransitionResult { match (info.score_state(), previous_state) { (ScoreState::Banned, ScoreState::Healthy | ScoreState::ForcedDisconnect) => { - debug!(log, "Peer has been banned"; "peer_id" => %peer_id, "score" => %info.score()); + debug!(%peer_id, score = %info.score(), "Peer has been banned"); ScoreTransitionResult::Banned } (ScoreState::ForcedDisconnect, ScoreState::Banned | ScoreState::Healthy) => { - debug!(log, "Peer transitioned to forced disconnect score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); + debug!( + %peer_id, + score = %info.score(), + past_score_state = %previous_state, + "Peer transitioned to forced disconnect score state" + ); // disconnect the peer if it's currently connected or dialing if info.is_connected_or_dialing() { ScoreTransitionResult::Disconnected @@ -1138,11 +1137,21 @@ impl PeerDB { } } (ScoreState::Healthy, ScoreState::ForcedDisconnect) => { - debug!(log, "Peer transitioned to healthy score 
state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); + debug!( + %peer_id, + score = %info.score(), + past_score_state = %previous_state, + "Peer transitioned to healthy score state" + ); ScoreTransitionResult::NoAction } (ScoreState::Healthy, ScoreState::Banned) => { - debug!(log, "Peer transitioned to healthy score state"; "peer_id" => %peer_id, "score" => %info.score(), "past_score_state" => %previous_state); + debug!( + %peer_id, + score = %info.score(), + past_score_state = %previous_state, + "Peer transitioned to healthy score state" + ); // unban the peer if it was previously banned. ScoreTransitionResult::Unbanned } @@ -1309,24 +1318,11 @@ impl BannedPeersCount { mod tests { use super::*; use libp2p::core::multiaddr::Protocol; - use slog::{o, Drain}; use std::net::{Ipv4Addr, Ipv6Addr}; use types::MinimalEthSpec; type M = MinimalEthSpec; - pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - fn add_score(db: &mut PeerDB, peer_id: &PeerId, score: f64) { if let Some(info) = db.peer_info_mut(peer_id) { info.add_to_score(score); @@ -1340,8 +1336,7 @@ mod tests { } fn get_db() -> PeerDB { - let log = build_log(slog::Level::Debug, false); - PeerDB::new(vec![], false, &log) + PeerDB::new(vec![], false) } #[test] @@ -2039,8 +2034,7 @@ mod tests { #[allow(clippy::float_cmp)] fn test_trusted_peers_score() { let trusted_peer = PeerId::random(); - let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false, &log); + let mut pdb: PeerDB = PeerDB::new(vec![trusted_peer], false); pdb.connect_ingoing(&trusted_peer, 
"/ip4/0.0.0.0".parse().unwrap(), None); @@ -2063,8 +2057,7 @@ mod tests { #[test] fn test_disable_peer_scoring() { let peer = PeerId::random(); - let log = build_log(slog::Level::Debug, false); - let mut pdb: PeerDB = PeerDB::new(vec![], true, &log); + let mut pdb: PeerDB = PeerDB::new(vec![], true); pdb.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 03203fcade..8353b661c5 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -15,8 +15,9 @@ use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::Stream; -use slog::{crit, debug, trace}; +use libp2p::swarm::{ConnectionId, Stream}; +use libp2p::PeerId; +use logging::crit; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, VecDeque}, @@ -27,6 +28,7 @@ use std::{ }; use tokio::time::{sleep, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; +use tracing::{debug, trace}; use types::{EthSpec, ForkContext}; /// The number of times to retry an outbound upgrade in the case of IO errors. @@ -135,11 +137,11 @@ where /// Waker, to be sure the handler gets polled when needed. waker: Option, - /// Logger for handling RPC streams - log: slog::Logger, - /// Timeout that will me used for inbound and outbound responses. resp_timeout: Duration, + + /// Information about this handler for logging purposes. 
+ log_info: (PeerId, ConnectionId), } enum HandlerState { @@ -221,8 +223,9 @@ where pub fn new( listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, - log: &slog::Logger, resp_timeout: Duration, + peer_id: PeerId, + connection_id: ConnectionId, ) -> Self { RPCHandler { listen_protocol, @@ -240,8 +243,8 @@ where outbound_io_error_retries: 0, fork_context, waker: None, - log: log.clone(), resp_timeout, + log_info: (peer_id, connection_id), } } @@ -250,7 +253,12 @@ where fn shutdown(&mut self, goodbye_reason: Option<(Id, GoodbyeReason)>) { if matches!(self.state, HandlerState::Active) { if !self.dial_queue.is_empty() { - debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len()); + debug!( + unsent_queued_requests = self.dial_queue.len(), + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Starting handler shutdown" + ); } // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { @@ -297,8 +305,10 @@ where let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else { if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses - trace!(self.log, "Inbound stream has expired. Response not sent"; - "response" => %response, "id" => inbound_id); + trace!(%response, id = ?inbound_id, + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Inbound stream has expired. Response not sent"); } return; }; @@ -313,8 +323,10 @@ where if matches!(self.state, HandlerState::Deactivated) { // we no longer send responses after the handler is deactivated - debug!(self.log, "Response not sent. Deactivated handler"; - "response" => %response, "id" => inbound_id); + debug!(%response, id = ?inbound_id, + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Response not sent. 
Deactivated handler"); return; } inbound_info.pending_items.push_back(response); @@ -381,7 +393,11 @@ where match delay.as_mut().poll(cx) { Poll::Ready(_) => { self.state = HandlerState::Deactivated; - debug!(self.log, "Shutdown timeout elapsed, Handler deactivated"); + debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Shutdown timeout elapsed, Handler deactivated" + ); return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( HandlerEvent::Close(RPCError::Disconnected), )); @@ -428,7 +444,10 @@ where outbound_err, ))); } else { - crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); + crit!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + stream_id = ?outbound_id.get_ref(), "timed out substream not in the books"); } } @@ -557,10 +576,24 @@ where // BlocksByRange is the one that typically consumes the most time. // Its useful to log when the request was completed. if matches!(info.protocol, Protocol::BlocksByRange) { - debug!(self.log, "BlocksByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); + debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + duration = Instant::now() + .duration_since(info.request_start_time) + .as_secs(), + "BlocksByRange Response sent" + ); } if matches!(info.protocol, Protocol::BlobsByRange) { - debug!(self.log, "BlobsByRange Response sent"; "duration" => Instant::now().duration_since(info.request_start_time).as_secs()); + debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + duration = Instant::now() + .duration_since(info.request_start_time) + .as_secs(), + "BlobsByRange Response sent" + ); } // There is nothing more to process on this substream as it has @@ -583,10 +616,20 @@ where })); if matches!(info.protocol, Protocol::BlocksByRange) { - debug!(self.log, "BlocksByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); + 
debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + duration = info.request_start_time.elapsed().as_secs(), + "BlocksByRange Response failed" + ); } if matches!(info.protocol, Protocol::BlobsByRange) { - debug!(self.log, "BlobsByRange Response failed"; "duration" => info.request_start_time.elapsed().as_secs()); + debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + duration = info.request_start_time.elapsed().as_secs(), + "BlobsByRange Response failed" + ); } break; } @@ -695,7 +738,7 @@ where // stream closed // if we expected multiple streams send a stream termination, // else report the stream terminating only. - //trace!(self.log, "RPC Response - stream closed by remote"); + //"RPC Response - stream closed by remote"); // drop the stream let delay_key = &entry.get().delay_key; let request_id = entry.get().req_id; @@ -772,7 +815,11 @@ where } } OutboundSubstreamState::Poisoned => { - crit!(self.log, "Poisoned outbound substream"); + crit!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Poisoned outbound substream" + ); unreachable!("Coding Error: Outbound substream is poisoned") } } @@ -804,7 +851,11 @@ where && self.events_out.is_empty() && self.dial_negotiated == 0 { - debug!(self.log, "Goodbye sent, Handler deactivated"); + debug!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + "Goodbye sent, Handler deactivated" + ); self.state = HandlerState::Deactivated; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( HandlerEvent::Close(RPCError::Disconnected), @@ -997,7 +1048,11 @@ where ) .is_some() { - crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id); + crit!( + peer_id = %self.log_info.0, + connection_id = %self.log_info.1, + + id = ?self.current_outbound_substream_id, "Duplicate outbound substream id"); } self.current_outbound_substream_id.0 += 1; } @@ -1045,17 +1100,6 @@ where } } -impl slog::Value for SubstreamId { - fn 
serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - slog::Value::serialize(&self.0, record, key, serializer) - } -} - /// Creates a future that can be polled that will send any queued message to the peer. /// /// This function returns the given substream, along with whether it has been closed or not. Any diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index a2f866e59c..b748ab11c0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -864,19 +864,3 @@ impl std::fmt::Display for DataColumnsByRootRequest { ) } } - -impl slog::KV for StatusMessage { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - use slog::Value; - serializer.emit_arguments("fork_digest", &format_args!("{:?}", self.fork_digest))?; - Value::serialize(&self.finalized_epoch, record, "finalized_epoch", serializer)?; - serializer.emit_arguments("finalized_root", &format_args!("{}", self.finalized_root))?; - Value::serialize(&self.head_slot, record, "head_slot", serializer)?; - serializer.emit_arguments("head_root", &format_args!("{}", self.head_root))?; - slog::Result::Ok(()) - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index a91d2d1042..f5085e798c 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -13,13 +13,14 @@ use libp2p::swarm::{ }; use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; +use logging::crit; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; -use slog::{crit, debug, o, trace}; use std::marker::PhantomData; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use 
std::time::Duration; +use tracing::{debug, instrument, trace}; use types::{EthSpec, ForkContext}; pub(crate) use handler::{HandlerErr, HandlerEvent}; @@ -159,8 +160,6 @@ pub struct RPC { events: Vec>, fork_context: Arc, enable_light_client_server: bool, - /// Slog logger for RPC behaviour. - log: slog::Logger, /// Networking constant values network_params: NetworkParams, /// A sequential counter indicating when data gets modified. @@ -168,25 +167,28 @@ pub struct RPC { } impl RPC { + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn new( fork_context: Arc, enable_light_client_server: bool, inbound_rate_limiter_config: Option, outbound_rate_limiter_config: Option, - log: slog::Logger, network_params: NetworkParams, seq_number: u64, ) -> Self { - let log = log.new(o!("service" => "libp2p_rpc")); - let inbound_limiter = inbound_rate_limiter_config.map(|config| { - debug!(log, "Using inbound rate limiting params"; "config" => ?config); + debug!(?config, "Using inbound rate limiting params"); RateLimiter::new_with_config(config.0, fork_context.clone()) .expect("Inbound limiter configuration parameters are valid") }); let self_limiter = outbound_rate_limiter_config.map(|config| { - SelfRateLimiter::new(config, fork_context.clone(), log.clone()) + SelfRateLimiter::new(config, fork_context.clone()) .expect("Configuration parameters are valid") }); @@ -196,7 +198,6 @@ impl RPC { events: Vec::new(), fork_context, enable_light_client_server, - log, network_params, seq_number, } @@ -205,6 +206,12 @@ impl RPC { /// Sends an RPC response. /// /// The peer must be connected for this to succeed. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn send_response( &mut self, peer_id: PeerId, @@ -222,6 +229,12 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn send_request(&mut self, peer_id: PeerId, request_id: Id, req: RequestType) { let event = if let Some(self_limiter) = self.self_limiter.as_mut() { match self_limiter.allows(peer_id, request_id, req) { @@ -244,6 +257,12 @@ impl RPC { /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This /// gracefully terminates the RPC behaviour with a goodbye message. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) { self.events.push(ToSwarm::NotifyHandler { peer_id, @@ -252,16 +271,28 @@ impl RPC { }); } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn update_seq_number(&mut self, seq_number: u64) { self.seq_number = seq_number } /// Send a Ping request to the destination `PeerId` via `ConnectionId`. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p_rpc"), + name = "libp2p_rpc", + skip_all + )] pub fn ping(&mut self, peer_id: PeerId, id: Id) { let ping = Ping { data: self.seq_number, }; - trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); + trace!(%peer_id, "Sending Ping"); self.send_request(peer_id, id, RequestType::Ping(ping)); } } @@ -291,14 +322,13 @@ where }, (), ); - let log = self - .log - .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); + let handler = RPCHandler::new( protocol, self.fork_context.clone(), - &log, self.network_params.resp_timeout, + peer_id, + connection_id, ); Ok(handler) @@ -323,15 +353,12 @@ where (), ); - let log = self - .log - .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); - let handler = RPCHandler::new( protocol, self.fork_context.clone(), - &log, self.network_params.resp_timeout, + peer_id, + connection_id, ); Ok(handler) @@ -421,10 +448,10 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - debug!(self.log, "Request too large to process"; "request" => %r#type, "protocol" => %protocol); + debug!(request = %r#type, %protocol, "Request too large to process"); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind - crit!(self.log, "Request size too large to ever be processed"; "protocol" => %protocol); + crit!(%protocol, "Request size too large to ever be processed"); } // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour @@ -440,8 +467,7 @@ where return; } Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(self.log, "Request exceeds the rate limit"; - "request" => %r#type, "peer_id" => %peer_id, "wait_time_ms" => wait_time.as_millis()); + debug!(request = %r#type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit"); // send an error code to the peer. 
// the handler upon receiving the error code will send it back to the behaviour self.send_response( @@ -462,7 +488,7 @@ where // If we received a Ping, we queue a Pong response. if let RequestType::Ping(_) = r#type { - trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %conn_id, "peer_id" => %peer_id); + trace!(connection_id = %conn_id, %peer_id, "Received Ping, queueing Pong"); self.send_response( peer_id, (conn_id, substream_id), @@ -526,53 +552,3 @@ where Poll::Pending } } - -impl slog::KV for RPCMessage -where - E: EthSpec, - Id: ReqId, -{ - fn serialize( - &self, - _record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; - match &self.message { - Ok(received) => { - let (msg_kind, protocol) = match received { - RPCReceived::Request(Request { r#type, .. }) => { - ("request", r#type.versioned_protocol().protocol()) - } - RPCReceived::Response(_, res) => ("response", res.protocol()), - RPCReceived::EndOfStream(_, end) => ( - "end_of_stream", - match end { - ResponseTermination::BlocksByRange => Protocol::BlocksByRange, - ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, - ResponseTermination::BlobsByRange => Protocol::BlobsByRange, - ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, - ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, - ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, - ResponseTermination::LightClientUpdatesByRange => { - Protocol::LightClientUpdatesByRange - } - }, - ), - }; - serializer.emit_str("msg_kind", msg_kind)?; - serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; - } - Err(error) => { - let (msg_kind, protocol) = match &error { - HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), - HandlerErr::Outbound { proto, .. 
} => ("outbound_err", *proto), - }; - serializer.emit_str("msg_kind", msg_kind)?; - serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; - } - }; - - slog::Result::Ok(()) - } -} diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index 515e7d7244..af6ac37d2c 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -7,9 +7,10 @@ use std::{ use futures::FutureExt; use libp2p::{swarm::NotifyHandler, PeerId}; -use slog::{crit, debug, Logger}; +use logging::crit; use smallvec::SmallVec; use tokio_util::time::DelayQueue; +use tracing::debug; use types::{EthSpec, ForkContext}; use super::{ @@ -36,8 +37,6 @@ pub(crate) struct SelfRateLimiter { limiter: RateLimiter, /// Requests that are ready to be sent. ready_requests: SmallVec<[(PeerId, RPCSend); 3]>, - /// Slog logger. - log: Logger, } /// Error returned when the rate limiter does not accept a request. 
@@ -54,9 +53,8 @@ impl SelfRateLimiter { pub fn new( config: OutboundRateLimiterConfig, fork_context: Arc, - log: Logger, ) -> Result { - debug!(log, "Using self rate limiting params"; "config" => ?config); + debug!(?config, "Using self rate limiting params"); let limiter = RateLimiter::new_with_config(config.0, fork_context)?; Ok(SelfRateLimiter { @@ -64,7 +62,6 @@ impl SelfRateLimiter { next_peer_request: Default::default(), limiter, ready_requests: Default::default(), - log, }) } @@ -84,7 +81,7 @@ impl SelfRateLimiter { return Err(Error::PendingRequests); } - match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req) { Err((rate_limited_req, wait_time)) => { let key = (peer_id, protocol); self.next_peer_request.insert(key, wait_time); @@ -107,7 +104,6 @@ impl SelfRateLimiter { peer_id: PeerId, request_id: Id, req: RequestType, - log: &Logger, ) -> Result, (QueuedRequest, Duration)> { match limiter.allows(&peer_id, &req) { Ok(()) => Ok(RPCSend::Request(request_id, req)), @@ -118,14 +114,13 @@ impl SelfRateLimiter { // this should never happen with default parameters. Let's just send the request. // Log a crit since this is a config issue. crit!( - log, - "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters."; - "protocol" => %req.versioned_protocol().protocol() + protocol = %req.versioned_protocol().protocol(), + "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters." 
); Ok(RPCSend::Request(request_id, req)) } RateLimitedErr::TooSoon(wait_time) => { - debug!(log, "Self rate limiting"; "protocol" => %protocol.protocol(), "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id); + debug!(protocol = %protocol.protocol(), wait_time_ms = wait_time.as_millis(), %peer_id, "Self rate limiting"); Err((QueuedRequest { req, request_id }, wait_time)) } } @@ -139,8 +134,7 @@ impl SelfRateLimiter { if let Entry::Occupied(mut entry) = self.delayed_requests.entry((peer_id, protocol)) { let queued_requests = entry.get_mut(); while let Some(QueuedRequest { req, request_id }) = queued_requests.pop_front() { - match Self::try_send_request(&mut self.limiter, peer_id, request_id, req, &self.log) - { + match Self::try_send_request(&mut self.limiter, peer_id, request_id, req) { Err((rate_limited_req, wait_time)) => { let key = (peer_id, protocol); self.next_peer_request.insert(key, wait_time); @@ -215,13 +209,14 @@ mod tests { use crate::rpc::{Ping, Protocol, RequestType}; use crate::service::api_types::{AppRequestId, RequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; + use logging::create_test_tracing_subscriber; use std::time::Duration; use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; /// Test that `next_peer_request_ready` correctly maintains the queue. 
#[tokio::test] async fn test_next_peer_request_ready() { - let log = logging::test_logger(); + create_test_tracing_subscriber(); let config = OutboundRateLimiterConfig(RateLimiterConfig { ping_quota: Quota::n_every(1, 2), ..Default::default() @@ -232,7 +227,7 @@ mod tests { &MainnetEthSpec::default_spec(), )); let mut limiter: SelfRateLimiter = - SelfRateLimiter::new(config, fork_context, log).unwrap(); + SelfRateLimiter::new(config, fork_context).unwrap(); let peer_id = PeerId::random(); let lookup_id = 0; diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e69c7aa5f7..894fff5074 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -218,22 +218,6 @@ impl std::convert::From> for RpcResponse { } } -impl slog::Value for RequestId { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - match self { - RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), - RequestId::Application(ref id) => { - slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) - } - } - } -} - macro_rules! 
impl_display { ($structname: ty, $format: literal, $($field:ident),*) => { impl Display for $structname { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9001e389c1..0bf281fd75 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -32,12 +32,13 @@ use libp2p::swarm::behaviour::toggle::Toggle; use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; use libp2p::upnp::tokio::Behaviour as Upnp; use libp2p::{identify, PeerId, SwarmBuilder}; -use slog::{crit, debug, info, o, trace, warn}; +use logging::crit; use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use tracing::{debug, info, instrument, trace, warn}; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; @@ -160,23 +161,24 @@ pub struct Network { gossip_cache: GossipCache, /// This node's PeerId. pub local_peer_id: PeerId, - /// Logger for behaviour actions. - log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. impl Network { + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub async fn new( executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, - log: &slog::Logger, ) -> Result<(Self, Arc>), String> { - let log = log.new(o!("service"=> "libp2p")); - let config = ctx.config.clone(); - trace!(log, "Libp2p Service starting"); + trace!("Libp2p Service starting"); // initialise the node's ID - let local_keypair = utils::load_private_key(&config, &log); + let local_keypair = utils::load_private_key(&config); // Trusted peers will also be marked as explicit in GossipSub. // Cfr. 
https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements @@ -192,7 +194,6 @@ impl Network { local_keypair.clone(), &config, &ctx.enr_fork_id, - &log, &ctx.chain_spec, )?; @@ -201,15 +202,13 @@ impl Network { ctx.chain_spec .custody_group_count(config.subscribe_all_data_column_subnets) }); - let meta_data = - utils::load_or_build_metadata(&config.network_dir, custody_group_count, &log); + let meta_data = utils::load_or_build_metadata(&config.network_dir, custody_group_count); let seq_number = *meta_data.seq_number(); let globals = NetworkGlobals::new( enr, meta_data, trusted_peers, config.disable_peer_scoring, - &log, config.clone(), ctx.chain_spec.clone(), ); @@ -274,7 +273,7 @@ impl Network { )? }; - trace!(log, "Using peer score params"; "params" => ?params); + trace!(?params, "Using peer score params"); // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); @@ -374,7 +373,6 @@ impl Network { config.enable_light_client_server, config.inbound_rate_limiter_config.clone(), config.outbound_rate_limiter_config.clone(), - log.clone(), network_params, seq_number, ); @@ -385,7 +383,6 @@ impl Network { local_keypair.clone(), &config, network_globals.clone(), - &log, &ctx.chain_spec, ) .await?; @@ -418,7 +415,7 @@ impl Network { target_peer_count: config.target_peers, ..Default::default() }; - PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? + PeerManager::new(peer_manager_cfg, network_globals.clone())? }; let connection_limits = { @@ -513,7 +510,6 @@ impl Network { update_gossipsub_scores, gossip_cache, local_peer_id, - log, }; network.start(&config).await?; @@ -528,10 +524,25 @@ impl Network { /// - Starts listening in the given ports. /// - Dials boot-nodes and libp2p peers. /// - Subscribes to starting gossipsub topics. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] async fn start(&mut self, config: &crate::NetworkConfig) -> Result<(), String> { let enr = self.network_globals.local_enr(); - info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery, "quic_enabled" => !config.disable_quic_support); + info!( + peer_id = %enr.peer_id(), + bandwidth_config = format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name), + "Libp2p Starting" + ); + debug!( + listen_addrs = ?config.listen_addrs(), + discovery_enabled = !config.disable_discovery, + quic_enabled = !config.disable_quic_support, + "Attempting to open listening ports" + ); for listen_multiaddr in config.listen_addrs().libp2p_addresses() { // If QUIC is disabled, ignore listening on QUIC ports @@ -545,14 +556,13 @@ impl Network { Ok(_) => { let mut log_address = listen_multiaddr; log_address.push(MProtocol::P2p(enr.peer_id())); - info!(self.log, "Listening established"; "address" => %log_address); + info!(address = %log_address, "Listening established"); } Err(err) => { crit!( - self.log, - "Unable to listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, + error = ?err, + %listen_multiaddr, + "Unable to listen on libp2p address" ); return Err("Libp2p was unable to listen on the given listen address.".into()); } @@ -564,9 +574,9 @@ impl Network { // strip the p2p protocol if it exists strip_peer_id(&mut multiaddr); match self.swarm.dial(multiaddr.clone()) { - Ok(()) => debug!(self.log, "Dialing libp2p peer"; "address" => %multiaddr), + Ok(()) => debug!(address = %multiaddr, "Dialing libp2p peer"), Err(err) => { - debug!(self.log, "Could not connect to peer"; "address" => 
%multiaddr, "error" => ?err) + debug!(address = %multiaddr, error = ?err, "Could not connect to peer") } }; }; @@ -629,12 +639,12 @@ impl Network { if self.subscribe_kind(topic_kind.clone()) { subscribed_topics.push(topic_kind.clone()); } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic_kind); + warn!(topic = %topic_kind, "Could not subscribe to topic"); } } if !subscribed_topics.is_empty() { - info!(self.log, "Subscribed to topics"; "topics" => ?subscribed_topics); + info!(topics = ?subscribed_topics, "Subscribed to topics"); } Ok(()) @@ -643,48 +653,114 @@ impl Network { /* Public Accessible Functions to interact with the behaviour */ /// The routing pub-sub mechanism for eth2. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn gossipsub_mut(&mut self) -> &mut Gossipsub { &mut self.swarm.behaviour_mut().gossipsub } /// The Eth2 RPC specified in the wire-0 protocol. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn eth2_rpc_mut(&mut self) -> &mut RPC { &mut self.swarm.behaviour_mut().eth2_rpc } /// Discv5 Discovery protocol. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn discovery_mut(&mut self) -> &mut Discovery { &mut self.swarm.behaviour_mut().discovery } /// Provides IP addresses and peer information. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn identify_mut(&mut self) -> &mut identify::Behaviour { &mut self.swarm.behaviour_mut().identify } /// The peer manager that keeps track of peer's reputation and status. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn peer_manager_mut(&mut self) -> &mut PeerManager { &mut self.swarm.behaviour_mut().peer_manager } /// The routing pub-sub mechanism for eth2. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn gossipsub(&self) -> &Gossipsub { &self.swarm.behaviour().gossipsub } /// The Eth2 RPC specified in the wire-0 protocol. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn eth2_rpc(&self) -> &RPC { &self.swarm.behaviour().eth2_rpc } /// Discv5 Discovery protocol. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn discovery(&self) -> &Discovery { &self.swarm.behaviour().discovery } /// Provides IP addresses and peer information. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn identify(&self) -> &identify::Behaviour { &self.swarm.behaviour().identify } /// The peer manager that keeps track of peer's reputation and status. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn peer_manager(&self) -> &PeerManager { &self.swarm.behaviour().peer_manager } /// Returns the local ENR of the node. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn local_enr(&self) -> Enr { self.network_globals.local_enr() } @@ -693,6 +769,12 @@ impl Network { /// Subscribes to a gossipsub topic kind, letting the network service determine the /// encoding and fork version. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn subscribe_kind(&mut self, kind: GossipKind) -> bool { let gossip_topic = GossipTopic::new( kind, @@ -705,6 +787,12 @@ impl Network { /// Unsubscribes from a gossipsub topic kind, letting the network service determine the /// encoding and fork version. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn unsubscribe_kind(&mut self, kind: GossipKind) -> bool { let gossip_topic = GossipTopic::new( kind, @@ -715,6 +803,12 @@ impl Network { } /// Subscribe to all required topics for the `new_fork` with the given `new_fork_digest`. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn subscribe_new_fork_topics(&mut self, new_fork: ForkName, new_fork_digest: [u8; 4]) { // Re-subscribe to non-core topics with the new fork digest let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); @@ -739,6 +833,12 @@ impl Network { } /// Unsubscribe from all topics that doesn't have the given fork_digest + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn unsubscribe_from_fork_topics_except(&mut self, except: [u8; 4]) { let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); for topic in subscriptions @@ -751,6 +851,12 @@ impl Network { } /// Remove topic weight from all topics that don't have the given fork digest. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn remove_topic_weight_except(&mut self, except: [u8; 4]) { let new_param = TopicScoreParams { topic_weight: 0.0, @@ -766,15 +872,21 @@ impl Network { .gossipsub_mut() .set_topic_params(libp2p_topic, new_param.clone()) { - Ok(_) => debug!(self.log, "Removed topic weight"; "topic" => %topic), + Ok(_) => debug!(%topic, "Removed topic weight"), Err(e) => { - warn!(self.log, "Failed to remove topic weight"; "topic" => %topic, "error" => e) + warn!(%topic, error = e, "Failed to remove topic weight") } } } } /// Returns the scoring parameters for a topic if set. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn get_topic_params(&self, topic: GossipTopic) -> Option<&TopicScoreParams> { self.swarm .behaviour() @@ -785,6 +897,12 @@ impl Network { /// Subscribes to a gossipsub topic. /// /// Returns `true` if the subscription was successful and `false` otherwise. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn subscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals @@ -796,17 +914,23 @@ impl Network { match self.gossipsub_mut().subscribe(&topic) { Err(e) => { - warn!(self.log, "Failed to subscribe to topic"; "topic" => %topic, "error" => ?e); + warn!(%topic, error = ?e, "Failed to subscribe to topic"); false } Ok(_) => { - debug!(self.log, "Subscribed to topic"; "topic" => %topic); + debug!(%topic, "Subscribed to topic"); true } } } /// Unsubscribe from a gossipsub topic. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn unsubscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals @@ -817,11 +941,17 @@ impl Network { // unsubscribe from the topic let libp2p_topic: Topic = topic.clone().into(); - debug!(self.log, "Unsubscribed to topic"; "topic" => %topic); + debug!(%topic, "Unsubscribed to topic"); self.gossipsub_mut().unsubscribe(&libp2p_topic) } /// Publishes a list of messages on the pubsub (gossipsub) behaviour, choosing the encoding. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn publish(&mut self, messages: Vec>) { for message in messages { for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { @@ -833,17 +963,15 @@ impl Network { match e { PublishError::Duplicate => { debug!( - self.log, - "Attempted to publish duplicate message"; - "kind" => %topic.kind(), + kind = %topic.kind(), + "Attempted to publish duplicate message" ); } ref e => { warn!( - self.log, - "Could not publish message"; - "error" => ?e, - "kind" => %topic.kind(), + error = ?e, + kind = %topic.kind(), + "Could not publish message" ); } } @@ -878,6 +1006,12 @@ impl Network { /// Informs the gossipsub about the result of a message validation. /// If the message is valid it will get propagated by gossipsub. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn report_message_validation_result( &mut self, propagation_source: &PeerId, @@ -912,6 +1046,12 @@ impl Network { /// Updates the current gossipsub scoring parameters based on the validator count and current /// slot. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn update_gossipsub_parameters( &mut self, active_validators: usize, @@ -926,12 +1066,12 @@ impl Network { GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into() }; - debug!(self.log, "Updating gossipsub score parameters"; - "active_validators" => active_validators); - trace!(self.log, "Updated gossipsub score parameters"; - "beacon_block_params" => ?beacon_block_params, - "beacon_aggregate_proof_params" => ?beacon_aggregate_proof_params, - "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, + debug!(active_validators, "Updating gossipsub score parameters"); + trace!( + ?beacon_block_params, + ?beacon_aggregate_proof_params, + ?beacon_attestation_subnet_params, + "Updated gossipsub score parameters" ); self.gossipsub_mut() @@ -955,6 +1095,12 @@ impl Network { /* Eth2 RPC behaviour functions */ /// Send a request to a peer over RPC. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn send_request( &mut self, peer_id: PeerId, @@ -972,6 +1118,12 @@ impl Network { } /// Send a successful response to a peer over RPC. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn send_response( &mut self, peer_id: PeerId, @@ -984,6 +1136,12 @@ impl Network { } /// Inform the peer that their request produced an error. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn send_error_response( &mut self, peer_id: PeerId, @@ -1001,11 +1159,22 @@ impl Network { } /* Peer management functions */ - + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn testing_dial(&mut self, addr: Multiaddr) -> Result<(), libp2p::swarm::DialError> { self.swarm.dial(addr) } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn report_peer( &mut self, peer_id: &PeerId, @@ -1021,6 +1190,12 @@ impl Network { /// /// This will send a goodbye, disconnect and then ban the peer. /// This is fatal for a peer, and should be used in unrecoverable circumstances. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { self.peer_manager_mut() .goodbye_peer(peer_id, reason, source); @@ -1028,16 +1203,34 @@ impl Network { /// Hard (ungraceful) disconnect for testing purposes only /// Use goodbye_peer for disconnections, do not use this function. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn __hard_disconnect_testing_only(&mut self, peer_id: PeerId) { let _ = self.swarm.disconnect_peer_id(peer_id); } /// Returns an iterator over all enr entries in the DHT. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn enr_entries(&self) -> Vec { self.discovery().table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. 
+ #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn add_enr(&mut self, enr: Enr) { self.discovery_mut().add_enr(enr); } @@ -1045,9 +1238,15 @@ impl Network { /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { if let Err(e) = self.discovery_mut().update_enr_bitfield(subnet_id, value) { - crit!(self.log, "Could not update ENR bitfield"; "error" => e); + crit!(error = e, "Could not update ENR bitfield"); } // update the local meta data which informs our peers of the update during PINGS self.update_metadata_bitfields(); @@ -1055,6 +1254,12 @@ impl Network { /// Attempts to discover new peers for a given subnet. The `min_ttl` gives the time at which we /// would like to retain the peers for. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // If discovery is not started or disabled, ignore the request if !self.discovery().started { @@ -1085,12 +1290,11 @@ impl Network { .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { trace!( - self.log, - "Discovery query ignored"; - "subnet" => ?s.subnet, - "reason" => "Already connected to desired peers", - "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, + subnet = ?s.subnet, + reason = "Already connected to desired peers", + connected_peers_on_subnet = peers_on_subnet, + target_subnet_peers = TARGET_SUBNET_PEERS, + "Discovery query ignored" ); false // Queue an outgoing connection request to the cached peers that are on `s.subnet_id`. 
@@ -1110,6 +1314,12 @@ impl Network { } /// Updates the local ENR's "eth2" field with the latest EnrForkId. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); @@ -1120,6 +1330,12 @@ impl Network { /* Private internal functions */ /// Updates the current meta data of the node to match the local ENR. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn update_metadata_bitfields(&mut self) { let local_attnets = self .discovery_mut() @@ -1147,15 +1363,27 @@ impl Network { drop(meta_data_w); self.eth2_rpc_mut().update_seq_number(seq_number); // Save the updated metadata to disk - utils::save_metadata_to_disk(&self.network_dir, meta_data, &self.log); + utils::save_metadata_to_disk(&self.network_dir, meta_data); } /// Sends a Ping request to the peer. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn ping(&mut self, peer_id: PeerId) { self.eth2_rpc_mut().ping(peer_id, RequestId::Internal); } /// Sends a METADATA request to a peer. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = if self.fork_context.spec.is_peer_das_scheduled() { // Nodes with higher custody will probably start advertising it @@ -1170,6 +1398,12 @@ impl Network { } /// Sends a METADATA response to a peer. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn send_meta_data_response( &mut self, _req: MetadataRequest, @@ -1187,6 +1421,12 @@ impl Network { // RPC Propagation methods /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. 
#[must_use = "return the response"] + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn build_response( &mut self, id: RequestId, @@ -1205,8 +1445,14 @@ impl Network { /// Dial cached Enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet, spec: Arc) { - let predicate = subnet_predicate::(vec![subnet], &self.log, spec); + let predicate = subnet_predicate::(vec![subnet], spec); let peers_to_dial: Vec = self .discovery() .cached_enrs() @@ -1224,7 +1470,7 @@ impl Network { self.discovery_mut().remove_cached_enr(&enr.peer_id()); let peer_id = enr.peer_id(); if self.peer_manager_mut().dial_peer(enr) { - debug!(self.log, "Added cached ENR peer to dial queue"; "peer_id" => %peer_id); + debug!(%peer_id, "Added cached ENR peer to dial queue"); } } } @@ -1232,6 +1478,12 @@ impl Network { /* Sub-behaviour event handling functions */ /// Handle a gossipsub event. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn inject_gs_event(&mut self, event: gossipsub::Event) -> Option> { match event { gossipsub::Event::Message { @@ -1243,7 +1495,7 @@ impl Network { // peer that originally published the message. 
match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data, &self.fork_context) { Err(e) => { - debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); + debug!(topic = ?gs_msg.topic, error = e, "Could not decode gossipsub message"); //reject the message self.gossipsub_mut().report_message_validation_result( &id, @@ -1281,11 +1533,7 @@ impl Network { .publish(Topic::from(topic.clone()), data) { Ok(_) => { - debug!( - self.log, - "Gossip message published on retry"; - "topic" => topic_str - ); + debug!(topic = topic_str, "Gossip message published on retry"); metrics::inc_counter_vec( &metrics::GOSSIP_LATE_PUBLISH_PER_TOPIC_KIND, &[topic_str], @@ -1293,10 +1541,9 @@ impl Network { } Err(PublishError::Duplicate) => { debug!( - self.log, - "Gossip message publish ignored on retry"; - "reason" => "duplicate", - "topic" => topic_str + reason = "duplicate", + topic = topic_str, + "Gossip message publish ignored on retry" ); metrics::inc_counter_vec( &metrics::GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND, @@ -1305,10 +1552,9 @@ impl Network { } Err(e) => { warn!( - self.log, - "Gossip message publish failed on retry"; - "topic" => topic_str, - "error" => %e + topic = topic_str, + error = %e, + "Gossip message publish failed on retry" ); metrics::inc_counter_vec( &metrics::GOSSIP_FAILED_LATE_PUBLISH_PER_TOPIC_KIND, @@ -1329,7 +1575,7 @@ impl Network { } } gossipsub::Event::GossipsubNotSupported { peer_id } => { - debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); + debug!(%peer_id, "Peer does not support gossipsub"); self.peer_manager_mut().report_peer( &peer_id, PeerAction::Fatal, @@ -1342,10 +1588,17 @@ impl Network { peer_id, failed_messages, } => { - debug!(self.log, "Slow gossipsub peer"; "peer_id" => %peer_id, "publish" => failed_messages.publish, "forward" => failed_messages.forward, "priority" => failed_messages.priority, "non_priority" => failed_messages.non_priority); + debug!( + peer_id = %peer_id, + 
publish = failed_messages.publish, + forward = failed_messages.forward, + priority = failed_messages.priority, + non_priority = failed_messages.non_priority, + "Slow gossipsub peer" + ); // Punish the peer if it cannot handle priority messages if failed_messages.timeout > 10 { - debug!(self.log, "Slow gossipsub peer penalized for priority failure"; "peer_id" => %peer_id); + debug!(%peer_id, "Slow gossipsub peer penalized for priority failure"); self.peer_manager_mut().report_peer( &peer_id, PeerAction::HighToleranceError, @@ -1354,7 +1607,7 @@ impl Network { "publish_timeout_penalty", ); } else if failed_messages.total_queue_full() > 10 { - debug!(self.log, "Slow gossipsub peer penalized for send queue full"; "peer_id" => %peer_id); + debug!(%peer_id, "Slow gossipsub peer penalized for send queue full"); self.peer_manager_mut().report_peer( &peer_id, PeerAction::HighToleranceError, @@ -1369,6 +1622,12 @@ impl Network { } /// Handle an RPC event. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { let peer_id = event.peer_id; @@ -1378,11 +1637,7 @@ impl Network { && (matches!(event.message, Err(HandlerErr::Inbound { .. })) || matches!(event.message, Ok(RPCReceived::Request(..)))) { - debug!( - self.log, - "Ignoring rpc message of disconnecting peer"; - event - ); + debug!(?event, "Ignoring rpc message of disconnecting peer"); return None; } @@ -1445,10 +1700,10 @@ impl Network { RequestType::Goodbye(reason) => { // queue for disconnection without a goodbye message debug!( - self.log, "Peer sent Goodbye"; - "peer_id" => %peer_id, - "reason" => %reason, - "client" => %self.network_globals.client(&peer_id), + %peer_id, + %reason, + client = %self.network_globals.client(&peer_id), + "Peer sent Goodbye" ); // NOTE: We currently do not inform the application that we are // disconnecting here. 
The RPC handler will automatically @@ -1659,6 +1914,12 @@ impl Network { } /// Handle an identify event. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn inject_identify_event(&mut self, event: identify::Event) -> Option> { match event { identify::Event::Received { @@ -1667,10 +1928,7 @@ impl Network { connection_id: _, } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { - debug!( - self.log, - "More than 10 addresses have been identified, truncating" - ); + debug!("More than 10 addresses have been identified, truncating"); info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } // send peer info to the peer manager. @@ -1684,6 +1942,12 @@ impl Network { } /// Handle a peer manager event. + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn inject_pm_event(&mut self, event: PeerManagerEvent) -> Option> { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { @@ -1728,8 +1992,7 @@ impl Network { None } PeerManagerEvent::DisconnectPeer(peer_id, reason) => { - debug!(self.log, "Peer Manager disconnecting peer"; - "peer_id" => %peer_id, "reason" => %reason); + debug!(%peer_id, %reason, "Peer Manager disconnecting peer"); // send one goodbye self.eth2_rpc_mut() .shutdown(peer_id, RequestId::Internal, reason); @@ -1738,10 +2001,16 @@ impl Network { } } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn inject_upnp_event(&mut self, event: libp2p::upnp::Event) { match event { libp2p::upnp::Event::NewExternalAddr(addr) => { - info!(self.log, "UPnP route established"; "addr" => %addr); + info!(%addr, "UPnP route established"); let mut iter = addr.iter(); let is_ip6 = { let addr = iter.next(); @@ -1753,38 +2022,40 @@ impl Network { if let Err(e) = self.discovery_mut().update_enr_quic_port(udp_port, is_ip6) { - warn!(self.log, "Failed to update ENR"; "error" => 
e); + warn!(error = e, "Failed to update ENR"); } } _ => { - trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr) + trace!(%addr, "UPnP address mapped multiaddr from unknown transport"); } }, Some(multiaddr::Protocol::Tcp(tcp_port)) => { if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port, is_ip6) { - warn!(self.log, "Failed to update ENR"; "error" => e); + warn!(error = e, "Failed to update ENR"); } } _ => { - trace!(self.log, "UPnP address mapped multiaddr from unknown transport"; "addr" => %addr); + trace!(%addr, "UPnP address mapped multiaddr from unknown transport"); } } } libp2p::upnp::Event::ExpiredExternalAddr(_) => {} libp2p::upnp::Event::GatewayNotFound => { - info!(self.log, "UPnP not available"); + info!("UPnP not available"); } libp2p::upnp::Event::NonRoutableGateway => { - info!( - self.log, - "UPnP is available but gateway is not exposed to public network" - ); + info!("UPnP is available but gateway is not exposed to public network"); } } } /* Networking polling */ - + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] pub async fn next_event(&mut self) -> NetworkEvent { loop { tokio::select! 
{ @@ -1795,7 +2066,6 @@ impl Network { return event; } }, - // perform gossipsub score updates when necessary _ = self.update_gossipsub_scores.tick() => { let this = self.swarm.behaviour_mut(); @@ -1804,7 +2074,7 @@ impl Network { // poll the gossipsub cache to clear expired messages Some(result) = self.gossip_cache.next() => { match result { - Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), + Err(e) => warn!(error = e, "Gossip cache error"), Ok(expired_topic) => { if let Some(v) = metrics::get_int_counter( &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, @@ -1819,6 +2089,12 @@ impl Network { } } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] fn parse_swarm_event( &mut self, event: SwarmEvent>, @@ -1852,7 +2128,7 @@ impl Network { send_back_addr, connection_id: _, } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + trace!(our_addr = %local_addr, from = %send_back_addr, "Incoming connection"); None } SwarmEvent::IncomingConnectionError { @@ -1883,7 +2159,7 @@ impl Network { } }, }; - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr); + debug!(our_addr = %local_addr, from = %send_back_addr, error = error_repr, "Failed incoming connection"); None } SwarmEvent::OutgoingConnectionError { @@ -1898,7 +2174,7 @@ impl Network { } SwarmEvent::NewListenAddr { address, .. } => Some(NetworkEvent::NewListenAddr(address)), SwarmEvent::ExpiredListenAddr { address, .. 
} => { - debug!(self.log, "Listen address expired"; "address" => %address); + debug!(%address, "Listen address expired"); None } SwarmEvent::ListenerClosed { @@ -1906,10 +2182,10 @@ impl Network { } => { match reason { Ok(_) => { - debug!(self.log, "Listener gracefully closed"; "addresses" => ?addresses) + debug!(?addresses, "Listener gracefully closed") } Err(reason) => { - crit!(self.log, "Listener abruptly closed"; "addresses" => ?addresses, "reason" => ?reason) + crit!(?addresses, ?reason, "Listener abruptly closed") } }; if Swarm::listeners(&self.swarm).count() == 0 { @@ -1919,7 +2195,7 @@ impl Network { } } SwarmEvent::ListenerError { error, .. } => { - debug!(self.log, "Listener closed connection attempt"; "reason" => ?error); + debug!(reason = ?error, "Listener closed connection attempt"); None } _ => { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 72c2b29102..01929bcb01 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -9,7 +9,6 @@ use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxe use libp2p::identity::{secp256k1, Keypair}; use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; -use slog::{debug, warn}; use ssz::Decode; use std::collections::HashSet; use std::fs::File; @@ -17,6 +16,7 @@ use std::io::prelude::*; use std::path::Path; use std::sync::Arc; use std::time::Duration; +use tracing::{debug, warn}; use types::{ ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId, }; @@ -107,21 +107,21 @@ fn keypair_from_bytes(mut bytes: Vec) -> Result { /// generated and is then saved to disk. /// /// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. 
-pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { +pub fn load_private_key(config: &NetworkConfig) -> Keypair { // check for key from disk let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { let mut key_bytes: Vec = Vec::with_capacity(36); match network_key_file.read_to_end(&mut key_bytes) { - Err(_) => debug!(log, "Could not read network key file"), + Err(_) => debug!("Could not read network key file"), Ok(_) => { // only accept secp256k1 keys for now if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) { let kp: secp256k1::Keypair = secret_key.into(); - debug!(log, "Loaded network key from disk."); + debug!("Loaded network key from disk."); return kp.into(); } else { - debug!(log, "Network key file is not a valid secp256k1 key"); + debug!("Network key file is not a valid secp256k1 key"); } } } @@ -134,12 +134,12 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes())) { Ok(_) => { - debug!(log, "New network key generated and written to disk"); + debug!("New network key generated and written to disk"); } Err(e) => { warn!( - log, - "Could not write node key to file: {:?}. error: {}", network_key_f, e + "Could not write node key to file: {:?}. error: {}", + network_key_f, e ); } } @@ -166,7 +166,6 @@ pub fn strip_peer_id(addr: &mut Multiaddr) { pub fn load_or_build_metadata( network_dir: &Path, custody_group_count_opt: Option, - log: &slog::Logger, ) -> MetaData { // We load a V2 metadata version by default (regardless of current fork) // since a V2 metadata can be converted to V1. 
The RPC encoder is responsible @@ -192,7 +191,7 @@ pub fn load_or_build_metadata( { meta_data.seq_number += 1; } - debug!(log, "Loaded metadata from disk"); + debug!("Loaded metadata from disk"); } Err(_) => { match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { @@ -200,13 +199,12 @@ pub fn load_or_build_metadata( let persisted_metadata = MetaData::V1(persisted_metadata); // Increment seq number as the persisted metadata version is updated meta_data.seq_number = *persisted_metadata.seq_number() + 1; - debug!(log, "Loaded metadata from disk"); + debug!("Loaded metadata from disk"); } Err(e) => { debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, + error = ?e, + "Metadata from file could not be decoded" ); } } @@ -227,8 +225,8 @@ pub fn load_or_build_metadata( MetaData::V2(meta_data) }; - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); - save_metadata_to_disk(network_dir, meta_data.clone(), log); + debug!(seq_num = meta_data.seq_number(), "Metadata sequence number"); + save_metadata_to_disk(network_dir, meta_data.clone()); meta_data } @@ -275,11 +273,7 @@ pub(crate) fn create_whitelist_filter( } /// Persist metadata to disk -pub(crate) fn save_metadata_to_disk( - dir: &Path, - metadata: MetaData, - log: &slog::Logger, -) { +pub(crate) fn save_metadata_to_disk(dir: &Path, metadata: MetaData) { let _ = std::fs::create_dir_all(dir); // We always store the metadata v2 to disk because // custody_group_count parameter doesn't need to be persisted across runs. 
@@ -288,14 +282,13 @@ pub(crate) fn save_metadata_to_disk( let metadata_bytes = metadata.metadata_v2().as_ssz_bytes(); match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) { Ok(_) => { - debug!(log, "Metadata written to disk"); + debug!("Metadata written to disk"); } Err(e) => { warn!( - log, - "Could not write metadata to disk"; - "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), - "error" => %e + file = format!("{:?}{:?}", dir, METADATA_FILENAME), + error = %e, + "Could not write metadata to disk" ); } } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index d243c68c0f..4269a8973c 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -5,9 +5,9 @@ use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; use crate::{Client, Enr, EnrExt, GossipTopic, Multiaddr, NetworkConfig, PeerId}; use parking_lot::RwLock; -use slog::error; use std::collections::HashSet; use std::sync::Arc; +use tracing::error; use types::data_column_custody_group::{ compute_columns_for_custody_group, compute_subnets_from_custody_group, get_custody_groups, }; @@ -45,7 +45,6 @@ impl NetworkGlobals { local_metadata: MetaData, trusted_peers: Vec, disable_peer_scoring: bool, - log: &slog::Logger, config: Arc, spec: Arc, ) -> Self { @@ -56,9 +55,8 @@ impl NetworkGlobals { Ok(&cgc) if cgc <= spec.number_of_custody_groups => cgc, _ => { error!( - log, - "custody_group_count from metadata is either invalid or not set. This is a bug!"; - "info" => "falling back to default custody requirement" + info = "falling back to default custody requirement", + "custody_group_count from metadata is either invalid or not set. This is a bug!" 
); spec.custody_requirement } @@ -96,7 +94,7 @@ impl NetworkGlobals { peer_id: RwLock::new(enr.peer_id()), listen_multiaddrs: RwLock::new(Vec::new()), local_metadata: RwLock::new(local_metadata), - peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring, log)), + peers: RwLock::new(PeerDB::new(trusted_peers, disable_peer_scoring)), gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::Paused), @@ -197,7 +195,6 @@ impl NetworkGlobals { /// TESTING ONLY. Build a dummy NetworkGlobals instance. pub fn new_test_globals( trusted_peers: Vec, - log: &slog::Logger, config: Arc, spec: Arc, ) -> NetworkGlobals { @@ -207,13 +204,12 @@ impl NetworkGlobals { syncnets: Default::default(), custody_group_count: spec.custody_requirement, }); - Self::new_test_globals_with_metadata(trusted_peers, metadata, log, config, spec) + Self::new_test_globals_with_metadata(trusted_peers, metadata, config, spec) } pub(crate) fn new_test_globals_with_metadata( trusted_peers: Vec, metadata: MetaData, - log: &slog::Logger, config: Arc, spec: Arc, ) -> NetworkGlobals { @@ -221,18 +217,19 @@ impl NetworkGlobals { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair); let enr = discv5::enr::Enr::builder().build(&enr_key).unwrap(); - NetworkGlobals::new(enr, metadata, trusted_peers, false, log, config, spec) + NetworkGlobals::new(enr, metadata, trusted_peers, false, config, spec) } } #[cfg(test)] mod test { use super::*; + use logging::create_test_tracing_subscriber; use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] fn test_sampling_subnets() { - let log = logging::test_logger(); + create_test_tracing_subscriber(); let mut spec = E::default_spec(); spec.fulu_fork_epoch = Some(Epoch::new(0)); @@ -244,7 +241,6 @@ mod test { let globals = NetworkGlobals::::new_test_globals_with_metadata( vec![], 
metadata, - &log, config, Arc::new(spec), ); @@ -256,7 +252,7 @@ mod test { #[test] fn test_sampling_columns() { - let log = logging::test_logger(); + create_test_tracing_subscriber(); let mut spec = E::default_spec(); spec.fulu_fork_epoch = Some(Epoch::new(0)); @@ -268,7 +264,6 @@ mod test { let globals = NetworkGlobals::::new_test_globals_with_metadata( vec![], metadata, - &log, config, Arc::new(spec), ); diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 6a3ec6dd32..d686885ff7 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -4,10 +4,11 @@ use lighthouse_network::Enr; use lighthouse_network::EnrExt; use lighthouse_network::Multiaddr; use lighthouse_network::{NetworkConfig, NetworkEvent}; -use slog::{debug, error, o, Drain}; use std::sync::Arc; use std::sync::Weak; use tokio::runtime::Runtime; +use tracing::{debug, error, info_span, Instrument}; +use tracing_subscriber::EnvFilter; use types::{ ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, @@ -67,15 +68,12 @@ impl std::ops::DerefMut for Libp2pInstance { } #[allow(unused)] -pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - +pub fn build_tracing_subscriber(level: &str, enabled: bool) { if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::try_new(level).unwrap()) + .try_init() + .unwrap(); } } @@ -101,16 +99,16 @@ pub fn build_config(mut boot_nodes: Vec) -> Arc { pub async fn build_libp2p_instance( rt: Weak, boot_nodes: Vec, - log: slog::Logger, fork_name: ForkName, 
chain_spec: Arc, + service_name: String, ) -> Libp2pInstance { let config = build_config(boot_nodes); // launch libp2p service let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); + let executor = task_executor::TaskExecutor::new(rt, exit, shutdown_tx, service_name); let libp2p_context = lighthouse_network::Context { config, enr_fork_id: EnrForkId::default(), @@ -119,7 +117,7 @@ pub async fn build_libp2p_instance( libp2p_registry: None, }; Libp2pInstance( - LibP2PService::new(executor, libp2p_context, &log) + LibP2PService::new(executor, libp2p_context) .await .expect("should build libp2p instance") .0, @@ -143,18 +141,20 @@ pub enum Protocol { #[allow(dead_code)] pub async fn build_node_pair( rt: Weak, - log: &slog::Logger, fork_name: ForkName, spec: Arc, protocol: Protocol, ) -> (Libp2pInstance, Libp2pInstance) { - let sender_log = log.new(o!("who" => "sender")); - let receiver_log = log.new(o!("who" => "receiver")); - - let mut sender = - build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec.clone()).await; + let mut sender = build_libp2p_instance( + rt.clone(), + vec![], + fork_name, + spec.clone(), + "sender".to_string(), + ) + .await; let mut receiver = - build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec.clone()).await; + build_libp2p_instance(rt, vec![], fork_name, spec.clone(), "receiver".to_string()).await; // let the two nodes set up listeners let sender_fut = async { @@ -179,7 +179,8 @@ pub async fn build_node_pair( } } } - }; + } + .instrument(info_span!("Sender", who = "sender")); let receiver_fut = async { loop { if let NetworkEvent::NewListenAddr(addr) = receiver.next_event().await { @@ -201,7 +202,8 @@ pub async fn build_node_pair( } } } - }; + } + .instrument(info_span!("Receiver", who = "receiver")); let joined = futures::future::join(sender_fut, receiver_fut); @@ -209,9 +211,9 
@@ pub async fn build_node_pair( match sender.testing_dial(receiver_multiaddr.clone()) { Ok(()) => { - debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) + debug!(address = ?receiver_multiaddr, "Sender dialed receiver") } - Err(_) => error!(log, "Dialing failed"), + Err(_) => error!("Dialing failed"), }; (sender, receiver) } @@ -220,7 +222,6 @@ pub async fn build_node_pair( #[allow(dead_code)] pub async fn build_linear( rt: Weak, - log: slog::Logger, n: usize, fork_name: ForkName, spec: Arc, @@ -228,7 +229,14 @@ pub async fn build_linear( let mut nodes = Vec::with_capacity(n); for _ in 0..n { nodes.push( - build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec.clone()).await, + build_libp2p_instance( + rt.clone(), + vec![], + fork_name, + spec.clone(), + "linear".to_string(), + ) + .await, ); } @@ -238,8 +246,8 @@ pub async fn build_linear( .collect(); for i in 0..n - 1 { match nodes[i].testing_dial(multiaddrs[i + 1].clone()) { - Ok(()) => debug!(log, "Connected"), - Err(_) => error!(log, "Failed to connect"), + Ok(()) => debug!("Connected"), + Err(_) => error!("Failed to connect"), }; } nodes diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 4b54a24ddc..d736fefa5f 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -2,17 +2,17 @@ mod common; -use common::Protocol; +use common::{build_tracing_subscriber, Protocol}; use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; -use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; +use tracing::{debug, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, 
BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, @@ -53,26 +53,19 @@ fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon #[test] #[allow(clippy::single_match)] fn test_tcp_status_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. + let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); - let log = common::build_log(log_level, enable_logging); - let spec = Arc::new(E::default_spec()); rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair( - Arc::downgrade(&rt), - &log, - ForkName::Base, - spec, - Protocol::Tcp, - ) - .await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), ForkName::Base, spec, Protocol::Tcp).await; // Dummy STATUS RPC message let rpc_request = RequestType::Status(StatusMessage { @@ -98,7 +91,7 @@ fn test_tcp_status_rpc() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -109,9 +102,9 @@ fn test_tcp_status_rpc() { response, } => { // Should receive the RPC response - debug!(log, "Sender Received"); + debug!("Sender Received"); assert_eq!(response, rpc_response.clone()); - debug!(log, "Sender Completed"); + debug!("Sender Completed"); return; } _ => {} @@ -130,7 +123,7 @@ fn test_tcp_status_rpc() { } => { if request.r#type == rpc_request { // send the response - debug!(log, "Receiver Received"); + debug!("Receiver Received"); receiver.send_response(peer_id, id, request.id, rpc_response.clone()); } } @@ -153,14 +146,13 @@ fn test_tcp_status_rpc() { #[test] 
#[allow(clippy::single_match)] fn test_tcp_blocks_by_range_chunked_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. + let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 6; - let log = common::build_log(log_level, enable_logging); - let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); @@ -169,7 +161,6 @@ fn test_tcp_blocks_by_range_chunked_rpc() { // get sender/receiver let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Bellatrix, spec.clone(), Protocol::Tcp, @@ -206,7 +197,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -216,7 +207,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { id: _, response, } => { - warn!(log, "Sender received a response"); + warn!("Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { if messages_received < 2 { @@ -227,7 +218,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { assert_eq!(response, rpc_response_bellatrix_small.clone()); } messages_received += 1; - warn!(log, "Chunk received"); + warn!("Chunk received"); } Response::BlocksByRange(None) => { // should be exactly `messages_to_send` messages before terminating @@ -254,7 +245,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); for i in 0..messages_to_send { // Send first third of responses as base blocks, // second as altair and third as bellatrix. 
@@ -300,15 +291,14 @@ fn test_tcp_blocks_by_range_chunked_rpc() { #[test] #[allow(clippy::single_match)] fn test_blobs_by_range_chunked_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. + let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let slot_count = 32; let messages_to_send = 34; - let log = common::build_log(log_level, enable_logging); - let rt = Arc::new(Runtime::new().unwrap()); rt.block_on(async { @@ -316,7 +306,6 @@ fn test_blobs_by_range_chunked_rpc() { let spec = Arc::new(E::default_spec()); let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Deneb, spec.clone(), Protocol::Tcp, @@ -342,7 +331,7 @@ fn test_blobs_by_range_chunked_rpc() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -352,12 +341,12 @@ fn test_blobs_by_range_chunked_rpc() { id: _, response, } => { - warn!(log, "Sender received a response"); + warn!("Sender received a response"); match response { Response::BlobsByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; - warn!(log, "Chunk received"); + warn!("Chunk received"); } Response::BlobsByRange(None) => { // should be exactly `messages_to_send` messages before terminating @@ -384,7 +373,7 @@ fn test_blobs_by_range_chunked_rpc() { } => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); for _ in 0..messages_to_send { // Send first third of responses as base blocks, // second as altair and third as bellatrix. 
@@ -423,14 +412,13 @@ fn test_blobs_by_range_chunked_rpc() { #[test] #[allow(clippy::single_match)] fn test_tcp_blocks_by_range_over_limit() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. + let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 5; - let log = common::build_log(log_level, enable_logging); - let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); @@ -439,7 +427,6 @@ fn test_tcp_blocks_by_range_over_limit() { // get sender/receiver let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Bellatrix, spec.clone(), Protocol::Tcp, @@ -466,7 +453,7 @@ fn test_tcp_blocks_by_range_over_limit() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -492,7 +479,7 @@ fn test_tcp_blocks_by_range_over_limit() { } => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); receiver.send_response( @@ -529,15 +516,14 @@ fn test_tcp_blocks_by_range_over_limit() { // Tests that a streamed BlocksByRange RPC Message terminates when all expected chunks were received #[test] fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. 
+ let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 10; let extra_messages_to_send = 10; - let log = common::build_log(log_level, enable_logging); - let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); @@ -546,7 +532,6 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // get sender/receiver let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Base, spec.clone(), Protocol::Tcp, @@ -574,7 +559,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -586,7 +571,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { } => // Should receive the RPC response { - debug!(log, "Sender received a response"); + debug!("Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); @@ -630,7 +615,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { )) => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); message_info = Some((peer_id, id, request.id)); } } @@ -643,7 +628,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { messages_sent += 1; let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); - debug!(log, "Sending message {}", messages_sent); + debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages return; @@ -666,11 +651,11 @@ fn 
test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { #[test] #[allow(clippy::single_match)] fn test_tcp_blocks_by_range_single_empty_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Trace; + // Set up the logging. + let log_level = "trace"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); - let log = common::build_log(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); let spec = Arc::new(E::default_spec()); @@ -679,7 +664,6 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { // get sender/receiver let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Base, spec.clone(), Protocol::Tcp, @@ -709,7 +693,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -722,7 +706,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { Response::BlocksByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; - warn!(log, "Chunk received"); + warn!("Chunk received"); } Response::BlocksByRange(None) => { // should be exactly 10 messages before terminating @@ -748,7 +732,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { } => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); for _ in 1..=messages_to_send { receiver.send_response( @@ -788,13 +772,13 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { #[test] #[allow(clippy::single_match)] fn test_tcp_blocks_by_root_chunked_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. 
+ let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let messages_to_send = 6; - let log = common::build_log(log_level, enable_logging); let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); @@ -802,7 +786,6 @@ fn test_tcp_blocks_by_root_chunked_rpc() { rt.block_on(async { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Bellatrix, spec.clone(), Protocol::Tcp, @@ -847,7 +830,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -866,7 +849,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { assert_eq!(response, rpc_response_bellatrix_small.clone()); } messages_received += 1; - debug!(log, "Chunk received"); + debug!("Chunk received"); } Response::BlocksByRoot(None) => { // should be exactly messages_to_send @@ -892,7 +875,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } => { if request.r#type == rpc_request { // send the response - debug!(log, "Receiver got request"); + debug!("Receiver got request"); for i in 0..messages_to_send { // Send equal base, altair and bellatrix blocks @@ -904,7 +887,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { rpc_response_bellatrix_small.clone() }; receiver.send_response(peer_id, id, request.id, rpc_response); - debug!(log, "Sending message"); + debug!("Sending message"); } // send the stream termination receiver.send_response( @@ -913,7 +896,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { request.id, Response::BlocksByRange(None), ); - debug!(log, "Send stream term"); + debug!("Send stream term"); } } _ => {} // Ignore other events @@ -933,14 +916,14 @@ fn test_tcp_blocks_by_root_chunked_rpc() { // Tests a streamed, chunked BlocksByRoot RPC Message terminates 
when all expected reponses have been received #[test] fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; + // Set up the logging. + let log_level = "debug"; let enable_logging = false; + build_tracing_subscriber(log_level, enable_logging); let messages_to_send: u64 = 10; let extra_messages_to_send: u64 = 10; - let log = common::build_log(log_level, enable_logging); let spec = Arc::new(E::default_spec()); let rt = Arc::new(Runtime::new().unwrap()); @@ -948,7 +931,6 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { rt.block_on(async { let (mut sender, mut receiver) = common::build_node_pair( Arc::downgrade(&rt), - &log, ForkName::Base, spec.clone(), Protocol::Tcp, @@ -988,7 +970,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) .unwrap(); @@ -998,12 +980,12 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { id: AppRequestId::Router, response, } => { - debug!(log, "Sender received a response"); + debug!("Sender received a response"); match response { Response::BlocksByRoot(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; - debug!(log, "Chunk received"); + debug!("Chunk received"); } Response::BlocksByRoot(None) => { // should be exactly messages_to_send @@ -1044,7 +1026,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { )) => { if request.r#type == rpc_request { // send the response - warn!(log, "Receiver got request"); + warn!("Receiver got request"); message_info = Some((peer_id, id, request.id)); } } @@ -1057,7 +1039,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { messages_sent += 1; let (peer_id, 
stream_id, request_id) = message_info.as_ref().unwrap(); receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); - debug!(log, "Sending message {}", messages_sent); + debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages return; @@ -1078,8 +1060,9 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { /// Establishes a pair of nodes and disconnects the pair based on the selected protocol via an RPC /// Goodbye message. -fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { - let log = common::build_log(log_level, enable_logging); +fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) { + // Set up the logging. + build_tracing_subscriber(log_level, enable_logging); let rt = Arc::new(Runtime::new().unwrap()); @@ -1088,8 +1071,7 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { // get sender/receiver rt.block_on(async { let (mut sender, mut receiver) = - common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, spec, protocol) - .await; + common::build_node_pair(Arc::downgrade(&rt), ForkName::Base, spec, protocol).await; // build the sender future let sender_future = async { @@ -1097,7 +1079,7 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { match sender.next_event().await { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a goodbye and disconnect - debug!(log, "Sending RPC"); + debug!("Sending RPC"); sender.goodbye_peer( &peer_id, GoodbyeReason::IrrelevantNetwork, @@ -1137,18 +1119,16 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { #[test] #[allow(clippy::single_match)] fn tcp_test_goodbye_rpc() { - // set up the logging. 
The level and enabled logging or not - let log_level = Level::Debug; - let enable_logging = false; - goodbye_test(log_level, enable_logging, Protocol::Tcp); + let log_level = "debug"; + let enabled_logging = false; + goodbye_test(log_level, enabled_logging, Protocol::Tcp); } // Tests a Goodbye RPC message #[test] #[allow(clippy::single_match)] fn quic_test_goodbye_rpc() { - // set up the logging. The level and enabled logging or not - let log_level = Level::Debug; - let enable_logging = false; - goodbye_test(log_level, enable_logging, Protocol::Quic); + let log_level = "debug"; + let enabled_logging = false; + goodbye_test(log_level, enabled_logging, Protocol::Quic); } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index efe24b7182..4250f8f8bb 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -9,15 +9,12 @@ bls = { workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } genesis = { workspace = true } -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", tag = "sigp-gossipsub-0.1" } +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", branch = "sigp-gossipsub" } k256 = "0.13.4" kzg = { workspace = true } matches = "0.1.8" rand_chacha = "0.3.1" serde_json = { workspace = true } -slog-async = { workspace = true } -slog-term = { workspace = true } -sloggers = { workspace = true } [dependencies] alloy-primitives = { workspace = true } @@ -42,7 +39,6 @@ metrics = { workspace = true } operation_pool = { workspace = true } parking_lot = { workspace = true } rand = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } smallvec = { workspace = true } ssz_types = { workspace = true } @@ -51,6 +47,8 @@ strum = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } +tracing = { workspace = true } 
+tracing-subscriber = { workspace = true } types = { workspace = true } [features] diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index e63ff55039..ce9d241d43 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -5,10 +5,10 @@ use anyhow::{bail, Context, Error}; use igd_next::{aio::tokio as igd, PortMappingProtocol}; -use slog::debug; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; use tokio::time::sleep; +use tracing::debug; /// The duration in seconds of a port mapping on the gateway. const MAPPING_DURATION: u32 = 3600; @@ -17,11 +17,7 @@ const MAPPING_DURATION: u32 = 3600; const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; /// Attempts to map Discovery external port mappings with UPnP. -pub async fn construct_upnp_mappings( - addr: Ipv4Addr, - port: u16, - log: slog::Logger, -) -> Result<(), Error> { +pub async fn construct_upnp_mappings(addr: Ipv4Addr, port: u16) -> Result<(), Error> { let gateway = igd::search_gateway(Default::default()) .await .context("Gateway does not support UPnP")?; @@ -54,7 +50,7 @@ pub async fn construct_upnp_mappings( ) .await .with_context(|| format!("Could not UPnP map port: {} on the gateway", port))?; - debug!(log, "Discovery UPnP port mapped"; "port" => %port); + debug!(%port,"Discovery UPnP port mapped"); sleep(Duration::from_secs(MAPPING_TIMEOUT)).await; } } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 4338bfbc89..afab2d178c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -21,8 +21,8 @@ use beacon_chain::{ GossipVerifiedBlock, NotifyExecutionLayer, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; +use logging::crit; use operation_pool::ReceivedPreCapella; -use slog::{crit, 
debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use std::fs; @@ -32,6 +32,7 @@ use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; +use tracing::{debug, error, info, trace, warn}; use types::{ beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, @@ -251,9 +252,8 @@ impl NetworkBeaconProcessor { Ok(results) => results, Err(e) => { error!( - self.log, - "Batch unagg. attn verification failed"; - "error" => ?e + error = ?e, + "Batch unagg. attn verification failed" ); return; } @@ -264,10 +264,9 @@ impl NetworkBeaconProcessor { // The log is `crit` since in this scenario we might be penalizing/rewarding the wrong // peer. crit!( - self.log, - "Batch attestation result mismatch"; - "results" => results.len(), - "packages" => packages.len(), + results = results.len(), + packages = packages.len(), + "Batch attestation result mismatch" ) } @@ -355,19 +354,17 @@ impl NetworkBeaconProcessor { e, )) => { debug!( - self.log, - "Attestation invalid for fork choice"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Attestation invalid for fork choice" ) } e => error!( - self.log, - "Error applying attestation to fork choice"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Error applying attestation to fork choice" ), } } @@ -377,11 +374,10 @@ impl NetworkBeaconProcessor { .add_to_naive_aggregation_pool(&verified_attestation) { debug!( - self.log, - "Attestation invalid for agg pool"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Attestation invalid for agg pool" ) } @@ 
-459,10 +455,9 @@ impl NetworkBeaconProcessor { seen_timestamp, ) { error!( - &self.log, - "Unable to queue converted SingleAttestation"; - "error" => %e, - "slot" => slot, + error = %e, + %slot, + "Unable to queue converted SingleAttestation" ); self.propagate_validation_result( message_id, @@ -486,11 +481,7 @@ impl NetworkBeaconProcessor { beacon_block_root, )) .unwrap_or_else(|_| { - warn!( - self.log, - "Failed to send to sync service"; - "msg" => "UnknownBlockHash" - ) + warn!(msg = "UnknownBlockHash", "Failed to send to sync service") }); let processor = self.clone(); // Do not allow this attestation to be re-processed beyond this point. @@ -510,10 +501,7 @@ impl NetworkBeaconProcessor { }), }); if sender.try_send(reprocess_msg).is_err() { - error!( - self.log, - "Failed to send attestation for re-processing"; - ) + error!("Failed to send attestation for re-processing") } } else { // We shouldn't make any further attempts to process this attestation. @@ -611,9 +599,8 @@ impl NetworkBeaconProcessor { Ok(results) => results, Err(e) => { error!( - self.log, - "Batch agg. attn verification failed"; - "error" => ?e + error = ?e, + "Batch agg. attn verification failed" ); return; } @@ -624,10 +611,9 @@ impl NetworkBeaconProcessor { // The log is `crit` since in this scenario we might be penalizing/rewarding the wrong // peer. crit!( - self.log, - "Batch agg. attestation result mismatch"; - "results" => results.len(), - "packages" => packages.len(), + results = results.len(), + packages = packages.len(), + "Batch agg. 
attestation result mismatch" ) } @@ -707,30 +693,27 @@ impl NetworkBeaconProcessor { e, )) => { debug!( - self.log, - "Aggregate invalid for fork choice"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Aggregate invalid for fork choice" ) } e => error!( - self.log, - "Error applying aggregate to fork choice"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Error applying aggregate to fork choice" ), } } if let Err(e) = self.chain.add_to_block_inclusion_pool(verified_aggregate) { debug!( - self.log, - "Attestation invalid for op pool"; - "reason" => ?e, - "peer" => %peer_id, - "beacon_block_root" => ?beacon_block_root + reason = ?e, + %peer_id, + ?beacon_block_root, + "Attestation invalid for op pool" ) } @@ -786,11 +769,10 @@ impl NetworkBeaconProcessor { ); debug!( - self.log, - "Successfully verified gossip data column sidecar"; - "slot" => %slot, - "block_root" => %block_root, - "index" => %index, + %slot, + %block_root, + %index, + "Successfully verified gossip data column sidecar" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); @@ -817,11 +799,10 @@ impl NetworkBeaconProcessor { match err { GossipDataColumnError::ParentUnknown { parent_root } => { debug!( - self.log, - "Unknown parent hash for column"; - "action" => "requesting parent", - "block_root" => %block_root, - "parent_root" => %parent_root, + action = "requesting parent", + %block_root, + %parent_root, + "Unknown parent hash for column" ); self.send_sync_message(SyncMessage::UnknownParentDataColumn( peer_id, @@ -831,9 +812,8 @@ impl NetworkBeaconProcessor { GossipDataColumnError::PubkeyCacheTimeout | GossipDataColumnError::BeaconChainError(_) => { crit!( - self.log, - "Internal error when verifying column sidecar"; - "error" => ?err, + error = ?err, + "Internal error when verifying column 
sidecar" ) } GossipDataColumnError::ProposalSignatureInvalid @@ -848,12 +828,11 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InconsistentCommitmentsOrProofLength | GossipDataColumnError::NotFinalizedDescendant { .. } => { debug!( - self.log, - "Could not verify column sidecar for gossip. Rejecting the column sidecar"; - "error" => ?err, - "slot" => %slot, - "block_root" => %block_root, - "index" => %index, + error = ?err, + %slot, + %block_root, + %index, + "Could not verify column sidecar for gossip. Rejecting the column sidecar" ); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -872,22 +851,20 @@ impl NetworkBeaconProcessor { // Do not penalise the peer. // Gossip filter should filter any duplicates received after this. debug!( - self.log, - "Received already available column sidecar. Ignoring the column sidecar"; - "slot" => %slot, - "block_root" => %block_root, - "index" => %index, + %slot, + %block_root, + %index, + "Received already available column sidecar. Ignoring the column sidecar" ) } GossipDataColumnError::FutureSlot { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { debug!( - self.log, - "Could not verify column sidecar for gossip. Ignoring the column sidecar"; - "error" => ?err, - "slot" => %slot, - "block_root" => %block_root, - "index" => %index, + error = ?err, + %slot, + %block_root, + %index, + "Could not verify column sidecar for gossip. Ignoring the column sidecar" ); // Prevent recurring behaviour by penalizing the peer slightly. 
self.gossip_penalize_peer( @@ -933,23 +910,21 @@ impl NetworkBeaconProcessor { if delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL); debug!( - self.log, - "Gossip blob arrived late"; - "block_root" => ?gossip_verified_blob.block_root(), - "proposer_index" => gossip_verified_blob.block_proposer_index(), - "slot" => gossip_verified_blob.slot(), - "delay" => ?delay, - "commitment" => %gossip_verified_blob.kzg_commitment(), + block_root = ?gossip_verified_blob.block_root(), + proposer_index = gossip_verified_blob.block_proposer_index(), + slot = %gossip_verified_blob.slot(), + delay = ?delay, + commitment = %gossip_verified_blob.kzg_commitment(), + "Gossip blob arrived late" ); } debug!( - self.log, - "Successfully verified gossip blob"; - "slot" => %slot, - "root" => %root, - "index" => %index, - "commitment" => %gossip_verified_blob.kzg_commitment(), + %slot, + %root, + %index, + commitment = %gossip_verified_blob.kzg_commitment(), + "Successfully verified gossip blob" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); @@ -972,12 +947,11 @@ impl NetworkBeaconProcessor { match err { GossipBlobError::BlobParentUnknown { parent_root } => { debug!( - self.log, - "Unknown parent hash for blob"; - "action" => "requesting parent", - "block_root" => %root, - "parent_root" => %parent_root, - "commitment" => %commitment, + action = "requesting parent", + block_root = %root, + parent_root = %parent_root, + %commitment, + "Unknown parent hash for blob" ); self.send_sync_message(SyncMessage::UnknownParentBlob( peer_id, @@ -986,9 +960,8 @@ impl NetworkBeaconProcessor { } GossipBlobError::PubkeyCacheTimeout | GossipBlobError::BeaconChainError(_) => { crit!( - self.log, - "Internal error when verifying blob sidecar"; - "error" => ?err, + error = ?err, + "Internal error when verifying blob sidecar" ) } GossipBlobError::ProposalSignatureInvalid @@ -1000,13 
+973,12 @@ impl NetworkBeaconProcessor { | GossipBlobError::KzgError(_) | GossipBlobError::NotFinalizedDescendant { .. } => { warn!( - self.log, - "Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; - "error" => ?err, - "slot" => %slot, - "root" => %root, - "index" => %index, - "commitment" => %commitment, + error = ?err, + %slot, + %root, + %index, + %commitment, + "Could not verify blob sidecar for gossip. Rejecting the blob sidecar" ); // Prevent recurring behaviour by penalizing the peer. self.gossip_penalize_peer( @@ -1024,22 +996,20 @@ impl NetworkBeaconProcessor { // We may have received the blob from the EL. Do not penalise the peer. // Gossip filter should filter any duplicates received after this. debug!( - self.log, - "Received already available blob sidecar. Ignoring the blob sidecar"; - "slot" => %slot, - "root" => %root, - "index" => %index, + %slot, + %root, + %index, + "Received already available blob sidecar. Ignoring the blob sidecar" ) } GossipBlobError::FutureSlot { .. } => { debug!( - self.log, - "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; - "error" => ?err, - "slot" => %slot, - "root" => %root, - "index" => %index, - "commitment" => %commitment, + error = ?err, + %slot, + %root, + %index, + %commitment, + "Could not verify blob sidecar for gossip. Ignoring the blob sidecar" ); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -1055,13 +1025,12 @@ impl NetworkBeaconProcessor { } GossipBlobError::PastFinalizedSlot { .. } => { debug!( - self.log, - "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; - "error" => ?err, - "slot" => %slot, - "root" => %root, - "index" => %index, - "commitment" => %commitment, + error = ?err, + %slot, + %root, + %index, + %commitment, + "Could not verify blob sidecar for gossip. Ignoring the blob sidecar" ); // Prevent recurring behaviour by penalizing the peer. 
A low-tolerance // error is fine because there's no reason for peers to be propagating old @@ -1099,9 +1068,8 @@ impl NetworkBeaconProcessor { match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { info!( - self.log, - "Gossipsub blob processed - imported fully available block"; - "block_root" => %block_root + %block_root, + "Gossipsub blob processed - imported fully available block" ); self.chain.recompute_head_at_current_slot().await; @@ -1112,29 +1080,25 @@ impl NetworkBeaconProcessor { } Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { debug!( - self.log, - "Processed gossip blob - waiting for other components"; - "slot" => %slot, - "blob_index" => %blob_index, - "block_root" => %block_root, + %slot, + %blob_index, + %block_root, + "Processed gossip blob - waiting for other components" ); } Err(BlockError::DuplicateFullyImported(_)) => { debug!( - self.log, - "Ignoring gossip blob already imported"; - "block_root" => ?block_root, - "blob_index" => blob_index, + ?block_root, + blob_index, "Ignoring gossip blob already imported" ); } Err(err) => { debug!( - self.log, - "Invalid gossip blob"; - "outcome" => ?err, - "block_root" => ?block_root, - "block_slot" => blob_slot, - "blob_index" => blob_index, + outcome = ?err, + ?block_root, + %blob_slot, + blob_index, + "Invalid gossip blob" ); self.gossip_penalize_peer( peer_id, @@ -1177,9 +1141,8 @@ impl NetworkBeaconProcessor { Ok(availability) => match availability { AvailabilityProcessingStatus::Imported(block_root) => { info!( - self.log, - "Gossipsub data column processed, imported fully available block"; - "block_root" => %block_root + %block_root, + "Gossipsub data column processed, imported fully available block" ); self.chain.recompute_head_at_current_slot().await; @@ -1190,11 +1153,10 @@ impl NetworkBeaconProcessor { } AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { trace!( - self.log, - "Processed data column, waiting for other 
components"; - "slot" => %slot, - "data_column_index" => %data_column_index, - "block_root" => %block_root, + %slot, + %data_column_index, + %block_root, + "Processed data column, waiting for other components" ); self.attempt_data_column_reconstruction(block_root).await; @@ -1202,20 +1164,17 @@ impl NetworkBeaconProcessor { }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( - self.log, - "Ignoring gossip column already imported"; - "block_root" => ?block_root, - "data_column_index" => data_column_index, + ?block_root, + data_column_index, "Ignoring gossip column already imported" ); } Err(err) => { debug!( - self.log, - "Invalid gossip data column"; - "outcome" => ?err, - "block root" => ?block_root, - "block slot" => data_column_slot, - "data column index" => data_column_index, + outcome = ?err, + ?block_root, + block_slot = %data_column_slot, + data_column_index, + "Invalid gossip data column" ); self.gossip_penalize_peer( peer_id, @@ -1271,9 +1230,8 @@ impl NetworkBeaconProcessor { drop(handle); } else { debug!( - self.log, - "RPC block is being imported"; - "block_root" => %block_root, + %block_root, + "RPC block is being imported" ); } } @@ -1329,20 +1287,18 @@ impl NetworkBeaconProcessor { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL); debug!( - self.log, - "Gossip block arrived late"; - "block_root" => ?verified_block.block_root, - "proposer_index" => verified_block.block.message().proposer_index(), - "slot" => verified_block.block.slot(), - "block_delay" => ?block_delay, + block_root = ?verified_block.block_root, + proposer_index = verified_block.block.message().proposer_index(), + slot = ?verified_block.block.slot(), + ?block_delay, + "Gossip block arrived late" ); } info!( - self.log, - "New block received"; - "slot" => verified_block.block.slot(), - "root" => ?verified_block.block_root + slot = %verified_block.block.slot(), + root = 
?verified_block.block_root, + "New block received" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); @@ -1362,9 +1318,8 @@ impl NetworkBeaconProcessor { } Err(e @ BlockError::Slashable) => { warn!( - self.log, - "Received equivocating block from peer"; - "error" => ?e + error = ?e, + "Received equivocating block from peer" ); /* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */ self.gossip_penalize_peer( @@ -1375,19 +1330,14 @@ impl NetworkBeaconProcessor { return None; } Err(BlockError::ParentUnknown { .. }) => { - debug!( - self.log, - "Unknown parent for gossip block"; - "root" => ?block_root - ); + debug!(?block_root, "Unknown parent for gossip block"); self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)); return None; } Err(e @ BlockError::BeaconChainError(_)) => { debug!( - self.log, - "Gossip block beacon chain error"; - "error" => ?e, + error = ?e, + "Gossip block beacon chain error" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; @@ -1397,18 +1347,16 @@ impl NetworkBeaconProcessor { | BlockError::DuplicateImportStatusUnknown(..), ) => { debug!( - self.log, - "Gossip block is already known"; - "block_root" => %block_root, + %block_root, + "Gossip block is already known" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } Err(e @ BlockError::FutureSlot { .. }) => { debug!( - self.log, - "Could not verify block for gossip. Ignoring the block"; - "error" => %e + error = %e, + "Could not verify block for gossip. Ignoring the block" ); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( @@ -1422,9 +1370,8 @@ impl NetworkBeaconProcessor { Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. 
}) => { debug!( - self.log, - "Could not verify block for gossip. Ignoring the block"; - "error" => %e + error = %e, + "Could not verify block for gossip. Ignoring the block" ); // The spec says we must IGNORE these blocks but there's no reason for an honest // and non-buggy client to be gossiping blocks that blatantly conflict with @@ -1439,8 +1386,7 @@ impl NetworkBeaconProcessor { return None; } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { - debug!(self.log, "Could not verify block for gossip. Ignoring the block"; - "error" => %e); + debug!(error = %e, "Could not verify block for gossip. Ignoring the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } @@ -1458,8 +1404,7 @@ impl NetworkBeaconProcessor { | Err(e @ BlockError::ExecutionPayloadError(_)) | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { - warn!(self.log, "Could not verify block for gossip. Rejecting the block"; - "error" => %e); + warn!(error = %e, "Could not verify block for gossip. Rejecting the block"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( peer_id, @@ -1471,17 +1416,12 @@ impl NetworkBeaconProcessor { // Note: This error variant cannot be reached when doing gossip validation // as we do not do availability checks here. Err(e @ BlockError::AvailabilityCheck(_)) => { - crit!(self.log, "Internal block gossip validation error. Availability check during - gossip validation"; - "error" => %e - ); + crit!(error = %e, "Internal block gossip validation error. Availability check during gossip validation"); return None; } // BlobNotRequired is unreachable. 
Only constructed in `process_gossip_blob` Err(e @ BlockError::InternalError(_)) | Err(e @ BlockError::BlobNotRequired(_)) => { - error!(self.log, "Internal block gossip validation error"; - "error" => %e - ); + error!(error = %e, "Internal block gossip validation error"); return None; } }; @@ -1510,11 +1450,10 @@ impl NetworkBeaconProcessor { // tolerance for block imports. Ok(current_slot) if block_slot > current_slot => { warn!( - self.log, - "Block arrived early"; - "block_slot" => %block_slot, - "block_root" => ?block_root, - "msg" => "if this happens consistently, check system clock" + %block_slot, + ?block_root, + msg = "if this happens consistently, check system clock", + "Block arrived early" ); // Take note of how early this block arrived. @@ -1555,11 +1494,10 @@ impl NetworkBeaconProcessor { .is_err() { error!( - self.log, - "Failed to defer block import"; - "block_slot" => %block_slot, - "block_root" => ?block_root, - "location" => "block gossip" + %block_slot, + ?block_root, + location = "block gossip", + "Failed to defer block import" ) } None @@ -1567,12 +1505,11 @@ impl NetworkBeaconProcessor { Ok(_) => Some(verified_block), Err(e) => { error!( - self.log, - "Failed to defer block import"; - "error" => ?e, - "block_slot" => %block_slot, - "block_root" => ?block_root, - "location" => "block gossip" + error = ?e, + %block_slot, + ?block_root, + location = "block gossip", + "Failed to defer block import" ); None } @@ -1644,18 +1581,16 @@ impl NetworkBeaconProcessor { .is_err() { error!( - self.log, - "Failed to inform block import"; - "source" => "gossip", - "block_root" => ?block_root, + source = "gossip", + ?block_root, + "Failed to inform block import" ) }; debug!( - self.log, - "Gossipsub block processed"; - "block" => ?block_root, - "peer_id" => %peer_id + ?block_root, + %peer_id, + "Gossipsub block processed" ); self.chain.recompute_head_at_current_slot().await; @@ -1667,10 +1602,9 @@ impl NetworkBeaconProcessor { } 
Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { trace!( - self.log, - "Processed block, waiting for other components"; - "slot" => slot, - "block_root" => %block_root, + %slot, + %block_root, + "Processed block, waiting for other components" ); } Err(BlockError::ParentUnknown { .. }) => { @@ -1680,26 +1614,23 @@ impl NetworkBeaconProcessor { // can recover by receiving another block / blob / attestation referencing the // chain that includes this block. error!( - self.log, - "Block with unknown parent attempted to be processed"; - "block_root" => %block_root, - "peer_id" => %peer_id + %block_root, + %peer_id, + "Block with unknown parent attempted to be processed" ); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( - self.log, - "Failed to verify execution payload"; - "error" => %e + error = %e, + "Failed to verify execution payload" ); } Err(BlockError::AvailabilityCheck(err)) => { match err.category() { AvailabilityCheckErrorCategory::Internal => { warn!( - self.log, - "Internal availability check error"; - "error" => ?err, + error = ?err, + "Internal availability check error" ); } AvailabilityCheckErrorCategory::Malicious => { @@ -1711,20 +1642,18 @@ impl NetworkBeaconProcessor { // 2. The proposer being malicious and sending inconsistent // blocks and blobs. 
warn!( - self.log, - "Received invalid blob or malicious proposer"; - "error" => ?err + error = ?err, + "Received invalid blob or malicious proposer" ); } } } other => { debug!( - self.log, - "Invalid gossip beacon block"; - "outcome" => ?other, - "block root" => ?block_root, - "block slot" => block.slot() + outcome = ?other, + ?block_root, + block_slot = %block.slot(), + "Invalid gossip beacon block" ); self.gossip_penalize_peer( peer_id, @@ -1732,21 +1661,14 @@ impl NetworkBeaconProcessor { "bad_gossip_block_ssz", ); trace!( - self.log, - "Invalid gossip beacon block ssz"; - "ssz" => format_args!("0x{}", hex::encode(block.as_ssz_bytes())), + ssz = format_args!("0x{}", hex::encode(block.as_ssz_bytes())), + "Invalid gossip beacon block ssz" ); } }; if let Err(e) = &result { - self.maybe_store_invalid_block( - &invalid_block_storage, - block_root, - &block, - e, - &self.log, - ); + self.maybe_store_invalid_block(&invalid_block_storage, block_root, &block, e); } self.send_sync_message(SyncMessage::GossipBlockProcessResult { @@ -1768,20 +1690,18 @@ impl NetworkBeaconProcessor { Ok(ObservationOutcome::AlreadyKnown) => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); debug!( - self.log, - "Dropping exit for already exiting validator"; - "validator_index" => validator_index, - "peer" => %peer_id + validator_index, + peer = %peer_id, + "Dropping exit for already exiting validator" ); return; } Err(e) => { debug!( - self.log, - "Dropping invalid exit"; - "validator_index" => validator_index, - "peer" => %peer_id, - "error" => ?e + validator_index, + %peer_id, + error = ?e, + "Dropping invalid exit" ); // These errors occur due to a fault in the beacon chain. It is not necessarily // the fault on the peer. 
@@ -1808,7 +1728,7 @@ impl NetworkBeaconProcessor { self.chain.import_voluntary_exit(exit); - debug!(self.log, "Successfully imported voluntary exit"); + debug!("Successfully imported voluntary exit"); metrics::inc_counter(&metrics::BEACON_PROCESSOR_EXIT_IMPORTED_TOTAL); } @@ -1828,11 +1748,10 @@ impl NetworkBeaconProcessor { Ok(ObservationOutcome::New(slashing)) => slashing, Ok(ObservationOutcome::AlreadyKnown) => { debug!( - self.log, - "Dropping proposer slashing"; - "reason" => "Already seen a proposer slashing for that validator", - "validator_index" => validator_index, - "peer" => %peer_id + reason = "Already seen a proposer slashing for that validator", + validator_index, + peer = %peer_id, + "Dropping proposer slashing" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; @@ -1841,11 +1760,10 @@ impl NetworkBeaconProcessor { // This is likely a fault with the beacon chain and not necessarily a // malicious message from the peer. debug!( - self.log, - "Dropping invalid proposer slashing"; - "validator_index" => validator_index, - "peer" => %peer_id, - "error" => ?e + validator_index, + %peer_id, + error = ?e, + "Dropping invalid proposer slashing" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1870,7 +1788,7 @@ impl NetworkBeaconProcessor { .register_gossip_proposer_slashing(slashing.as_inner()); self.chain.import_proposer_slashing(slashing); - debug!(self.log, "Successfully imported proposer slashing"); + debug!("Successfully imported proposer slashing"); metrics::inc_counter(&metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_IMPORTED_TOTAL); } @@ -1888,20 +1806,18 @@ impl NetworkBeaconProcessor { Ok(ObservationOutcome::New(slashing)) => slashing, Ok(ObservationOutcome::AlreadyKnown) => { debug!( - self.log, - "Dropping attester slashing"; - "reason" => "Slashings already known for all slashed validators", - "peer" => %peer_id + reason = "Slashings already known for all slashed 
validators", + peer = %peer_id, + "Dropping attester slashing" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; } Err(e) => { debug!( - self.log, - "Dropping invalid attester slashing"; - "peer" => %peer_id, - "error" => ?e + %peer_id, + error = ?e, + "Dropping invalid attester slashing" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. @@ -1925,7 +1841,7 @@ impl NetworkBeaconProcessor { .register_gossip_attester_slashing(slashing.as_inner().to_ref()); self.chain.import_attester_slashing(slashing); - debug!(self.log, "Successfully imported attester slashing"); + debug!("Successfully imported attester slashing"); metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } @@ -1946,20 +1862,18 @@ impl NetworkBeaconProcessor { Ok(ObservationOutcome::AlreadyKnown) => { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); debug!( - self.log, - "Dropping BLS to execution change"; - "validator_index" => validator_index, - "peer" => %peer_id + validator_index, + peer = %peer_id, + "Dropping BLS to execution change" ); return; } Err(e) => { debug!( - self.log, - "Dropping invalid BLS to execution change"; - "validator_index" => validator_index, - "peer" => %peer_id, - "error" => ?e + validator_index, + %peer_id, + error = ?e, + "Dropping invalid BLS to execution change" ); // We ignore pre-capella messages without penalizing peers. 
if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) { @@ -1997,10 +1911,9 @@ impl NetworkBeaconProcessor { .import_bls_to_execution_change(change, received_pre_capella); debug!( - self.log, - "Successfully imported BLS to execution change"; - "validator_index" => validator_index, - "address" => ?address, + validator_index, + ?address, + "Successfully imported BLS to execution change" ); metrics::inc_counter(&metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_IMPORTED_TOTAL); @@ -2059,10 +1972,9 @@ impl NetworkBeaconProcessor { .add_to_naive_sync_aggregation_pool(sync_signature) { debug!( - self.log, - "Sync committee signature invalid for agg pool"; - "reason" => ?e, - "peer" => %peer_id, + reason = ?e, + %peer_id, + "Sync committee signature invalid for agg pool" ) } @@ -2121,10 +2033,9 @@ impl NetworkBeaconProcessor { .add_contribution_to_block_inclusion_pool(sync_contribution) { debug!( - self.log, - "Sync contribution invalid for op pool"; - "reason" => ?e, - "peer" => %peer_id, + reason = ?e, + %peer_id, + "Sync contribution invalid for op pool" ) } metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL); @@ -2149,10 +2060,9 @@ impl NetworkBeaconProcessor { match e { LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { debug!( - self.log, - "Light client invalid finality update"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client invalid finality update" ); self.gossip_penalize_peer( @@ -2163,10 +2073,9 @@ impl NetworkBeaconProcessor { } LightClientFinalityUpdateError::TooEarly => { debug!( - self.log, - "Light client finality update too early"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client finality update too early" ); self.gossip_penalize_peer( @@ -2177,10 +2086,9 @@ impl NetworkBeaconProcessor { } LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( - self.log, - "Light 
client error constructing finality update"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client error constructing finality update" ), } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2202,10 +2110,9 @@ impl NetworkBeaconProcessor { ) { Ok(verified_light_client_optimistic_update) => { debug!( - self.log, - "Light client successful optimistic update"; - "peer" => %peer_id, - "parent_root" => %verified_light_client_optimistic_update.parent_root, + %peer_id, + parent_root = %verified_light_client_optimistic_update.parent_root, + "Light client successful optimistic update" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); @@ -2217,10 +2124,9 @@ impl NetworkBeaconProcessor { &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, ); debug!( - self.log, - "Optimistic update for unknown block"; - "peer_id" => %peer_id, - "parent_root" => ?parent_root + %peer_id, + ?parent_root, + "Optimistic update for unknown block" ); if let Some(sender) = reprocess_tx { @@ -2241,17 +2147,13 @@ impl NetworkBeaconProcessor { ); if sender.try_send(msg).is_err() { - error!( - self.log, - "Failed to send optimistic update for re-processing"; - ) + error!("Failed to send optimistic update for re-processing") } } else { debug!( - self.log, - "Not sending light client update because it had been reprocessed"; - "peer_id" => %peer_id, - "parent_root" => ?parent_root + %peer_id, + ?parent_root, + "Not sending light client update because it had been reprocessed" ); self.propagate_validation_result( @@ -2266,10 +2168,9 @@ impl NetworkBeaconProcessor { metrics::register_optimistic_update_error(&e); debug!( - self.log, - "Light client invalid optimistic update"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client invalid optimistic update" ); self.gossip_penalize_peer( @@ -2281,10 +2182,9 @@ impl NetworkBeaconProcessor { 
LightClientOptimisticUpdateError::TooEarly => { metrics::register_optimistic_update_error(&e); debug!( - self.log, - "Light client optimistic update too early"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client optimistic update too early" ); self.gossip_penalize_peer( @@ -2298,10 +2198,9 @@ impl NetworkBeaconProcessor { metrics::register_optimistic_update_error(&e); debug!( - self.log, - "Light client error constructing optimistic update"; - "peer" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Light client error constructing optimistic update" ) } } @@ -2333,11 +2232,10 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message, _only_ if we trust our own clock. */ trace!( - self.log, - "Attestation is not within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "type" => ?attestation_type, + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Attestation is not within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots" ); // Peers that are slow or not to spec can spam us with these messages draining our @@ -2464,11 +2362,10 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ trace!( - self.log, - "Attestation already known"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "type" => ?attestation_type, + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Attestation already known" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; @@ -2481,11 +2378,10 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ trace!( - self.log, - "Aggregator already known"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "type" => ?attestation_type, + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Aggregator already known" ); // This is an allowed behaviour. 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2502,13 +2398,12 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ debug!( - self.log, - "Prior attestation known"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "epoch" => %epoch, - "validator_index" => validator_index, - "type" => ?attestation_type, + %peer_id, + block = ?beacon_block_root, + %epoch, + validator_index, + ?attestation_type, + "Prior attestation known" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2523,11 +2418,10 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message. */ debug!( - self.log, - "Validation Index too high"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "type" => ?attestation_type, + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Validation Index too high" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -2543,12 +2437,11 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message. 
*/ debug!( - self.log, - "Committee index non zero"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root, - "type" => ?attestation_type, - "committee_index" => index, + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + committee_index = index, + "Committee index non zero" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -2559,10 +2452,9 @@ impl NetworkBeaconProcessor { } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( - self.log, - "Attestation for unknown block"; - "peer_id" => %peer_id, - "block" => ?beacon_block_root + %peer_id, + block = ?beacon_block_root, + "Attestation for unknown block" ); if let Some(sender) = reprocess_tx { // We don't know the block, get the sync manager to handle the block lookup, and @@ -2573,11 +2465,7 @@ impl NetworkBeaconProcessor { *beacon_block_root, )) .unwrap_or_else(|_| { - warn!( - self.log, - "Failed to send to sync service"; - "msg" => "UnknownBlockHash" - ) + warn!(msg = "UnknownBlockHash", "Failed to send to sync service") }); let msg = match failed_att { FailedAtt::Aggregate { @@ -2606,9 +2494,8 @@ impl NetworkBeaconProcessor { // for `SingleAttestation`s separately and should not be able to hit // an `UnknownHeadBlock` error. error!( - self.log, - "Dropping SingleAttestation instead of requeueing"; - "block_root" => ?beacon_block_root, + block_root = ?beacon_block_root, + "Dropping SingleAttestation instead of requeueing" ); return; } @@ -2640,10 +2527,7 @@ impl NetworkBeaconProcessor { }; if sender.try_send(msg).is_err() { - error!( - self.log, - "Failed to send attestation for re-processing"; - ) + error!("Failed to send attestation for re-processing") } } else { // We shouldn't make any further attempts to process this attestation. @@ -2754,10 +2638,9 @@ impl NetworkBeaconProcessor { * The attestation was received on an incorrect subnet id. 
*/ debug!( - self.log, - "Received attestation on incorrect subnet"; - "expected" => ?expected, - "received" => ?received, + ?expected, + ?received, + "Received attestation on incorrect subnet" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -2815,10 +2698,9 @@ impl NetworkBeaconProcessor { * The message is not necessarily invalid, but we choose to ignore it. */ debug!( - self.log, - "Rejected long skip slot attestation"; - "head_block_slot" => head_block_slot, - "attestation_slot" => attestation_slot, + %head_block_slot, + %attestation_slot, + "Rejected long skip slot attestation" ); // In this case we wish to penalize gossipsub peers that do this to avoid future // attestations that have too many skip slots. @@ -2831,10 +2713,9 @@ impl NetworkBeaconProcessor { } AttnError::HeadBlockFinalized { beacon_block_root } => { debug!( - self.log, - "Ignored attestation to finalized block"; - "block_root" => ?beacon_block_root, - "attestation_slot" => failed_att.attestation_data().slot, + block_root = ?beacon_block_root, + attestation_slot = %failed_att.attestation_data().slot, + "Ignored attestation to finalized block" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2850,19 +2731,18 @@ impl NetworkBeaconProcessor { AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( HotColdDBError::FinalizedStateNotInHotDatabase { .. 
}, ))) => { - debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); + debug!(%peer_id, "Attestation for finalized state"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } e @ AttnError::BeaconChainError(BeaconChainError::MaxCommitteePromises(_)) => { debug!( - self.log, - "Dropping attestation"; - "target_root" => ?failed_att.attestation_data().target.root, - "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation_data().slot, - "type" => ?attestation_type, - "error" => ?e, - "peer_id" => % peer_id + target_root = ?failed_att.attestation_data().target.root, + ?beacon_block_root, + slot = ?failed_att.attestation_data().slot, + ?attestation_type, + error = ?e, + %peer_id, + "Dropping attestation" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } @@ -2875,25 +2755,23 @@ impl NetworkBeaconProcessor { * It's not clear if the message is invalid/malicious. */ error!( - self.log, - "Unable to validate attestation"; - "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation_data().slot, - "type" => ?attestation_type, - "peer_id" => %peer_id, - "error" => ?e, + ?beacon_block_root, + slot = ?failed_att.attestation_data().slot, + ?attestation_type, + %peer_id, + error = ?e, + "Unable to validate attestation" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } } debug!( - self.log, - "Invalid attestation from network"; - "reason" => ?error, - "block" => ?beacon_block_root, - "peer_id" => %peer_id, - "type" => ?attestation_type, + reason = ?error, + block = ?beacon_block_root, + %peer_id, + ?attestation_type, + "Invalid attestation from network" ); } @@ -2919,10 +2797,9 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message, _only_ if we trust our own clock. 
*/ trace!( - self.log, - "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots" ); // Unlike attestations, we have a zero slot buffer in case of sync committee messages, @@ -2944,10 +2821,9 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message, _only_ if we trust our own clock. */ trace!( - self.log, - "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots" ); // Compute the slot when we received the message. @@ -3037,10 +2913,9 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ trace!( - self.log, - "Sync committee message is already known"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Sync committee message is already known" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; @@ -3053,10 +2928,9 @@ impl NetworkBeaconProcessor { * The peer has published an invalid consensus message. 
*/ debug!( - self.log, - "Validation Index too high"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Validation Index too high" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -3067,10 +2941,9 @@ impl NetworkBeaconProcessor { } SyncCommitteeError::UnknownValidatorPubkey(_) => { debug!( - self.log, - "Validator pubkey is unknown"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Validator pubkey is unknown" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -3084,10 +2957,9 @@ impl NetworkBeaconProcessor { * The sync committee message was received on an incorrect subnet id. */ debug!( - self.log, - "Received sync committee message on incorrect subnet"; - "expected" => ?expected, - "received" => ?received, + ?expected, + ?received, + "Received sync committee message on incorrect subnet" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -3116,10 +2988,9 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ debug!( - self.log, - "Prior sync committee message known"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Prior sync committee message known" ); // Do not penalize the peer. @@ -3135,10 +3006,9 @@ impl NetworkBeaconProcessor { * The peer is not necessarily faulty. */ debug!( - self.log, - "Prior sync contribution message known"; - "peer_id" => %peer_id, - "type" => ?message_type, + %peer_id, + ?message_type, + "Prior sync contribution message known" ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. @@ -3161,10 +3031,9 @@ impl NetworkBeaconProcessor { * It's not clear if the message is invalid/malicious. 
*/ error!( - self.log, - "Unable to validate sync committee message"; - "peer_id" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Unable to validate sync committee message" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } @@ -3177,10 +3046,9 @@ impl NetworkBeaconProcessor { * It's not clear if the message is invalid/malicious. */ error!( - self.log, - "Unable to validate sync committee message"; - "peer_id" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Unable to validate sync committee message" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly @@ -3192,10 +3060,9 @@ impl NetworkBeaconProcessor { } SyncCommitteeError::ContributionError(e) => { error!( - self.log, - "Error while processing sync contribution"; - "peer_id" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Error while processing sync contribution" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly @@ -3207,10 +3074,9 @@ impl NetworkBeaconProcessor { } SyncCommitteeError::SyncCommitteeError(e) => { error!( - self.log, - "Error while processing sync committee message"; - "peer_id" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Error while processing sync committee message" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly @@ -3225,10 +3091,9 @@ impl NetworkBeaconProcessor { This would most likely imply incompatible configs or an invalid message. 
*/ error!( - self.log, - "Arithematic error while processing sync committee message"; - "peer_id" => %peer_id, - "error" => ?e, + %peer_id, + error = ?e, + "Arithematic error while processing sync committee message" ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); self.gossip_penalize_peer( @@ -3251,11 +3116,10 @@ impl NetworkBeaconProcessor { } } debug!( - self.log, - "Invalid sync committee message from network"; - "reason" => ?error, - "peer_id" => %peer_id, - "type" => ?message_type, + reason = ?error, + %peer_id, + ?message_type, + "Invalid sync committee message from network" ); } @@ -3313,7 +3177,6 @@ impl NetworkBeaconProcessor { block_root: Hash256, block: &SignedBeaconBlock, error: &BlockError, - log: &Logger, ) { if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage { let block_path = base_dir.join(format!("{}_{:?}.ssz", block.slot(), block_root)); @@ -3341,20 +3204,18 @@ impl NetworkBeaconProcessor { }); if let Err(e) = write_result { error!( - log, - "Failed to store invalid block/error"; - "error" => e, - "path" => ?path, - "root" => ?block_root, - "slot" => block.slot(), + error = e, + ?path, + ?block_root, + slot = %block.slot(), + "Failed to store invalid block/error" ) } else { info!( - log, - "Stored invalid block/error "; - "path" => ?path, - "root" => ?block_root, - "slot" => block.slot(), + ?path, + ?block_root, + slot = %block.slot(), + "Stored invalid block/error" ) } }; diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 13c7df8095..b99e71bcea 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -9,13 +9,11 @@ use beacon_chain::fetch_blobs::{ }; use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::{ - builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, - 
BeaconChainTypes, BlockError, NotifyExecutionLayer, + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer, }; use beacon_processor::{ - work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, - DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, - WorkEvent as BeaconWorkEvent, + work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, + GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ @@ -28,15 +26,12 @@ use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; use rand::prelude::SliceRandom; -use slog::{debug, error, trace, warn, Logger}; -use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; -use store::MemoryStore; use task_executor::TaskExecutor; -use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::{self, error::TrySendError}; +use tracing::{debug, error, trace, warn, Instrument}; use types::*; pub use sync_methods::ChainSegmentProcessId; @@ -71,7 +66,6 @@ pub struct NetworkBeaconProcessor { pub network_globals: Arc>, pub invalid_block_storage: InvalidBlockStorage, pub executor: TaskExecutor, - pub log: Logger, } // Publish blobs in batches of exponentially increasing size. @@ -600,10 +594,7 @@ impl NetworkBeaconProcessor { blocks: Vec>, ) -> Result<(), Error> { let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); - debug!(self.log, "Batch sending for process"; - "blocks" => blocks.len(), - "id" => ?process_id, - ); + debug!(blocks = blocks.len(), id = ?process_id, "Batch sending for process"); let processor = self.clone(); let process_fn = async move { @@ -916,10 +907,9 @@ impl NetworkBeaconProcessor { /// /// Creates a log if there is an internal error. 
pub(crate) fn send_sync_message(&self, message: SyncMessage) { - self.sync_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the sync service"; - "error" => %e) - }); + self.sync_tx + .send(message) + .unwrap_or_else(|e| debug!(error = %e, "Could not send message to the sync service")); } /// Send a message to `network_tx`. @@ -927,8 +917,7 @@ impl NetworkBeaconProcessor { /// Creates a log if there is an internal error. fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - debug!(self.log, "Could not send message to the network service. Likely shutdown"; - "error" => %e) + debug!(error = %e, "Could not send message to the network service. Likely shutdown") }); } @@ -958,48 +947,48 @@ impl NetworkBeaconProcessor { block.clone(), publish_fn, ) + .instrument(tracing::info_span!( + "", + service = "fetch_engine_blobs", + block_root = format!("{:?}", block_root) + )) .await { Ok(Some(availability)) => match availability { AvailabilityProcessingStatus::Imported(_) => { debug!( - self.log, - "Block components retrieved from EL"; - "result" => "imported block and custody columns", - "block_root" => %block_root, + result = "imported block and custody columns", + %block_root, + "Block components retrieved from EL" ); self.chain.recompute_head_at_current_slot().await; } AvailabilityProcessingStatus::MissingComponents(_, _) => { debug!( - self.log, - "Still missing blobs after engine blobs processed successfully"; - "block_root" => %block_root, + %block_root, + "Still missing blobs after engine blobs processed successfully" ); } }, Ok(None) => { debug!( - self.log, - "Fetch blobs completed without import"; - "block_root" => %block_root, + %block_root, + "Fetch blobs completed without import" ); } Err(FetchEngineBlobError::BlobProcessingError(BlockError::DuplicateFullyImported( .., ))) => { debug!( - self.log, - "Fetch blobs duplicate import"; - "block_root" => %block_root, + 
%block_root, + "Fetch blobs duplicate import" ); } Err(e) => { error!( - self.log, - "Error fetching or processing blobs from EL"; - "error" => ?e, - "block_root" => %block_root, + error = ?e, + %block_root, + "Error fetching or processing blobs from EL" ); } } @@ -1022,19 +1011,17 @@ impl NetworkBeaconProcessor { match &availability_processing_status { AvailabilityProcessingStatus::Imported(hash) => { debug!( - self.log, - "Block components available via reconstruction"; - "result" => "imported block and custody columns", - "block_hash" => %hash, + result = "imported block and custody columns", + block_hash = %hash, + "Block components available via reconstruction" ); self.chain.recompute_head_at_current_slot().await; } AvailabilityProcessingStatus::MissingComponents(_, _) => { debug!( - self.log, - "Block components still missing block after reconstruction"; - "result" => "imported all custody columns", - "block_hash" => %block_root, + result = "imported all custody columns", + block_hash = %block_root, + "Block components still missing block after reconstruction" ); } } @@ -1044,18 +1031,16 @@ impl NetworkBeaconProcessor { Ok(None) => { // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric trace!( - self.log, - "Reconstruction not required for block"; - "block_hash" => %block_root, + block_hash = %block_root, + "Reconstruction not required for block" ); None } Err(e) => { error!( - self.log, - "Error during data column reconstruction"; - "block_root" => %block_root, - "error" => ?e + %block_root, + error = ?e, + "Error during data column reconstruction" ); None } @@ -1078,7 +1063,6 @@ impl NetworkBeaconProcessor { self.executor.spawn( async move { let chain = self_clone.chain.clone(); - let log = self_clone.chain.logger(); let publish_fn = |blobs: Vec>>| { self_clone.send_network_message(NetworkMessage::Publish { messages: blobs @@ -1107,9 +1091,8 @@ impl NetworkBeaconProcessor { Err(GossipBlobError::RepeatBlob { .. 
}) => None, Err(e) => { warn!( - log, - "Previously verified blob is invalid"; - "error" => ?e + error = ?e, + "Previously verified blob is invalid" ); None } @@ -1118,10 +1101,9 @@ impl NetworkBeaconProcessor { if !publishable.is_empty() { debug!( - log, - "Publishing blob batch"; - "publish_count" => publishable.len(), - "block_root" => ?block_root, + publish_count = publishable.len(), + ?block_root, + "Publishing blob batch" ); publish_count += publishable.len(); publish_fn(publishable); @@ -1132,12 +1114,11 @@ impl NetworkBeaconProcessor { } debug!( - log, - "Batch blob publication complete"; - "batch_interval" => blob_publication_batch_interval.as_millis(), - "blob_count" => blob_count, - "published_count" => publish_count, - "block_root" => ?block_root, + batch_interval = blob_publication_batch_interval.as_millis(), + blob_count, + publish_count, + ?block_root, + "Batch blob publication complete" ) }, "gradual_blob_publication", @@ -1160,7 +1141,6 @@ impl NetworkBeaconProcessor { self.executor.spawn( async move { let chain = self_clone.chain.clone(); - let log = self_clone.chain.logger(); let publish_fn = |columns: DataColumnSidecarList| { self_clone.send_network_message(NetworkMessage::Publish { messages: columns @@ -1193,9 +1173,8 @@ impl NetworkBeaconProcessor { Err(GossipDataColumnError::PriorKnown { .. 
}) => None, Err(e) => { warn!( - log, - "Previously verified data column is invalid"; - "error" => ?e + error = ?e, + "Previously verified data column is invalid" ); None } @@ -1204,10 +1183,9 @@ impl NetworkBeaconProcessor { if !publishable.is_empty() { debug!( - log, - "Publishing data column batch"; - "publish_count" => publishable.len(), - "block_root" => ?block_root, + publish_count = publishable.len(), + ?block_root, + "Publishing data column batch" ); publish_count += publishable.len(); publish_fn(publishable); @@ -1217,13 +1195,12 @@ impl NetworkBeaconProcessor { } debug!( - log, - "Batch data column publishing complete"; - "batch_size" => batch_size, - "batch_interval" => blob_publication_batch_interval.as_millis(), - "data_columns_to_publish_count" => data_columns_to_publish.len(), - "published_count" => publish_count, - "block_root" => ?block_root, + batch_size, + batch_interval = blob_publication_batch_interval.as_millis(), + data_columns_to_publish_count = data_columns_to_publish.len(), + publish_count, + ?block_root, + "Batch data column publishing complete" ) }, "gradual_data_column_publication", @@ -1231,9 +1208,20 @@ impl NetworkBeaconProcessor { } } +#[cfg(test)] +use { + beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend}, + beacon_processor::BeaconProcessorChannels, + slot_clock::ManualSlotClock, + store::MemoryStore, + tokio::sync::mpsc::UnboundedSender, +}; + +#[cfg(test)] type TestBeaconChainType = Witness, E, MemoryStore, MemoryStore>; +#[cfg(test)] impl NetworkBeaconProcessor> { // Instantiates a mostly non-functional version of `Self` and returns the // event receiver that would normally go to the beacon processor. 
This is @@ -1244,7 +1232,6 @@ impl NetworkBeaconProcessor> { sync_tx: UnboundedSender>, chain: Arc>>, executor: TaskExecutor, - log: Logger, ) -> (Self, mpsc::Receiver>) { let BeaconProcessorChannels { beacon_processor_tx, @@ -1265,7 +1252,6 @@ impl NetworkBeaconProcessor> { network_globals, invalid_block_storage: InvalidBlockStorage::Disabled, executor, - log, }; (network_beacon_processor, beacon_processor_rx) diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 67a1570275..da8e595ddc 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -11,11 +11,11 @@ use lighthouse_network::rpc::methods::{ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use methods::LightClientUpdatesByRangeRequest; -use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; use std::sync::Arc; use tokio_stream::StreamExt; +use tracing::{debug, error, warn}; use types::blob_sidecar::BlobIdentifier; use types::{Epoch, EthSpec, FixedBytesExtended, Hash256, Slot}; @@ -115,7 +115,7 @@ impl NetworkBeaconProcessor { pub fn process_status(&self, peer_id: PeerId, status: StatusMessage) { match self.check_peer_relevance(&status) { Ok(Some(irrelevant_reason)) => { - debug!(self.log, "Handshake Failure"; "peer" => %peer_id, "reason" => irrelevant_reason); + debug!(%peer_id, reason = irrelevant_reason, "Handshake Failure"); self.goodbye_peer(peer_id, GoodbyeReason::IrrelevantNetwork); } Ok(None) => { @@ -127,9 +127,10 @@ impl NetworkBeaconProcessor { }; self.send_sync_message(SyncMessage::AddPeer(peer_id, info)); } - Err(e) => error!(self.log, "Could not process status message"; - "peer" => %peer_id, - "error" => ?e + Err(e) => error!( + %peer_id, + error = ?e, + "Could not process status message" ), } 
} @@ -172,11 +173,10 @@ impl NetworkBeaconProcessor { ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { debug!( - self.log, - "BlocksByRoot outgoing response processed"; - "peer" => %peer_id, - "requested" => requested_blocks, - "returned" => %send_block_count + %peer_id, + requested = requested_blocks, + returned = %send_block_count, + "BlocksByRoot outgoing response processed" ); }; @@ -187,7 +187,7 @@ impl NetworkBeaconProcessor { { Ok(block_stream) => block_stream, Err(e) => { - error!(self.log, "Error getting block stream"; "error" => ?e); + error!( error = ?e, "Error getting block stream"); return Err((RpcErrorResponse::ServerError, "Error getting block stream")); } }; @@ -207,18 +207,16 @@ impl NetworkBeaconProcessor { } Ok(None) => { debug!( - self.log, - "Peer requested unknown block"; - "peer" => %peer_id, - "request_root" => ?root + %peer_id, + request_root = ?root, + "Peer requested unknown block" ); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( - self.log, - "Failed to fetch execution payload for blocks by root request"; - "block_root" => ?root, - "reason" => "execution layer not synced", + block_root = ?root, + reason = "execution layer not synced", + "Failed to fetch execution payload for blocks by root request" ); log_results(peer_id, requested_blocks, send_block_count); return Err(( @@ -228,11 +226,10 @@ impl NetworkBeaconProcessor { } Err(e) => { debug!( - self.log, - "Error fetching block for peer"; - "peer" => %peer_id, - "request_root" => ?root, - "error" => ?e, + ?peer_id, + request_root = ?root, + error = ?e, + "Error fetching block for peer" ); } } @@ -332,23 +329,21 @@ impl NetworkBeaconProcessor { } Err(e) => { debug!( - self.log, - "Error fetching blob for peer"; - "peer" => %peer_id, - "request_root" => ?root, - "error" => ?e, + ?peer_id, + request_root = ?root, + error = ?e, + "Error fetching blob for peer" ); } } } } debug!( - self.log, - 
"BlobsByRoot outgoing response processed"; - "peer" => %peer_id, - "request_root" => %requested_root, - "request_indices" => ?requested_indices, - "returned" => send_blob_count + %peer_id, + %requested_root, + ?requested_indices, + returned = send_blob_count, + "BlobsByRoot outgoing response processed" ); Ok(()) @@ -408,10 +403,11 @@ impl NetworkBeaconProcessor { Ok(None) => {} // no-op Err(e) => { // TODO(das): lower log level when feature is stabilized - error!(self.log, "Error getting data column"; - "block_root" => ?data_column_id.block_root, - "peer" => %peer_id, - "error" => ?e + error!( + block_root = ?data_column_id.block_root, + %peer_id, + error = ?e, + "Error getting data column" ); return Err((RpcErrorResponse::ServerError, "Error getting data column")); } @@ -419,11 +415,10 @@ impl NetworkBeaconProcessor { } debug!( - self.log, - "Received DataColumnsByRoot Request"; - "peer" => %peer_id, - "request" => ?request.group_by_ordered_block_root(), - "returned" => send_data_column_count + %peer_id, + request = ?request.group_by_ordered_block_root(), + returned = send_data_column_count, + "Received DataColumnsByRoot Request" ); Ok(()) @@ -463,10 +458,11 @@ impl NetworkBeaconProcessor { request_id: RequestId, req: LightClientUpdatesByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { - debug!(self.log, "Received LightClientUpdatesByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_period" => req.start_period, + debug!( + %peer_id, + count = req.count, + start_period = req.start_period, + "Received LightClientUpdatesByRange Request" ); // Should not send more than max light client updates @@ -484,10 +480,11 @@ impl NetworkBeaconProcessor { { Ok(lc_updates) => lc_updates, Err(e) => { - error!(self.log, "Unable to obtain light client updates"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + peer = %peer_id, + error = ?e, + "Unable to obtain light client updates" ); return 
Err((RpcErrorResponse::ServerError, "Database error")); } @@ -506,22 +503,20 @@ impl NetworkBeaconProcessor { if lc_updates_sent < req.count as usize { debug!( - self.log, - "LightClientUpdatesByRange outgoing response processed"; - "peer" => %peer_id, - "info" => "Failed to return all requested light client updates. The peer may have requested data ahead of whats currently available", - "start_period" => req.start_period, - "requested" => req.count, - "returned" => lc_updates_sent + peer = %peer_id, + info = "Failed to return all requested light client updates. The peer may have requested data ahead of whats currently available", + start_period = req.start_period, + requested = req.count, + returned = lc_updates_sent, + "LightClientUpdatesByRange outgoing response processed" ); } else { debug!( - self.log, - "LightClientUpdatesByRange outgoing response processed"; - "peer" => %peer_id, - "start_period" => req.start_period, - "requested" => req.count, - "returned" => lc_updates_sent + peer = %peer_id, + start_period = req.start_period, + requested = req.count, + returned = lc_updates_sent, + "LightClientUpdatesByRange outgoing response processed" ); } @@ -549,10 +544,11 @@ impl NetworkBeaconProcessor { "Bootstrap not available".to_string(), )), Err(e) => { - error!(self.log, "Error getting LightClientBootstrap instance"; - "block_root" => ?request.root, - "peer" => %peer_id, - "error" => ?e + error!( + block_root = ?request.root, + %peer_id, + error = ?e, + "Error getting LightClientBootstrap instance" ); Err((RpcErrorResponse::ResourceUnavailable, format!("{:?}", e))) } @@ -653,10 +649,11 @@ impl NetworkBeaconProcessor { request_id: RequestId, req: BlocksByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { - debug!(self.log, "Received BlocksByRange Request"; - "peer_id" => %peer_id, - "count" => req.count(), - "start_slot" => req.start_slot(), + debug!( + %peer_id, + count = req.count(), + start_slot = %req.start_slot(), + "Received BlocksByRange 
Request" ); let forwards_block_root_iter = match self @@ -668,17 +665,19 @@ impl NetworkBeaconProcessor { slot, oldest_block_slot, }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot + debug!( + requested_slot = %slot, + oldest_known_slot = %oldest_block_slot, + "Range request failed during backfill" ); return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Unable to obtain root iter" ); return Err((RpcErrorResponse::ServerError, "Database error")); } @@ -706,10 +705,11 @@ impl NetworkBeaconProcessor { let block_roots = match maybe_block_roots { Ok(block_roots) => block_roots, Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Error during iteration over blocks" ); return Err((RpcErrorResponse::ServerError, "Iteration error")); } @@ -726,24 +726,22 @@ impl NetworkBeaconProcessor { let log_results = |req: BlocksByRangeRequest, peer_id, blocks_sent| { if blocks_sent < (*req.count() as usize) { debug!( - self.log, - "BlocksByRange outgoing response processed"; - "peer" => %peer_id, - "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot(), - "current_slot" => current_slot, - "requested" => req.count(), - "returned" => blocks_sent + %peer_id, + msg = "Failed to return all requested blocks", + start_slot = %req.start_slot(), + %current_slot, + requested = req.count(), + returned = blocks_sent, + "BlocksByRange outgoing response processed" ); } else { debug!( - self.log, - "BlocksByRange outgoing response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot(), - "current_slot" => current_slot, - "requested" => req.count(), - 
"returned" => blocks_sent + %peer_id, + start_slot = %req.start_slot(), + %current_slot, + requested = req.count(), + returned = blocks_sent, + "BlocksByRange outgoing response processed" ); } }; @@ -751,7 +749,7 @@ impl NetworkBeaconProcessor { let mut block_stream = match self.chain.get_blocks(block_roots) { Ok(block_stream) => block_stream, Err(e) => { - error!(self.log, "Error getting block stream"; "error" => ?e); + error!(error = ?e, "Error getting block stream"); return Err((RpcErrorResponse::ServerError, "Iterator error")); } }; @@ -777,21 +775,19 @@ impl NetworkBeaconProcessor { } Ok(None) => { error!( - self.log, - "Block in the chain is not in the store"; - "request" => ?req, - "peer" => %peer_id, - "request_root" => ?root + request = ?req, + %peer_id, + request_root = ?root, + "Block in the chain is not in the store" ); log_results(req, peer_id, blocks_sent); return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { debug!( - self.log, - "Failed to fetch execution payload for blocks by range request"; - "block_root" => ?root, - "reason" => "execution layer not synced", + block_root = ?root, + reason = "execution layer not synced", + "Failed to fetch execution payload for blocks by range request" ); log_results(req, peer_id, blocks_sent); // send the stream terminator @@ -807,18 +803,16 @@ impl NetworkBeaconProcessor { if matches!(**boxed_error, execution_layer::Error::EngineError(_)) ) { warn!( - self.log, - "Error rebuilding payload for peer"; - "info" => "this may occur occasionally when the EE is busy", - "block_root" => ?root, - "error" => ?e, + info = "this may occur occasionally when the EE is busy", + block_root = ?root, + error = ?e, + "Error rebuilding payload for peer" ); } else { error!( - self.log, - "Error fetching block for peer"; - "block_root" => ?root, - "error" => ?e + block_root = ?root, + error = ?e, + "Error fetching block for peer" ); } 
log_results(req, peer_id, blocks_sent); @@ -866,10 +860,11 @@ impl NetworkBeaconProcessor { request_id: RequestId, req: BlobsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { - debug!(self.log, "Received BlobsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, + debug!( + ?peer_id, + count = req.count, + start_slot = req.start_slot, + "Received BlobsByRange Request" ); let request_start_slot = Slot::from(req.start_slot); @@ -877,7 +872,7 @@ impl NetworkBeaconProcessor { let data_availability_boundary_slot = match self.chain.data_availability_boundary() { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { - debug!(self.log, "Deneb fork is disabled"); + debug!("Deneb fork is disabled"); return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -890,11 +885,10 @@ impl NetworkBeaconProcessor { .unwrap_or(data_availability_boundary_slot); if request_start_slot < oldest_blob_slot { debug!( - self.log, - "Range request start slot is older than data availability boundary."; - "requested_slot" => request_start_slot, - "oldest_blob_slot" => oldest_blob_slot, - "data_availability_boundary" => data_availability_boundary_slot + %request_start_slot, + %oldest_blob_slot, + %data_availability_boundary_slot, + "Range request start slot is older than data availability boundary." 
); return if data_availability_boundary_slot < oldest_blob_slot { @@ -917,17 +911,19 @@ impl NetworkBeaconProcessor { slot, oldest_block_slot, }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot + debug!( + requested_slot = %slot, + oldest_known_slot = %oldest_block_slot, + "Range request failed during backfill" ); return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Unable to obtain root iter" ); return Err((RpcErrorResponse::ServerError, "Database error")); } @@ -961,10 +957,11 @@ impl NetworkBeaconProcessor { let block_roots = match maybe_block_roots { Ok(block_roots) => block_roots, Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Error during iteration over blocks" ); return Err((RpcErrorResponse::ServerError, "Database error")); } @@ -977,13 +974,12 @@ impl NetworkBeaconProcessor { let log_results = |peer_id, req: BlobsByRangeRequest, blobs_sent| { debug!( - self.log, - "BlobsByRange outgoing response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blobs_sent + %peer_id, + start_slot = req.start_slot, + %current_slot, + requested = req.count, + returned = blobs_sent, + "BlobsByRange outgoing response processed" ); }; @@ -1006,12 +1002,11 @@ impl NetworkBeaconProcessor { } Err(e) => { error!( - self.log, - "Error fetching blobs block root"; - "request" => ?req, - "peer" => %peer_id, - "block_root" => ?root, - "error" => ?e + request = ?req, + %peer_id, + block_root = ?root, + error = ?e, + "Error fetching blobs block root" ); log_results(peer_id, req, 
blobs_sent); @@ -1061,10 +1056,11 @@ impl NetworkBeaconProcessor { request_id: RequestId, req: DataColumnsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { - debug!(self.log, "Received DataColumnsByRange Request"; - "peer_id" => %peer_id, - "count" => req.count, - "start_slot" => req.start_slot, + debug!( + %peer_id, + count = req.count, + start_slot = req.start_slot, + "Received DataColumnsByRange Request" ); // Should not send more than max request data columns @@ -1080,7 +1076,7 @@ impl NetworkBeaconProcessor { let data_availability_boundary_slot = match self.chain.data_availability_boundary() { Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), None => { - debug!(self.log, "Deneb fork is disabled"); + debug!("Deneb fork is disabled"); return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled")); } }; @@ -1094,11 +1090,10 @@ impl NetworkBeaconProcessor { if request_start_slot < oldest_data_column_slot { debug!( - self.log, - "Range request start slot is older than data availability boundary."; - "requested_slot" => request_start_slot, - "oldest_data_column_slot" => oldest_data_column_slot, - "data_availability_boundary" => data_availability_boundary_slot + %request_start_slot, + %oldest_data_column_slot, + %data_availability_boundary_slot, + "Range request start slot is older than data availability boundary." 
); return if data_availability_boundary_slot < oldest_data_column_slot { @@ -1121,17 +1116,19 @@ impl NetworkBeaconProcessor { slot, oldest_block_slot, }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot + debug!( + requested_slot = %slot, + oldest_known_slot = %oldest_block_slot, + "Range request failed during backfill" ); return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); } Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Unable to obtain root iter" ); return Err((RpcErrorResponse::ServerError, "Database error")); } @@ -1165,10 +1162,11 @@ impl NetworkBeaconProcessor { let block_roots = match maybe_block_roots { Ok(block_roots) => block_roots, Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e + error!( + request = ?req, + %peer_id, + error = ?e, + "Error during iteration over blocks" ); return Err((RpcErrorResponse::ServerError, "Database error")); } @@ -1195,12 +1193,11 @@ impl NetworkBeaconProcessor { Ok(None) => {} // no-op Err(e) => { error!( - self.log, - "Error fetching data columns block root"; - "request" => ?req, - "peer" => %peer_id, - "block_root" => ?root, - "error" => ?e + request = ?req, + %peer_id, + block_root = ?root, + error = ?e, + "Error fetching data columns block root" ); return Err(( RpcErrorResponse::ServerError, @@ -1217,13 +1214,12 @@ impl NetworkBeaconProcessor { .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); debug!( - self.log, - "DataColumnsByRange Response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => data_columns_sent + %peer_id, + start_slot = req.start_slot, + %current_slot, + requested = req.count, + returned = 
data_columns_sent, + "DataColumnsByRange Response processed" ); Ok(()) diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index f5fe7ee98b..65097da0c6 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -18,11 +18,11 @@ use beacon_processor::{ AsyncFn, BlockingFn, DuplicateCache, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, warn}; use std::sync::Arc; use std::time::Duration; use store::KzgCommitment; use tokio::sync::mpsc; +use tracing::{debug, error, info, warn}; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; use types::{BlockImportSource, DataColumnSidecar, DataColumnSidecarList, Epoch, Hash256}; @@ -112,11 +112,10 @@ impl NetworkBeaconProcessor { // Check if the block is already being imported through another source let Some(handle) = duplicate_cache.check_and_insert(block_root) else { debug!( - self.log, - "Gossip block is being processed"; - "action" => "sending rpc block to reprocessing queue", - "block_root" => %block_root, - "process_type" => ?process_type, + action = "sending rpc block to reprocessing queue", + %block_root, + ?process_type, + "Gossip block is being processed" ); // Send message to work reprocess queue to retry the block @@ -133,7 +132,7 @@ impl NetworkBeaconProcessor { }); if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root) + error!(source = "rpc", %block_root,"Failed to inform block import") }; return; }; @@ -144,13 +143,12 @@ impl NetworkBeaconProcessor { let commitments_formatted = block.as_block().commitments_formatted(); debug!( - self.log, - "Processing RPC block"; - "block_root" => ?block_root, - "proposer" => block.message().proposer_index(), - "slot" => 
block.slot(), - "commitments" => commitments_formatted, - "process_type" => ?process_type, + ?block_root, + proposer = block.message().proposer_index(), + slot = %block.slot(), + commitments_formatted, + ?process_type, + "Processing RPC block" ); let signed_beacon_block = block.block_cloned(); @@ -168,15 +166,22 @@ impl NetworkBeaconProcessor { // RPC block imported, regardless of process type match result.as_ref() { Ok(AvailabilityProcessingStatus::Imported(hash)) => { - info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); - + info!( + %slot, + %hash, + "New RPC block received", + ); // Trigger processing for work referencing this block. let reprocess_msg = ReprocessQueueMessage::BlockImported { block_root: *hash, parent_root, }; if reprocess_tx.try_send(reprocess_msg).is_err() { - error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) + error!( + source = "rpc", + block_root = %hash, + "Failed to inform block import" + ); }; self.chain.block_times_cache.write().set_time_observed( *hash, @@ -265,12 +270,11 @@ impl NetworkBeaconProcessor { let commitments = format_kzg_commitments(&commitments); debug!( - self.log, - "RPC blobs received"; - "indices" => ?indices, - "block_root" => %block_root, - "slot" => %slot, - "commitments" => commitments, + ?indices, + %block_root, + %slot, + commitments, + "RPC blobs received" ); if let Ok(current_slot) = self.chain.slot() { @@ -290,37 +294,33 @@ impl NetworkBeaconProcessor { match &result { Ok(AvailabilityProcessingStatus::Imported(hash)) => { debug!( - self.log, - "Block components retrieved"; - "result" => "imported block and blobs", - "slot" => %slot, - "block_hash" => %hash, + result = "imported block and blobs", + %slot, + block_hash = %hash, + "Block components retrieved" ); self.chain.recompute_head_at_current_slot().await; } Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { debug!( - self.log, - "Missing components over rpc"; - "block_hash" => 
%block_root, - "slot" => %slot, + block_hash = %block_root, + %slot, + "Missing components over rpc" ); } Err(BlockError::DuplicateFullyImported(_)) => { debug!( - self.log, - "Blobs have already been imported"; - "block_hash" => %block_root, - "slot" => %slot, + block_hash = %block_root, + %slot, + "Blobs have already been imported" ); } Err(e) => { warn!( - self.log, - "Error when importing rpc blobs"; - "error" => ?e, - "block_hash" => %block_root, - "slot" => %slot, + error = ?e, + block_hash = %block_root, + %slot, + "Error when importing rpc blobs" ); } } @@ -354,11 +354,10 @@ impl NetworkBeaconProcessor { let mut indices = custody_columns.iter().map(|d| d.index).collect::>(); indices.sort_unstable(); debug!( - self.log, - "RPC custody data columns received"; - "indices" => ?indices, - "block_root" => %block_root, - "slot" => %slot, + ?indices, + %block_root, + %slot, + "RPC custody data columns received" ); let mut result = self @@ -371,18 +370,16 @@ impl NetworkBeaconProcessor { Ok(availability) => match availability { AvailabilityProcessingStatus::Imported(hash) => { debug!( - self.log, - "Block components retrieved"; - "result" => "imported block and custody columns", - "block_hash" => %hash, + result = "imported block and custody columns", + block_hash = %hash, + "Block components retrieved" ); self.chain.recompute_head_at_current_slot().await; } AvailabilityProcessingStatus::MissingComponents(_, _) => { debug!( - self.log, - "Missing components over rpc"; - "block_hash" => %block_root, + block_hash = %block_root, + "Missing components over rpc" ); // Attempt reconstruction here before notifying sync, to avoid sending out more requests // that we may no longer need. 
@@ -395,17 +392,15 @@ impl NetworkBeaconProcessor { }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( - self.log, - "Custody columns have already been imported"; - "block_hash" => %block_root, + block_hash = %block_root, + "Custody columns have already been imported" ); } Err(e) => { warn!( - self.log, - "Error when importing rpc custody columns"; - "error" => ?e, - "block_hash" => %block_root, + error = ?e, + block_hash = %block_root, + "Error when importing rpc custody columns" ); } } @@ -455,27 +450,29 @@ impl NetworkBeaconProcessor { .await { (imported_blocks, Ok(_)) => { - debug!(self.log, "Batch processed"; - "batch_epoch" => epoch, - "first_block_slot" => start_slot, - "chain" => chain_id, - "last_block_slot" => end_slot, - "processed_blocks" => sent_blocks, - "service"=> "sync"); + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + chain = chain_id, + last_block_slot = end_slot, + processed_blocks = sent_blocks, + service= "sync", + "Batch processed"); BatchProcessResult::Success { sent_blocks, imported_blocks, } } (imported_blocks, Err(e)) => { - debug!(self.log, "Batch processing failed"; - "batch_epoch" => epoch, - "first_block_slot" => start_slot, - "chain" => chain_id, - "last_block_slot" => end_slot, - "imported_blocks" => imported_blocks, - "error" => %e.message, - "service" => "sync"); + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + chain = chain_id, + last_block_slot = end_slot, + imported_blocks, + error = %e.message, + service = "sync", + "Batch processing failed"); match e.peer_action { Some(penalty) => BatchProcessResult::FaultyFailure { imported_blocks, @@ -502,28 +499,31 @@ impl NetworkBeaconProcessor { match self.process_backfill_blocks(downloaded_blocks) { (imported_blocks, Ok(_)) => { - debug!(self.log, "Backfill batch processed"; - "batch_epoch" => epoch, - "first_block_slot" => start_slot, - "keep_execution_payload" => !self.chain.store.get_config().prune_payloads, - "last_block_slot" => 
end_slot, - "processed_blocks" => sent_blocks, - "processed_blobs" => n_blobs, - "processed_data_columns" => n_data_columns, - "service"=> "sync"); + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + keep_execution_payload = !self.chain.store.get_config().prune_payloads, + last_block_slot = end_slot, + processed_blocks = sent_blocks, + processed_blobs = n_blobs, + processed_data_columns = n_data_columns, + service= "sync", + "Backfill batch processed"); BatchProcessResult::Success { sent_blocks, imported_blocks, } } (_, Err(e)) => { - debug!(self.log, "Backfill batch processing failed"; - "batch_epoch" => epoch, - "first_block_slot" => start_slot, - "last_block_slot" => end_slot, - "processed_blobs" => n_blobs, - "error" => %e.message, - "service" => "sync"); + debug!( + batch_epoch = %epoch, + first_block_slot = start_slot, + last_block_slot = end_slot, + processed_blobs = n_blobs, + error = %e.message, + service = "sync", + "Backfill batch processing failed" + ); match e.peer_action { Some(penalty) => BatchProcessResult::FaultyFailure { imported_blocks: 0, @@ -652,11 +652,10 @@ impl NetworkBeaconProcessor { expected_block_root, } => { debug!( - self.log, - "Backfill batch processing error"; - "error" => "mismatched_block_root", - "block_root" => ?block_root, - "expected_root" => ?expected_block_root + error = "mismatched_block_root", + ?block_root, + expected_root = ?expected_block_root, + "Backfill batch processing error" ); // The peer is faulty if they send blocks with bad roots. Some(PeerAction::LowToleranceError) @@ -664,33 +663,30 @@ impl NetworkBeaconProcessor { HistoricalBlockError::InvalidSignature | HistoricalBlockError::SignatureSet(_) => { warn!( - self.log, - "Backfill batch processing error"; - "error" => ?e + error = ?e, + "Backfill batch processing error" ); // The peer is faulty if they bad signatures. 
Some(PeerAction::LowToleranceError) } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( - self.log, - "Backfill batch processing error"; - "error" => "pubkey_cache_timeout" + error = "pubkey_cache_timeout", + "Backfill batch processing error" ); // This is an internal error, do not penalize the peer. None } HistoricalBlockError::IndexOutOfBounds => { error!( - self.log, - "Backfill batch OOB error"; - "error" => ?e, + error = ?e, + "Backfill batch OOB error" ); // This should never occur, don't penalize the peer. None } HistoricalBlockError::StoreError(e) => { - warn!(self.log, "Backfill batch processing error"; "error" => ?e); + warn!(error = ?e, "Backfill batch processing error"); // This is an internal error, don't penalize the peer. None } // @@ -733,19 +729,19 @@ impl NetworkBeaconProcessor { if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { // The block is too far in the future, drop it. warn!( - self.log, "Block is ahead of our slot clock"; - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + msg = "block for future slot rejected, check your time", + %present_slot, + %block_slot, + FUTURE_SLOT_TOLERANCE, + "Block is ahead of our slot clock" ); } else { // The block is in the future, but not too far. debug!( - self.log, "Block is slightly ahead of our slot clock. Ignoring."; - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + %present_slot, + %block_slot, + FUTURE_SLOT_TOLERANCE, + "Block is slightly ahead of our slot clock. Ignoring." ); } @@ -759,18 +755,18 @@ impl NetworkBeaconProcessor { }) } BlockError::WouldRevertFinalizedSlot { .. 
} => { - debug!(self.log, "Finalized or earlier block processed";); + debug!("Finalized or earlier block processed"); Ok(()) } BlockError::GenesisBlock => { - debug!(self.log, "Genesis block was processed"); + debug!("Genesis block was processed"); Ok(()) } BlockError::BeaconChainError(e) => { warn!( - self.log, "BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => ?e, + msg = "unexpected condition in processing block.", + outcome = ?e, + "BlockProcessingFailure" ); Err(ChainSegmentFailed { @@ -783,10 +779,10 @@ impl NetworkBeaconProcessor { if !epe.penalize_peer() { // These errors indicate an issue with the EL and not the `ChainSegment`. // Pause the syncing while the EL recovers - debug!(self.log, - "Execution layer verification failed"; - "outcome" => "pausing sync", - "err" => ?err + debug!( + outcome = "pausing sync", + ?err, + "Execution layer verification failed" ); Err(ChainSegmentFailed { message: format!("Execution layer offline. Reason: {:?}", err), @@ -794,9 +790,9 @@ impl NetworkBeaconProcessor { peer_action: None, }) } else { - debug!(self.log, - "Invalid execution payload"; - "error" => ?err + debug!( + error = ?err, + "Invalid execution payload" ); Err(ChainSegmentFailed { message: format!( @@ -809,10 +805,9 @@ impl NetworkBeaconProcessor { } ref err @ BlockError::ParentExecutionPayloadInvalid { ref parent_root } => { warn!( - self.log, - "Failed to sync chain built on invalid parent"; - "parent_root" => ?parent_root, - "advice" => "check execution node for corruption then restart it and Lighthouse", + ?parent_root, + advice = "check execution node for corruption then restart it and Lighthouse", + "Failed to sync chain built on invalid parent" ); Err(ChainSegmentFailed { message: format!("Peer sent invalid block. 
Reason: {err:?}"), @@ -824,9 +819,9 @@ impl NetworkBeaconProcessor { } other => { debug!( - self.log, "Invalid block received"; - "msg" => "peer sent invalid block", - "outcome" => %other, + msg = "peer sent invalid block", + outcome = %other, + "Invalid block received" ); Err(ChainSegmentFailed { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 8415ece638..69ba5c1dbd 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -182,8 +182,6 @@ impl TestRig { let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let log = harness.logger().clone(); - let beacon_processor_config = BeaconProcessorConfig { enable_backfill_rate_limiting, ..Default::default() @@ -221,7 +219,6 @@ impl TestRig { meta_data, vec![], false, - &log, network_config, spec, )); @@ -241,7 +238,6 @@ impl TestRig { network_globals: network_globals.clone(), invalid_block_storage: InvalidBlockStorage::Disabled, executor: executor.clone(), - log: log.clone(), }; let network_beacon_processor = Arc::new(network_beacon_processor); @@ -250,7 +246,6 @@ impl TestRig { executor, current_workers: 0, config: beacon_processor_config, - log: log.clone(), } .spawn_manager( beacon_processor_rx, diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 1e1420883e..9c112dba86 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -69,20 +69,17 @@ impl StoreItem for PersistedDht { #[cfg(test)] mod tests { use super::*; - use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use store::config::StoreConfig; use store::MemoryStore; use types::{ChainSpec, MinimalEthSpec}; #[test] fn test_persisted_dht() { - let log = NullLoggerBuilder.build().unwrap(); let store: HotColdDB< MinimalEthSpec, MemoryStore, MemoryStore, - > = 
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal().into(), log) - .unwrap(); + > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal().into()).unwrap(); let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; store .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() }) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 36e5c391e9..7376244501 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -21,13 +21,13 @@ use lighthouse_network::{ service::api_types::{AppRequestId, SyncRequestId}, MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, }; +use logging::crit; use logging::TimeLatch; -use slog::{crit, debug, o, trace}; -use slog::{error, warn}; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::{debug, error, info_span, trace, warn, Instrument}; use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. @@ -42,8 +42,6 @@ pub struct Router { network: HandlerNetworkContext, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. network_beacon_processor: Arc>, - /// The `Router` logger. - log: slog::Logger, /// Provides de-bounce functionality for logging. 
logger_debounce: TimeLatch, } @@ -91,14 +89,11 @@ impl Router { beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, fork_context: Arc, - log: slog::Logger, ) -> Result>, String> { - let message_handler_log = log.new(o!("service"=> "router")); - trace!(message_handler_log, "Service starting"); + trace!("Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - let sync_logger = log.new(o!("service"=> "sync")); // generate the message channel let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); @@ -112,7 +107,6 @@ impl Router { network_globals: network_globals.clone(), invalid_block_storage, executor: executor.clone(), - log: log.clone(), }; let network_beacon_processor = Arc::new(network_beacon_processor); @@ -124,7 +118,6 @@ impl Router { network_beacon_processor.clone(), sync_recv, fork_context, - sync_logger, ); // generate the Message handler @@ -132,18 +125,18 @@ impl Router { network_globals, chain: beacon_chain, sync_send, - network: HandlerNetworkContext::new(network_send, log.clone()), + network: HandlerNetworkContext::new(network_send), network_beacon_processor, - log: message_handler_log, logger_debounce: TimeLatch::default(), }; // spawn handler task and move the message handler instance into the spawned thread executor.spawn( async move { - debug!(log, "Network message router started"); + debug!("Network message router started"); UnboundedReceiverStream::new(handler_recv) .for_each(move |msg| future::ready(handler.handle_message(msg))) + .instrument(info_span!("", service = "router")) .await; }, "router", @@ -201,7 +194,7 @@ impl Router { rpc_request: rpc::Request, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?rpc_request); + debug!( %peer_id, request = ?rpc_request, "Dropping request of disconnected peer"); return; } match rpc_request.r#type { @@ -336,7 +329,7 @@ impl 
Router { ) { match response { Response::Status(status_message) => { - debug!(self.log, "Received Status Response"; "peer_id" => %peer_id, &status_message); + debug!(%peer_id, ?status_message,"Received Status Response"); self.handle_beacon_processor_send_result( self.network_beacon_processor .send_status_message(peer_id, status_message), @@ -448,7 +441,7 @@ impl Router { ) } PubsubMessage::VoluntaryExit(exit) => { - debug!(self.log, "Received a voluntary exit"; "peer_id" => %peer_id); + debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( self.network_beacon_processor .send_gossip_voluntary_exit(message_id, peer_id, exit), @@ -456,9 +449,8 @@ impl Router { } PubsubMessage::ProposerSlashing(proposer_slashing) => { debug!( - self.log, - "Received a proposer slashing"; - "peer_id" => %peer_id + %peer_id, + "Received a proposer slashing" ); self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_proposer_slashing( @@ -470,9 +462,8 @@ impl Router { } PubsubMessage::AttesterSlashing(attester_slashing) => { debug!( - self.log, - "Received a attester slashing"; - "peer_id" => %peer_id + %peer_id, + "Received a attester slashing" ); self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_attester_slashing( @@ -484,9 +475,8 @@ impl Router { } PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { trace!( - self.log, - "Received sync committee aggregate"; - "peer_id" => %peer_id + %peer_id, + "Received sync committee aggregate" ); self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_sync_contribution( @@ -499,9 +489,8 @@ impl Router { } PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { trace!( - self.log, - "Received sync committee signature"; - "peer_id" => %peer_id + %peer_id, + "Received sync committee signature" ); self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_sync_signature( @@ -515,9 
+504,8 @@ impl Router { } PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { trace!( - self.log, - "Received light client finality update"; - "peer_id" => %peer_id + %peer_id, + "Received light client finality update" ); self.handle_beacon_processor_send_result( self.network_beacon_processor @@ -531,9 +519,9 @@ impl Router { } PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { trace!( - self.log, - "Received light client optimistic update"; - "peer_id" => %peer_id + %peer_id, + "Received light client optimistic update" + ); self.handle_beacon_processor_send_result( self.network_beacon_processor @@ -559,7 +547,7 @@ impl Router { fn send_status(&mut self, peer_id: PeerId) { let status_message = status_message(&self.chain); - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + debug!(%peer_id, ?status_message, "Sending Status Request"); self.network .send_processor_request(peer_id, RequestType::Status(status_message)); } @@ -567,9 +555,8 @@ impl Router { fn send_to_sync(&mut self, message: SyncMessage) { self.sync_send.send(message).unwrap_or_else(|e| { warn!( - self.log, - "Could not send message to the sync service"; - "error" => %e, + error = %e, + "Could not send message to the sync service" ) }); } @@ -598,7 +585,7 @@ impl Router { request_id: RequestId, status: StatusMessage, ) { - debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); + debug!(%peer_id, ?status, "Received Status Request"); // Say status back. self.network.send_response( @@ -626,20 +613,20 @@ impl Router { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::BlocksByRange { .. 
} => id, other => { - crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); + crit!(request = ?other, "BlocksByRange response on incorrect request"); return; } }, AppRequestId::Router => { - crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); + crit!(%peer_id, "All BBRange requests belong to sync"); return; } }; trace!( - self.log, - "Received BlocksByRange Response"; - "peer" => %peer_id, + %peer_id, + "Received BlocksByRange Response" + ); self.send_to_sync(SyncMessage::RpcBlock { @@ -657,9 +644,8 @@ impl Router { blob_sidecar: Option>>, ) { trace!( - self.log, - "Received BlobsByRange Response"; - "peer" => %peer_id, + %peer_id, + "Received BlobsByRange Response" ); if let AppRequestId::Sync(id) = request_id { @@ -670,10 +656,7 @@ impl Router { seen_timestamp: timestamp_now(), }); } else { - crit!( - self.log, - "All blobs by range responses should belong to sync" - ); + crit!("All blobs by range responses should belong to sync"); } } @@ -688,20 +671,19 @@ impl Router { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. } => id, other => { - crit!(self.log, "BlocksByRoot response on incorrect request"; "request" => ?other); + crit!(request = ?other, "BlocksByRoot response on incorrect request"); return; } }, AppRequestId::Router => { - crit!(self.log, "All BBRoot requests belong to sync"; "peer_id" => %peer_id); + crit!(%peer_id, "All BBRoot requests belong to sync"); return; } }; trace!( - self.log, - "Received BlocksByRoot Response"; - "peer" => %peer_id, + %peer_id, + "Received BlocksByRoot Response" ); self.send_to_sync(SyncMessage::RpcBlock { peer_id, @@ -722,20 +704,19 @@ impl Router { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. 
} => id, other => { - crit!(self.log, "BlobsByRoot response on incorrect request"; "request" => ?other); + crit!(request = ?other, "BlobsByRoot response on incorrect request"); return; } }, AppRequestId::Router => { - crit!(self.log, "All BlobsByRoot requests belong to sync"; "peer_id" => %peer_id); + crit!(%peer_id, "All BlobsByRoot requests belong to sync"); return; } }; trace!( - self.log, - "Received BlobsByRoot Response"; - "peer" => %peer_id, + %peer_id, + "Received BlobsByRoot Response" ); self.send_to_sync(SyncMessage::RpcBlob { request_id, @@ -756,20 +737,19 @@ impl Router { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::DataColumnsByRoot { .. } => id, other => { - crit!(self.log, "DataColumnsByRoot response on incorrect request"; "request" => ?other); + crit!(request = ?other, "DataColumnsByRoot response on incorrect request"); return; } }, AppRequestId::Router => { - crit!(self.log, "All DataColumnsByRoot requests belong to sync"; "peer_id" => %peer_id); + crit!(%peer_id, "All DataColumnsByRoot requests belong to sync"); return; } }; trace!( - self.log, - "Received DataColumnsByRoot Response"; - "peer" => %peer_id, + %peer_id, + "Received DataColumnsByRoot Response" ); self.send_to_sync(SyncMessage::RpcDataColumn { request_id, @@ -786,9 +766,8 @@ impl Router { data_column: Option>>, ) { trace!( - self.log, - "Received DataColumnsByRange Response"; - "peer" => %peer_id, + %peer_id, + "Received DataColumnsByRange Response" ); if let AppRequestId::Sync(id) = request_id { @@ -799,10 +778,7 @@ impl Router { seen_timestamp: timestamp_now(), }); } else { - crit!( - self.log, - "All data columns by range responses should belong to sync" - ); + crit!("All data columns by range responses should belong to sync"); } } @@ -818,8 +794,7 @@ impl Router { }; if self.logger_debounce.elapsed() { - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) + error!(error = %e, work_type, "Unable to send 
message to the beacon processor") } } } @@ -831,20 +806,18 @@ impl Router { pub struct HandlerNetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender>, - /// Logger for the `NetworkContext`. - log: slog::Logger, } impl HandlerNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { - Self { network_send, log } + pub fn new(network_send: mpsc::UnboundedSender>) -> Self { + Self { network_send } } /// Sends a message to the network task. fn inform_network(&mut self, msg: NetworkMessage) { - self.network_send.send(msg).unwrap_or_else( - |e| warn!(self.log, "Could not send message to the network service"; "error" => %e), - ) + self.network_send + .send(msg) + .unwrap_or_else(|e| warn!(error = %e,"Could not send message to the network service")) } /// Sends a request to the network task. diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e1ef57c6ce..d25e8509a4 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -10,7 +10,6 @@ use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconPro use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use futures::StreamExt; use lighthouse_network::rpc::{RequestId, RequestType}; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; @@ -24,7 +23,7 @@ use lighthouse_network::{ types::{core_topics_to_subscribe, GossipEncoding, GossipTopic}, MessageId, NetworkEvent, NetworkGlobals, PeerId, }; -use slog::{crit, debug, error, info, o, trace, warn}; +use logging::crit; use std::collections::BTreeSet; use std::{collections::HashSet, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; @@ -32,6 +31,7 @@ use strum::IntoStaticStr; use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; +use tracing::{debug, error, info, info_span, trace, 
warn, Instrument}; use types::{ ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, ValidatorSubscription, @@ -189,9 +189,8 @@ pub struct NetworkService { metrics_update: tokio::time::Interval, /// gossipsub_parameter_update timer gossipsub_parameter_update: tokio::time::Interval, - /// The logger for the network service. + /// Provides fork specific info. fork_context: Arc, - log: slog::Logger, } impl NetworkService { @@ -210,30 +209,23 @@ impl NetworkService { ), String, > { - let network_log = executor.log().clone(); // build the channels for external comms let (network_senders, network_receivers) = NetworkSenders::new(); #[cfg(feature = "disable-backfill")] - warn!( - network_log, - "Backfill is disabled. DO NOT RUN IN PRODUCTION" - ); + warn!("Backfill is disabled. DO NOT RUN IN PRODUCTION"); if let (true, false, Some(v4)) = ( config.upnp_enabled, config.disable_discovery, config.listen_addrs().v4(), ) { - let nw = network_log.clone(); let v4 = v4.clone(); executor.spawn( async move { - info!(nw, "UPnP Attempting to initialise routes"); - if let Err(e) = - nat::construct_upnp_mappings(v4.addr, v4.disc_port, nw.clone()).await - { - info!(nw, "Could not UPnP map Discovery port"; "error" => %e); + info!("UPnP Attempting to initialise routes"); + if let Err(e) = nat::construct_upnp_mappings(v4.addr, v4.disc_port).await { + info!(error = %e, "Could not UPnP map Discovery port"); } }, "UPnP", @@ -262,7 +254,7 @@ impl NetworkService { &beacon_chain.spec, )); - debug!(network_log, "Current fork"; "fork_name" => ?fork_context.current_fork()); + debug!(fork_name = ?fork_context.current_fork(), "Current fork"); // construct the libp2p service context let service_context = Context { @@ -274,15 +266,14 @@ impl NetworkService { }; // launch libp2p service - let (mut libp2p, network_globals) = - Network::new(executor.clone(), service_context, &network_log).await?; + let (mut libp2p, network_globals) = 
Network::new(executor.clone(), service_context).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. if !config.disable_discovery { let enrs_to_load = load_dht::(store.clone()); debug!( - network_log, - "Loading peers into the routing table"; "peers" => enrs_to_load.len() + peers = enrs_to_load.len(), + "Loading peers into the routing table" ); for enr in enrs_to_load { libp2p.add_enr(enr.clone()); @@ -307,7 +298,6 @@ impl NetworkService { beacon_processor_send, beacon_processor_reprocess_tx, fork_context.clone(), - network_log.clone(), )?; // attestation and sync committee subnet service @@ -315,7 +305,6 @@ impl NetworkService { beacon_chain.clone(), network_globals.local_enr().node_id(), &config, - &network_log, ); // create a timer for updating network metrics @@ -330,7 +319,6 @@ impl NetworkService { } = network_receivers; // create the network service and spawn the task - let network_log = network_log.new(o!("service" => "network")); let network_service = NetworkService { beacon_chain, libp2p, @@ -348,7 +336,6 @@ impl NetworkService { metrics_update, gossipsub_parameter_update, fork_context, - log: network_log, }; Ok((network_service, network_globals, network_senders)) @@ -417,7 +404,7 @@ impl NetworkService { fn send_to_router(&mut self, msg: RouterMessage) { if let Err(mpsc::error::SendError(msg)) = self.router_send.send(msg) { - debug!(self.log, "Failed to send msg to router"; "msg" => ?msg); + debug!(?msg, "Failed to send msg to router"); } } @@ -456,7 +443,7 @@ impl NetworkService { Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); self.libp2p.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); - info!(self.log, "Unsubscribed from old fork topics"); + info!("Unsubscribed from old fork topics"); self.next_unsubscribe = Box::pin(None.into()); } @@ -464,17 +451,17 @@ impl NetworkService { if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { let 
fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); - info!(self.log, "Subscribing to new fork topics"); + info!("Subscribing to new fork topics"); self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); } else { - error!(self.log, "Fork subscription scheduled but no fork scheduled"); + error!( "Fork subscription scheduled but no fork scheduled"); } } } } - }; + }.instrument(info_span!("", service = "network")); executor.spawn(service_fut, "network"); } @@ -588,9 +575,8 @@ impl NetworkService { .await .map_err(|e| { warn!( - self.log, - "failed to send a shutdown signal"; - "error" => %e + error = %e, + "failed to send a shutdown signal" ) }); } @@ -645,10 +631,9 @@ impl NetworkService { message_id, validation_result, } => { - trace!(self.log, "Propagating gossipsub message"; - "propagation_peer" => ?propagation_source, - "message_id" => %message_id, - "validation_result" => ?validation_result + trace!( propagation_peer = ?propagation_source, + %message_id, + ?validation_result, "Propagating gossipsub message" ); self.libp2p.report_message_validation_result( &propagation_source, @@ -664,10 +649,9 @@ impl NetworkService { } } debug!( - self.log, - "Sending pubsub messages"; - "count" => messages.len(), - "topics" => ?topic_kinds + count = messages.len(), + topics = ?topic_kinds, + "Sending pubsub messages" ); self.libp2p.publish(messages); } @@ -696,9 +680,8 @@ impl NetworkService { .await { warn!( - self.log, - "failed to send a shutdown signal"; - "error" => %e + error = %e, + "failed to send a shutdown signal" ) } return; @@ -719,7 +702,7 @@ impl NetworkService { if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + warn!(%topic, "Could not subscribe to topic"); } } } @@ -741,9 
+724,8 @@ impl NetworkService { if !subscribed_topics.is_empty() { info!( - self.log, - "Subscribed to topics"; - "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + topics = ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>(), + "Subscribed to topics" ); } } @@ -777,19 +759,14 @@ impl NetworkService { .update_gossipsub_parameters(active_validators, slot) .is_err() { - error!( - self.log, - "Failed to update gossipsub parameters"; - "active_validators" => active_validators - ); + error!(active_validators, "Failed to update gossipsub parameters"); } } else { // This scenario will only happen if the caches on the cached canonical head aren't // built. That should never be the case. error!( - self.log, - "Active validator count unavailable"; - "info" => "please report this bug" + info = "please report this bug", + "Active validator count unavailable" ); } } @@ -831,10 +808,9 @@ impl NetworkService { let fork_context = &self.fork_context; if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { info!( - self.log, - "Transitioned to new fork"; - "old_fork" => ?fork_context.current_fork(), - "new_fork" => ?new_fork_name, + old_fork = ?fork_context.current_fork(), + new_fork = ?new_fork_name, + "Transitioned to new fork" ); fork_context.update_current_fork(*new_fork_name); @@ -851,13 +827,16 @@ impl NetworkService { self.next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); - info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + info!( + remaining_epochs = UNSUBSCRIBE_DELAY_EPOCHS, + "Network will unsubscribe from old fork gossip topics in a few epochs" + ); // Remove topic weight from old fork topics to prevent peers that left on the mesh on // old topics from being penalized 
for not sending us messages. self.libp2p.remove_topic_weight_except(new_fork_digest); } else { - crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + crit!(new_fork_id = ?new_enr_fork_id, "Unknown new enr fork id"); } } @@ -906,26 +885,18 @@ impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating let enrs = self.libp2p.enr_entries(); - debug!( - self.log, - "Persisting DHT to store"; - "Number of peers" => enrs.len(), - ); + debug!(number_of_peers = enrs.len(), "Persisting DHT to store"); if let Err(e) = clear_dht::(self.store.clone()) { - error!(self.log, "Failed to clear old DHT entries"; "error" => ?e); + error!(error = ?e, "Failed to clear old DHT entries"); } // Still try to update new entries match persist_dht::(self.store.clone(), enrs) { Err(e) => error!( - self.log, - "Failed to persist DHT on drop"; - "error" => ?e - ), - Ok(_) => info!( - self.log, - "Saved DHT state"; + error = ?e, + "Failed to persist DHT on drop" ), + Ok(_) => info!("Saved DHT state"), } - info!(self.log, "Network service shutdown"); + info!("Network service shutdown"); } } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 32bbfcbcaa..15c3321e94 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -8,8 +8,6 @@ use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; use futures::StreamExt; use lighthouse_network::types::{GossipEncoding, GossipKind}; use lighthouse_network::{Enr, GossipTopic}; -use slog::{o, Drain, Level, Logger}; -use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; @@ -21,28 +19,8 @@ impl NetworkService { } } -fn get_logger(actual_log: bool) -> Logger { - if actual_log { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, 
logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(Level::Debug) - }; - - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } -} - #[test] fn test_dht_persistence() { - let log = get_logger(false); - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .deterministic_keypairs(8) @@ -60,8 +38,12 @@ fn test_dht_persistence() { let (signal, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = - task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let executor = task_executor::TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + shutdown_tx, + "test-dht-persistence".to_string(), + ); let mut config = NetworkConfig::default(); config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); @@ -137,8 +119,8 @@ fn test_removing_topic_weight_on_old_topics() { let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), exit, - get_logger(false), shutdown_tx, + "test-removing-topic-weight-on-old-topics".to_string(), ); let mut config = NetworkConfig::default(); diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs new file mode 100644 index 0000000000..dd4724b261 --- /dev/null +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -0,0 +1,681 @@ +//! This service keeps track of which shard subnet the beacon node should be subscribed to at any +//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and +//! determines whether attestations should be aggregated and/or passed to the beacon node. 
+ +use super::SubnetServiceMessage; +use std::collections::HashSet; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use delay_map::{HashMapDelay, HashSetDelay}; +use futures::prelude::*; +use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; +use slot_clock::SlotClock; +use tracing::{debug, error, info, trace, warn}; +use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; + +use crate::metrics; + +/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the +/// slot is less than this number, skip the peer discovery process. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. +pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; +/// The fraction of a slot that we subscribe to a subnet before the required slot. +/// +/// Currently a whole slot ahead. +const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; + +/// The number of slots after an aggregator duty where we remove the entry from +/// `aggregate_validators_on_subnet` delay map. +const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub(crate) enum SubscriptionKind { + /// Long lived subscriptions. + /// + /// These have a longer duration and are advertised in our ENR. + LongLived, + /// Short lived subscriptions. + /// + /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. + ShortLived, +} + +/// A particular subnet at a given slot. +#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] +pub struct ExactSubnet { + /// The `SubnetId` associated with this subnet. + pub subnet_id: SubnetId, + /// The `Slot` associated with this subnet. + pub slot: Slot, +} + +pub struct AttestationService { + /// Queued events to return to the driving service. 
+ events: VecDeque, + + /// A reference to the beacon chain to process received attestations. + pub(crate) beacon_chain: Arc>, + + /// Subnets we are currently subscribed to as short lived subscriptions. + /// + /// Once they expire, we unsubscribe from these. + /// We subscribe to subnets when we are an aggregator for an exact subnet. + short_lived_subscriptions: HashMapDelay, + + /// Subnets we are currently subscribed to as long lived subscriptions. + /// + /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. + /// These are required of all beacon nodes. The exact number is determined by the chain + /// specification. + long_lived_subscriptions: HashSet, + + /// Short lived subscriptions that need to be executed in the future. + scheduled_short_lived_subscriptions: HashSetDelay, + + /// A collection timeouts to track the existence of aggregate validator subscriptions at an + /// `ExactSubnet`. + aggregate_validators_on_subnet: Option>, + + /// The waker for the current thread. + waker: Option, + + /// The discovery mechanism of lighthouse is disabled. + discovery_disabled: bool, + + /// We are always subscribed to all subnets. + subscribe_all_subnets: bool, + + /// Our Discv5 node_id. + node_id: NodeId, + + /// Future used to manage subscribing and unsubscribing from long lived subnets. + next_long_lived_subscription_event: Pin>, + + /// Whether this node is a block proposer-only node. + proposer_only: bool, +} + +impl AttestationService { + /* Public functions */ + + /// Establish the service based on the passed configuration. 
+ pub fn new(beacon_chain: Arc>, node_id: NodeId, config: &NetworkConfig) -> Self { + let slot_duration = beacon_chain.slot_clock.slot_duration(); + + if config.subscribe_all_subnets { + info!("Subscribing to all subnets"); + } else { + info!( + subnets_per_node = beacon_chain.spec.subnets_per_node, + subscription_duration_in_epochs = beacon_chain.spec.epochs_per_subnet_subscription, + "Deterministic long lived subnets enabled" + ); + } + + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); + let mut service = AttestationService { + events: VecDeque::with_capacity(10), + beacon_chain, + short_lived_subscriptions: HashMapDelay::new(slot_duration), + long_lived_subscriptions: HashSet::default(), + scheduled_short_lived_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, + waker: None, + discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + node_id, + next_long_lived_subscription_event: { + // Set a dummy sleep. Calculating the current subnet subscriptions will update this + // value with a smarter timing + Box::pin(tokio::time::sleep(Duration::from_secs(1))) + }, + proposer_only: config.proposer_only, + }; + + // If we are not subscribed to all subnets, handle the deterministic set of subnets + if !config.subscribe_all_subnets { + service.recompute_long_lived_subnets(); + } + + service + } + + /// Return count of all currently subscribed subnets (long-lived **and** short-lived). + #[cfg(test)] + pub fn subscription_count(&self) -> usize { + if self.subscribe_all_subnets { + self.beacon_chain.spec.attestation_subnet_count as usize + } else { + let count = self + .short_lived_subscriptions + .keys() + .chain(self.long_lived_subscriptions.iter()) + .collect::>() + .len(); + count + } + } + + /// Returns whether we are subscribed to a subnet for testing purposes. 
+ #[cfg(test)] + pub(crate) fn is_subscribed( + &self, + subnet_id: &SubnetId, + subscription_kind: SubscriptionKind, + ) -> bool { + match subscription_kind { + SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), + SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), + } + } + + #[cfg(test)] + pub(crate) fn long_lived_subscriptions(&self) -> &HashSet { + &self.long_lived_subscriptions + } + + /// Processes a list of validator subscriptions. + /// + /// This will: + /// - Register new validators as being known. + /// - Search for peers for required subnets. + /// - Request subscriptions for subnets on specific slots when required. + /// - Build the timeouts for each of these events. + /// + /// This returns a result simply for the ergonomics of using ?. The result can be + /// safely dropped. + pub fn validator_subscriptions( + &mut self, + subscriptions: impl Iterator, + ) -> Result<(), String> { + // If the node is in a proposer-only state, we ignore all subnet subscriptions. + if self.proposer_only { + return Ok(()); + } + + // Maps each subnet_id subscription to it's highest slot + let mut subnets_to_discover: HashMap = HashMap::new(); + + // Registers the validator with the attestation service. + for subscription in subscriptions { + metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); + + trace!(?subscription, "Validator subscription"); + + // Compute the subnet that is associated with this subscription + let subnet_id = match SubnetId::compute_subnet::( + subscription.slot, + subscription.attestation_committee_index, + subscription.committee_count_at_slot, + &self.beacon_chain.spec, + ) { + Ok(subnet_id) => subnet_id, + Err(e) => { + warn!( + error = ?e, + "Failed to compute subnet id for validator subscription" + ); + continue; + } + }; + // Ensure each subnet_id inserted into the map has the highest slot as it's value. 
+ // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. + if let Some(slot) = subnets_to_discover.get(&subnet_id) { + if subscription.slot > *slot { + subnets_to_discover.insert(subnet_id, subscription.slot); + } + } else if !self.discovery_disabled { + subnets_to_discover.insert(subnet_id, subscription.slot); + } + + let exact_subnet = ExactSubnet { + subnet_id, + slot: subscription.slot, + }; + + // Determine if the validator is an aggregator. If so, we subscribe to the subnet and + // if successful add the validator to a mapping of known aggregators for that exact + // subnet. + + if subscription.is_aggregator { + metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); + if let Err(e) = self.subscribe_to_short_lived_subnet(exact_subnet) { + warn!(error = e, "Subscription to subnet error"); + } else { + trace!(?exact_subnet, "Subscribed to subnet for aggregator duties"); + } + } + } + + // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the + // required subnets. + if !self.discovery_disabled { + if let Err(e) = self.discover_peers_request( + subnets_to_discover + .into_iter() + .map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }), + ) { + warn!(error = e, "Discovery lookup request error"); + }; + } + + Ok(()) + } + + fn recompute_long_lived_subnets(&mut self) { + // Ensure the next computation is scheduled even if assigning subnets fails. + let next_subscription_event = self + .recompute_long_lived_subnets_inner() + .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); + + debug!("Recomputing deterministic long lived subnets"); + self.next_long_lived_subscription_event = + Box::pin(tokio::time::sleep(next_subscription_event)); + + if let Some(waker) = self.waker.as_ref() { + waker.wake_by_ref(); + } + } + + /// Gets the long lived subnets the node should be subscribed to during the current epoch and + /// the remaining duration for which they remain valid. 
+ fn recompute_long_lived_subnets_inner(&mut self) -> Result { + let current_epoch = self.beacon_chain.epoch().map_err(|e| { + if !self + .beacon_chain + .slot_clock + .is_prior_to_genesis() + .unwrap_or(false) + { + error!(err = ?e,"Failed to get the current epoch from clock") + } + })?; + + let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( + self.node_id.raw(), + current_epoch, + &self.beacon_chain.spec, + ) + .map_err(|e| error!(err = e, "Could not compute subnets for current epoch"))?; + + let next_subscription_slot = + next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let next_subscription_event = self + .beacon_chain + .slot_clock + .duration_to_slot(next_subscription_slot) + .ok_or_else(|| { + error!("Failed to compute duration to next to long lived subscription event") + })?; + + self.update_long_lived_subnets(subnets.collect()); + + Ok(next_subscription_event) + } + + /// Updates the long lived subnets. + /// + /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr + /// updated accordingly. + fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { + info!(subnets = ?subnets.iter().collect::>(),"Subscribing to long-lived subnets"); + for subnet in &subnets { + // Add the events for those subnets that are new as long lived subscriptions. + if !self.long_lived_subscriptions.contains(subnet) { + // Check if this subnet is new and send the subscription event if needed. 
+ if !self.short_lived_subscriptions.contains_key(subnet) { + debug!( + ?subnet, + subscription_kind = ?SubscriptionKind::LongLived, + "Subscribing to subnet" + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + *subnet, + ))); + } + self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); + if !self.discovery_disabled { + self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { + subnet: Subnet::Attestation(*subnet), + min_ttl: None, + }])) + } + } + } + + // Update the long_lived_subnets set and check for subnets that are being removed + std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); + for subnet in subnets { + if !self.long_lived_subscriptions.contains(&subnet) { + self.handle_removed_subnet(subnet, SubscriptionKind::LongLived); + } + } + } + + /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip + /// verification, re-propagates and returns false. + pub fn should_process_attestation( + &self, + subnet: SubnetId, + attestation: &Attestation, + ) -> bool { + // Proposer-only mode does not need to process attestations + if self.proposer_only { + return false; + } + self.aggregate_validators_on_subnet + .as_ref() + .map(|tracked_vals| { + tracked_vals.contains_key(&ExactSubnet { + subnet_id: subnet, + slot: attestation.data().slot, + }) + }) + .unwrap_or(true) + } + + /* Internal private functions */ + + /// Adds an event to the event queue and notifies that this service is ready to be polled + /// again. + fn queue_event(&mut self, ev: SubnetServiceMessage) { + self.events.push_back(ev); + if let Some(waker) = &self.waker { + waker.wake_by_ref() + } + } + /// Checks if there are currently queued discovery requests and the time required to make the + /// request. + /// + /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
+ fn discover_peers_request( + &mut self, + exact_subnets: impl Iterator, + ) -> Result<(), &'static str> { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let discovery_subnets: Vec = exact_subnets + .filter_map(|exact_subnet| { + // Check if there is enough time to perform a discovery lookup. + if exact_subnet.slot + >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) + { + // Send out an event to start looking for peers. + // Require the peer for an additional slot to ensure we keep the peer for the + // duration of the subscription. + let min_ttl = self + .beacon_chain + .slot_clock + .duration_to_slot(exact_subnet.slot + 1) + .map(|duration| std::time::Instant::now() + duration); + Some(SubnetDiscovery { + subnet: Subnet::Attestation(exact_subnet.subnet_id), + min_ttl, + }) + } else { + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + warn!( + subnet_id = ?exact_subnet, + "Not enough time for a discovery search" + ); + None + } + }) + .collect(); + + if !discovery_subnets.is_empty() { + self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + } + Ok(()) + } + + // Subscribes to the subnet if it should be done immediately, or schedules it if required. + fn subscribe_to_short_lived_subnet( + &mut self, + ExactSubnet { subnet_id, slot }: ExactSubnet, + ) -> Result<(), &'static str> { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. 
+ let time_to_subscription_slot = self + .beacon_chain + .slot_clock + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. + + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = + time_to_subscription_slot.saturating_sub(advance_subscription_duration); + + // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` + // delay map. + let time_to_unsubscribe = + time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); + } + + // If the subscription should be done in the future, schedule it. Otherwise subscribe + // immediately. + if time_to_subscription_start.is_zero() { + // This is a current or past slot, we subscribe immediately. + self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1)?; + } else { + // This is a future slot, schedule subscribing. + trace!(subnet = ?subnet_id, ?time_to_subscription_start,"Scheduling subnet subscription"); + self.scheduled_short_lived_subscriptions + .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); + } + + Ok(()) + } + + /* A collection of functions that handle the various timeouts */ + + /// Registers a subnet as subscribed. + /// + /// Checks that the time in which the subscription would end is not in the past. If we are + /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send + /// out the appropriate events. + /// + /// On determinist long lived subnets, this is only used for short lived subscriptions. + fn subscribe_to_short_lived_subnet_immediately( + &mut self, + subnet_id: SubnetId, + end_slot: Slot, + ) -> Result<(), &'static str> { + if self.subscribe_all_subnets { + // Case not handled by this service. 
+ return Ok(()); + } + + let time_to_subscription_end = self + .beacon_chain + .slot_clock + .duration_to_slot(end_slot) + .unwrap_or_default(); + + // First check this is worth doing. + if time_to_subscription_end.is_zero() { + return Err("Time when subscription would end has already passed."); + } + + let subscription_kind = SubscriptionKind::ShortLived; + + // We need to check and add a subscription for the right kind, regardless of the presence + // of the subnet as a subscription of the other kind. This is mainly since long lived + // subscriptions can be removed at any time when a validator goes offline. + + let (subscriptions, already_subscribed_as_other_kind) = ( + &mut self.short_lived_subscriptions, + self.long_lived_subscriptions.contains(&subnet_id), + ); + + match subscriptions.get(&subnet_id) { + Some(current_end_slot) => { + // We are already subscribed. Check if we need to extend the subscription. + if &end_slot > current_end_slot { + trace!( + subnet = ?subnet_id, + prev_end_slot = %current_end_slot, + new_end_slot = %end_slot, + ?subscription_kind, + "Extending subscription to subnet" + ); + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + } + } + None => { + // This is a new subscription. Add with the corresponding timeout and send the + // notification. + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + + // Inform of the subscription. + if !already_subscribed_as_other_kind { + debug!( + subnet = ?subnet_id, + %end_slot, + ?subscription_kind, + "Subscribing to subnet" + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + subnet_id, + ))); + } + } + } + + Ok(()) + } + + // Unsubscribes from a subnet that was removed if it does not continue to exist as a + // subscription of the other kind. For long lived subscriptions, it also removes the + // advertisement from our ENR. 
+ fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { + let exists_in_other_subscriptions = match subscription_kind { + SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), + SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), + }; + + if !exists_in_other_subscriptions { + // Subscription no longer exists as short lived or long lived. + debug!( + subnet = ?subnet_id, + ?subscription_kind, + "Unsubscribing from subnet" + ); + self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + subnet_id, + ))); + } + + if subscription_kind == SubscriptionKind::LongLived { + // Remove from our ENR even if we remain subscribed in other way. + self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( + subnet_id, + ))); + } + } +} + +impl Stream for AttestationService { + type Item = SubnetServiceMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // Update the waker if needed. + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // Send out any generated events. + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + // If we aren't subscribed to all subnets, handle the deterministic long-lived subnets + if !self.subscribe_all_subnets { + match self.next_long_lived_subscription_event.as_mut().poll(cx) { + Poll::Ready(_) => { + self.recompute_long_lived_subnets(); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Pending => {} + } + } + + // Process scheduled subscriptions that might be ready, since those can extend a soon to + // expire subscription. 
+ match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { + if let Err(e) = + self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1) + { + debug!(subnet = ?subnet_id, err = e,"Failed to subscribe to short lived subnet"); + } + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!( + error = e, + "Failed to check for scheduled subnet subscriptions" + ); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Finally process any expired subscriptions. + match self.short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { + self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!(error = e, "Failed to check for subnet unsubscription times"); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Poll to remove entries on expiration, no need to act on expiration events. 
+ if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { + error!( + error = e, + "Failed to check for aggregate validator on subnet expirations" + ); + } + } + + Poll::Pending + } +} diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index de90e22254..5340538e52 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -14,8 +14,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use delay_map::HashSetDelay; use futures::prelude::*; use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; -use slog::{debug, error, o, warn}; use slot_clock::SlotClock; +use tracing::{debug, error, info, instrument, warn}; use types::{ AttestationData, EthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -107,27 +107,23 @@ pub struct SubnetService { /// Whether this node is a block proposer-only node. proposer_only: bool, - - /// The logger for the attestation service. - log: slog::Logger, } impl SubnetService { /* Public functions */ /// Establish the service based on the passed configuration. 
- pub fn new( - beacon_chain: Arc>, - node_id: NodeId, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "subnet_service")); - + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] + pub fn new(beacon_chain: Arc>, node_id: NodeId, config: &NetworkConfig) -> Self { let slot_duration = beacon_chain.slot_clock.slot_duration(); if config.subscribe_all_subnets { - slog::info!(log, "Subscribing to all subnets"); + info!("Subscribing to all subnets"); } // Build the list of known permanent subscriptions, so that we know not to subscribe or @@ -194,7 +190,6 @@ impl SubnetService { discovery_disabled: config.disable_discovery, subscribe_all_subnets: config.subscribe_all_subnets, proposer_only: config.proposer_only, - log, } } @@ -233,6 +228,12 @@ impl SubnetService { /// /// This returns a result simply for the ergonomics of using ?. The result can be /// safely dropped. + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] pub fn validator_subscriptions(&mut self, subscriptions: impl Iterator) { // If the node is in a proposer-only state, we ignore all subnet subscriptions. 
if self.proposer_only { @@ -257,9 +258,9 @@ impl SubnetService { ) { Ok(subnet_id) => Subnet::Attestation(subnet_id), Err(e) => { - warn!(self.log, - "Failed to compute subnet id for validator subscription"; - "error" => ?e, + warn!( + error = ?e, + "Failed to compute subnet id for validator subscription" ); continue; } @@ -287,10 +288,7 @@ impl SubnetService { if subscription.is_aggregator { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); if let Err(e) = self.subscribe_to_subnet(exact_subnet) { - warn!(self.log, - "Subscription to subnet error"; - "error" => e, - ); + warn!(error = e, "Subscription to subnet error"); } } } @@ -305,10 +303,10 @@ impl SubnetService { ) { Ok(subnet_ids) => subnet_ids, Err(e) => { - warn!(self.log, - "Failed to compute subnet id for sync committee subscription"; - "error" => ?e, - "validator_index" => subscription.validator_index + warn!( + error = ?e, + validator_index = subscription.validator_index, + "Failed to compute subnet id for sync committee subscription" ); continue; } @@ -326,7 +324,11 @@ impl SubnetService { .slot_clock .duration_to_slot(slot_required_until) else { - warn!(self.log, "Subscription to sync subnet error"; "error" => "Unable to determine duration to unsubscription slot", "validator_index" => subscription.validator_index); + warn!( + error = "Unable to determine duration to unsubscription slot", + validator_index = subscription.validator_index, + "Subscription to sync subnet error" + ); continue; }; @@ -337,11 +339,11 @@ impl SubnetService { .now() .unwrap_or(Slot::from(0u64)); warn!( - self.log, - "Sync committee subscription is past expiration"; - "subnet" => ?subnet, - "current_slot" => ?current_slot, - "unsubscribe_slot" => ?slot_required_until, ); + ?subnet, + ?current_slot, + unsubscribe_slot = ?slot_required_until, + "Sync committee subscription is past expiration" + ); continue; } @@ -359,13 +361,19 @@ impl SubnetService { // required subnets. 
if !self.discovery_disabled { if let Err(e) = self.discover_peers_request(subnets_to_discover.into_iter()) { - warn!(self.log, "Discovery lookup request error"; "error" => e); + warn!(error = e, "Discovery lookup request error"); }; } } /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip /// verification, re-propagates and returns false. + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] pub fn should_process_attestation( &self, subnet: Subnet, @@ -390,6 +398,12 @@ impl SubnetService { /// Adds an event to the event queue and notifies that this service is ready to be polled /// again. + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn queue_event(&mut self, ev: SubnetServiceMessage) { self.events.push_back(ev); if let Some(waker) = &self.waker { @@ -401,6 +415,11 @@ impl SubnetService { /// /// If there is sufficient time, queues a peer discovery request for all the required subnets. // NOTE: Sending early subscriptions results in early searching for peers on subnets. + #[instrument(parent = None, + level = "info", + name = "subnet_service", + skip_all + )] fn discover_peers_request( &mut self, subnets_to_discover: impl Iterator, @@ -432,9 +451,9 @@ impl SubnetService { } else { // We may want to check the global PeerInfo to see estimated timeouts for each // peer before they can be removed. - warn!(self.log, - "Not enough time for a discovery search"; - "subnet_id" => ?subnet, + warn!( + subnet_id = ?subnet, + "Not enough time for a discovery search" ); None } @@ -448,6 +467,12 @@ impl SubnetService { } // Subscribes to the subnet if it should be done immediately, or schedules it if required. 
+ #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn subscribe_to_subnet( &mut self, ExactSubnet { subnet, slot }: ExactSubnet, @@ -500,6 +525,12 @@ impl SubnetService { } /// Adds a subscription event to the sync subnet. + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn subscribe_to_sync_subnet( &mut self, subnet: Subnet, @@ -529,7 +560,11 @@ impl SubnetService { self.subscriptions .insert_at(subnet, duration_to_unsubscribe); // We are not currently subscribed and have no waiting subscription, create one - debug!(self.log, "Subscribing to subnet"; "subnet" => ?subnet, "until" => ?slot_required_until); + debug!( + ?subnet, + until = ?slot_required_until, + "Subscribing to subnet" + ); self.events .push_back(SubnetServiceMessage::Subscribe(subnet)); @@ -545,6 +580,12 @@ impl SubnetService { /// Checks that the time in which the subscription would end is not in the past. If we are /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send /// out the appropriate events. + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn subscribe_to_subnet_immediately( &mut self, subnet: Subnet, @@ -588,9 +629,10 @@ impl SubnetService { .insert_at(subnet, time_to_subscription_end); // Inform of the subscription. - debug!(self.log, "Subscribing to subnet"; - "subnet" => ?subnet, - "end_slot" => end_slot, + debug!( + ?subnet, + %end_slot, + "Subscribing to subnet" ); self.queue_event(SubnetServiceMessage::Subscribe(subnet)); } @@ -599,10 +641,16 @@ impl SubnetService { } // Unsubscribes from a subnet that was removed. 
+ #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn handle_removed_subnet(&mut self, subnet: Subnet) { if !self.subscriptions.contains_key(&subnet) { // Subscription no longer exists as short lived subnet - debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet); + debug!(?subnet, "Unsubscribing from subnet"); self.queue_event(SubnetServiceMessage::Unsubscribe(subnet)); // If this is a sync subnet, we need to remove it from our ENR. @@ -616,6 +664,12 @@ impl SubnetService { impl Stream for SubnetService { type Item = SubnetServiceMessage; + #[instrument(parent = None, + level = "info", + fields(service = "subnet_service"), + name = "subnet_service", + skip_all + )] fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // Update the waker if needed. if let Some(waker) = &self.waker { @@ -639,7 +693,11 @@ impl Stream for SubnetService { // Set the `end_slot` for the subscription to be `duty.slot + 1` so that we unsubscribe // only at the end of the duty slot. 
if let Err(e) = self.subscribe_to_subnet_immediately(subnet, slot + 1) { - debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); + debug!( + subnet = ?subnet, + err = e, + "Failed to subscribe to short lived subnet" + ); } self.waker .as_ref() @@ -647,7 +705,10 @@ impl Stream for SubnetService { .wake_by_ref(); } Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); + error!( + error = e, + "Failed to check for scheduled subnet subscriptions" + ); } Poll::Ready(None) | Poll::Pending => {} } @@ -663,7 +724,7 @@ impl Stream for SubnetService { .wake_by_ref(); } Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); + error!(error = e, "Failed to check for subnet unsubscription times"); } Poll::Ready(None) | Poll::Pending => {} } @@ -671,7 +732,10 @@ impl Stream for SubnetService { // Poll to remove entries on expiration, no need to act on expiration events. if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { - error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + error!( + error = e, + "Failed to check for aggregate validator on subnet expirations" + ); } } diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs new file mode 100644 index 0000000000..59ec278a95 --- /dev/null +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -0,0 +1,345 @@ +//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any +//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries. 
+ +use std::collections::{hash_map::Entry, HashMap, VecDeque}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; + +use futures::prelude::*; +use tracing::{debug, error, trace, warn}; + +use super::SubnetServiceMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use delay_map::HashSetDelay; +use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; +use slot_clock::SlotClock; +use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; + +use crate::metrics; + +/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the +/// slot is less than this number, skip the peer discovery process. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. +const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; + +/// A particular subnet at a given slot. +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +pub struct ExactSubnet { + /// The `SyncSubnetId` associated with this subnet. + pub subnet_id: SyncSubnetId, + /// The epoch until which we need to stay subscribed to the subnet. + pub until_epoch: Epoch, +} +pub struct SyncCommitteeService { + /// Queued events to return to the driving service. + events: VecDeque, + + /// A reference to the beacon chain to process received attestations. + pub(crate) beacon_chain: Arc>, + + /// The collection of all currently subscribed subnets. + subscriptions: HashMap, + + /// A collection of timeouts for when to unsubscribe from a subnet. + unsubscriptions: HashSetDelay, + + /// The waker for the current thread. + waker: Option, + + /// The discovery mechanism of lighthouse is disabled. + discovery_disabled: bool, + + /// We are always subscribed to all subnets. + subscribe_all_subnets: bool, + + /// Whether this node is a block proposer-only node. 
+ proposer_only: bool, +} + +impl SyncCommitteeService { + /* Public functions */ + + pub fn new(beacon_chain: Arc>, config: &NetworkConfig) -> Self { + let spec = &beacon_chain.spec; + let epoch_duration_secs = + beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); + let default_timeout = + epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()); + + SyncCommitteeService { + events: VecDeque::with_capacity(10), + beacon_chain, + subscriptions: HashMap::new(), + unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)), + waker: None, + subscribe_all_subnets: config.subscribe_all_subnets, + discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, + } + } + + /// Return count of all currently subscribed subnets. + #[cfg(test)] + pub fn subscription_count(&self) -> usize { + use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; + if self.subscribe_all_subnets { + SYNC_COMMITTEE_SUBNET_COUNT as usize + } else { + self.subscriptions.len() + } + } + + /// Processes a list of sync committee subscriptions. + /// + /// This will: + /// - Search for peers for required subnets. + /// - Request subscriptions required subnets. + /// - Build the timeouts for each of these events. + /// + /// This returns a result simply for the ergonomics of using ?. The result can be + /// safely dropped. + pub fn validator_subscriptions( + &mut self, + subscriptions: Vec, + ) -> Result<(), String> { + // A proposer-only node does not subscribe to any sync-committees + if self.proposer_only { + return Ok(()); + } + + let mut subnets_to_discover = Vec::new(); + for subscription in subscriptions { + metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); + //NOTE: We assume all subscriptions have been verified before reaching this service + + // Registers the validator with the subnet service. + // This will subscribe to long-lived random subnets if required. 
+ trace!(?subscription, "Sync committee subscription"); + + let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::( + &subscription.sync_committee_indices, + ) { + Ok(subnet_ids) => subnet_ids, + Err(e) => { + warn!( + error = ?e, + validator_index = subscription.validator_index, + "Failed to compute subnet id for sync committee subscription" + ); + continue; + } + }; + + for subnet_id in subnet_ids { + let exact_subnet = ExactSubnet { + subnet_id, + until_epoch: subscription.until_epoch, + }; + subnets_to_discover.push(exact_subnet.clone()); + if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { + warn!( + error = e, + validator_index = subscription.validator_index, + "Subscription to sync subnet error" + ); + } else { + trace!( + ?exact_subnet, + validator_index = subscription.validator_index, + "Subscribed to subnet for sync committee duties" + ); + } + } + } + // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the + // required subnets. + if !self.discovery_disabled { + if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) { + warn!(error = e, "Discovery lookup request error"); + }; + } + + // pre-emptively wake the thread to check for new events + if let Some(waker) = &self.waker { + waker.wake_by_ref(); + } + Ok(()) + } + + /* Internal private functions */ + + /// Checks if there are currently queued discovery requests and the time required to make the + /// request. + /// + /// If there is sufficient time, queues a peer discovery request for all the required subnets. 
+ fn discover_peers_request<'a>( + &mut self, + exact_subnets: impl Iterator, + ) -> Result<(), &'static str> { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + + let discovery_subnets: Vec = exact_subnets + .filter_map(|exact_subnet| { + let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); + // check if there is enough time to perform a discovery lookup + if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { + // if the slot is more than epoch away, add an event to start looking for peers + // add one slot to ensure we keep the peer for the subscription slot + let min_ttl = self + .beacon_chain + .slot_clock + .duration_to_slot(until_slot + 1) + .map(|duration| std::time::Instant::now() + duration); + Some(SubnetDiscovery { + subnet: Subnet::SyncCommittee(exact_subnet.subnet_id), + min_ttl, + }) + } else { + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + warn!( + subnet_id = ?exact_subnet, + "Not enough time for a discovery search" + ); + None + } + }) + .collect(); + + if !discovery_subnets.is_empty() { + self.events + .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + } + Ok(()) + } + + /// Adds a subscription event and an associated unsubscription event if required. + fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { + // Return if we have subscribed to all subnets + if self.subscribe_all_subnets { + return Ok(()); + } + + // Return if we already have a subscription for exact_subnet + if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) { + return Ok(()); + } + + // Return if we already have subscription set to expire later than the current request. 
+ if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) { + if *until_epoch >= exact_subnet.until_epoch { + return Ok(()); + } + } + + // initialise timing variables + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); + // Calculate the duration to the unsubscription event. + let expected_end_subscription_duration = if current_slot >= until_slot { + warn!( + %current_slot, + ?exact_subnet, + "Sync committee subscription is past expiration" + ); + return Ok(()); + } else { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + + // the duration until we no longer need this subscription. We assume a single slot is + // sufficient. + self.beacon_chain + .slot_clock + .duration_to_slot(until_slot) + .ok_or("Unable to determine duration to unsubscription slot")? + + slot_duration + }; + + if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) { + // We are not currently subscribed and have no waiting subscription, create one + debug!(subnet = *exact_subnet.subnet_id, until_epoch = ?exact_subnet.until_epoch, "Subscribing to subnet"); + e.insert(exact_subnet.until_epoch); + self.events + .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee( + exact_subnet.subnet_id, + ))); + + // add the subnet to the ENR bitfield + self.events + .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee( + exact_subnet.subnet_id, + ))); + + // add an unsubscription event to remove ourselves from the subnet once completed + self.unsubscriptions + .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration); + } else { + // We are already subscribed, extend the unsubscription duration + self.unsubscriptions + .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); + } + + Ok(()) + } + + /// A queued 
unsubscription is ready. + fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) { + debug!(subnet = *subnet_id, "Unsubscribing from subnet"); + + self.subscriptions.remove(&subnet_id); + self.events + .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee( + subnet_id, + ))); + + self.events + .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee( + subnet_id, + ))); + } +} + +impl Stream for SyncCommitteeService { + type Item = SubnetServiceMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // update the waker if needed + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // process any un-subscription events + match self.unsubscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), + Poll::Ready(Some(Err(e))) => { + error!(error = e, "Failed to check for subnet unsubscription times"); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // process any generated events + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + Poll::Pending + } +} diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 0f3343df63..7e274850b5 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -13,6 +13,7 @@ use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; use task_executor::test_utils::TestRuntime; +use tracing_subscriber::EnvFilter; use types::{ CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -20,6 +21,8 @@ use types::{ const SLOT_DURATION_MILLIS: u64 = 400; +const TEST_LOG_LEVEL: Option<&str> = None; + type 
TestBeaconChainType = Witness< SystemTimeSlotClock, CachingEth1Backend, @@ -37,11 +40,11 @@ impl TestBeaconChain { pub fn new_with_system_clock() -> Self { let spec = Arc::new(MainnetEthSpec::default_spec()); + get_tracing_subscriber(TEST_LOG_LEVEL); + let keypairs = generate_deterministic_keypairs(1); - let log = logging::test_logger(); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); + let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone()).unwrap(); let kzg = get_kzg(&spec); @@ -51,7 +54,6 @@ impl TestBeaconChain { let chain = Arc::new( BeaconChainBuilder::new(MainnetEthSpec, kzg.clone()) - .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) .task_executor(test_runtime.task_executor.clone()) @@ -91,10 +93,18 @@ pub fn recent_genesis_time() -> u64 { .as_secs() } +fn get_tracing_subscriber(log_level: Option<&str>) { + if let Some(level) = log_level { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::try_new(level).unwrap()) + .try_init() + .unwrap(); + } +} + static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); fn get_subnet_service() -> SubnetService { - let log = logging::test_logger(); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -103,7 +113,6 @@ fn get_subnet_service() -> SubnetService { beacon_chain, lighthouse_network::discv5::enr::NodeId::random(), &config, - &log, ) } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 4220f85fc3..cd3f0dcbeb 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -20,13 +20,14 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::service::api_types::Id; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; +use logging::crit; use 
rand::seq::SliceRandom; -use slog::{crit, debug, error, info, warn}; use std::collections::{ btree_map::{BTreeMap, Entry}, HashMap, HashSet, }; use std::sync::Arc; +use tracing::{debug, error, info, instrument, warn}; use types::{Epoch, EthSpec}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -146,16 +147,17 @@ pub struct BackFillSync { /// Reference to the network globals in order to obtain valid peers to backfill blocks from /// (i.e synced peers). network_globals: Arc>, - - /// A logger for backfill sync. - log: slog::Logger, } impl BackFillSync { + #[instrument(parent = None, + level = "info", + name = "backfill_sync", + skip_all + )] pub fn new( beacon_chain: Arc>, network_globals: Arc>, - log: slog::Logger, ) -> Self { // Determine if backfill is enabled or not. // If, for some reason a backfill has already been completed (or we've used a trusted @@ -186,7 +188,6 @@ impl BackFillSync { participating_peers: HashSet::new(), restart_failed_sync: false, beacon_chain, - log, }; // Update the global network state with the current backfill state. @@ -195,9 +196,15 @@ impl BackFillSync { } /// Pauses the backfill sync if it's currently syncing. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] pub fn pause(&mut self) { if let BackFillState::Syncing = self.state() { - debug!(self.log, "Backfill sync paused"; "processed_epochs" => self.validated_batches, "to_be_processed" => self.current_start); + debug!(processed_epochs = %self.validated_batches, to_be_processed = %self.current_start,"Backfill sync paused"); self.set_state(BackFillState::Paused); } } @@ -206,6 +213,12 @@ impl BackFillSync { /// /// If resuming is successful, reports back the current syncing metrics. 
#[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] pub fn start( &mut self, network: &mut SyncNetworkContext, @@ -222,7 +235,7 @@ impl BackFillSync { .is_some() { // If there are peers to resume with, begin the resume. - debug!(self.log, "Resuming backfill sync"; "start_epoch" => self.current_start, "awaiting_batches" => self.batches.len(), "processing_target" => self.processing_target); + debug!(start_epoch = ?self.current_start, awaiting_batches = self.batches.len(), processing_target = ?self.processing_target, "Resuming backfill sync"); self.set_state(BackFillState::Syncing); // Resume any previously failed batches. self.resume_batches(network)?; @@ -251,14 +264,14 @@ impl BackFillSync { // This infallible match exists to force us to update this code if a future // refactor of `ResetEpochError` adds a variant. let ResetEpochError::SyncCompleted = e; - error!(self.log, "Backfill sync completed whilst in failed status"); + error!("Backfill sync completed whilst in failed status"); self.set_state(BackFillState::Completed); return Err(BackFillError::InvalidSyncState(String::from( "chain completed", ))); } - debug!(self.log, "Resuming a failed backfill sync"; "start_epoch" => self.current_start); + debug!(start_epoch = %self.current_start, "Resuming a failed backfill sync"); // begin requesting blocks from the peer pool, until all peers are exhausted. self.request_batches(network)?; @@ -281,6 +294,12 @@ impl BackFillSync { /// A fully synced peer has joined us. /// If we are in a failed state, update a local variable to indicate we are able to restart /// the failed sync on the next attempt. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] pub fn fully_synced_peer_joined(&mut self) { if matches!(self.state(), BackFillState::Failed) { self.restart_failed_sync = true; @@ -289,6 +308,12 @@ impl BackFillSync { /// A peer has disconnected. /// If the peer has active batches, those are considered failed and re-requested. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn peer_disconnected( &mut self, @@ -318,15 +343,13 @@ impl BackFillSync { // short circuit early. if self.retry_batch_download(network, id).is_err() { debug!( - self.log, - "Batch could not be retried"; - "batch_id" => id, - "error" => "no synced peers" + batch_id = %id, + error = "no synced peers", + "Batch could not be retried" ); } } else { - debug!(self.log, "Batch not found while removing peer"; - "peer" => %peer_id, "batch" => id) + debug!(peer = %peer_id, batch = %id, "Batch not found while removing peer"); } } } @@ -339,6 +362,12 @@ impl BackFillSync { /// An RPC error has occurred. /// /// If the batch exists it is re-requested. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn inject_error( &mut self, @@ -356,7 +385,7 @@ impl BackFillSync { if !batch.is_expecting_block(&request_id) { return Ok(()); } - debug!(self.log, "Batch failed"; "batch_epoch" => batch_id, "error" => "rpc_error"); + debug!(batch_epoch = %batch_id, error = "rpc_error", "Batch failed"); if let Some(active_requests) = self.active_requests.get_mut(peer_id) { active_requests.remove(&batch_id); } @@ -378,6 +407,12 @@ impl BackFillSync { /// If this returns an error, the backfill sync has failed and will be restarted once new peers /// join the system. /// The sync manager should update the global sync state on failure. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_block_response( &mut self, @@ -391,7 +426,7 @@ impl BackFillSync { let Some(batch) = self.batches.get_mut(&batch_id) else { if !matches!(self.state(), BackFillState::Failed) { // A batch might get removed when the chain advances, so this is non fatal. 
- debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); + debug!(epoch = %batch_id, "Received a block for unknown batch"); } return Ok(ProcessResult::Successful); }; @@ -416,7 +451,12 @@ impl BackFillSync { Ok(received) => { let awaiting_batches = self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; - debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + debug!( + epoch = %batch_id, + blocks = received, + %awaiting_batches, + "Completed batch received" + ); // pre-emptively request more blocks from peers whilst we process current blocks, self.request_batches(network)?; @@ -432,6 +472,12 @@ impl BackFillSync { /// The syncing process has failed. /// /// This resets past variables, to allow for a fresh start when resuming. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn fail_sync(&mut self, error: BackFillError) -> Result<(), BackFillError> { // Some errors shouldn't fail the chain. if matches!(error, BackFillError::Paused) { @@ -455,7 +501,7 @@ impl BackFillSync { // NOTE: Lets keep validated_batches for posterity // Emit the log here - error!(self.log, "Backfill sync failed"; "error" => ?error); + error!(?error, "Backfill sync failed"); // Return the error, kinda weird pattern, but I want to use // `self.fail_chain(_)?` in other parts of the code. @@ -464,6 +510,12 @@ impl BackFillSync { /// Processes the batch with the given id. 
/// The batch must exist and be ready for processing + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn process_batch( &mut self, network: &mut SyncNetworkContext, @@ -503,8 +555,12 @@ impl BackFillSync { .beacon_processor() .send_chain_segment(process_id, blocks) { - crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", - "error" => %e, "batch" => self.processing_target); + crit!( + msg = "process_batch", + error = %e, + batch = ?self.processing_target, + "Failed to send backfill segment to processor." + ); // This is unlikely to happen but it would stall syncing since the batch now has no // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is @@ -518,6 +574,12 @@ impl BackFillSync { /// The block processor has completed processing a batch. This function handles the result /// of the batch processor. /// If an error is returned the BackFill sync has failed. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_batch_process_result( &mut self, @@ -530,13 +592,15 @@ impl BackFillSync { // result let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { - debug!(self.log, "Unexpected batch result"; - "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); + debug!( + batch_epoch = %batch_id.as_u64(), + expected_batch_epoch = processing_id.as_u64(), + "Unexpected batch result" + ); return Ok(ProcessResult::Successful); } None => { - debug!(self.log, "Chain was not expecting a batch result"; - "batch_epoch" => batch_id); + debug!(%batch_id, "Chain was not expecting a batch result"); return Ok(ProcessResult::Successful); } _ => { @@ -566,8 +630,14 @@ impl BackFillSync { return Ok(ProcessResult::Successful); }; - debug!(self.log, "Backfill batch processed"; "result" => ?result, &batch, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(peer)); + debug!( + ?result, + %batch, + batch_epoch = %batch_id, + %peer, + client = %network.client_type(peer), + "Backfill batch processed" + ); match result { BatchProcessResult::Success { @@ -591,7 +661,10 @@ impl BackFillSync { // check if the chain has completed syncing if self.check_completed() { // chain is completed - info!(self.log, "Backfill sync completed"; "blocks_processed" => self.validated_batches * T::EthSpec::slots_per_epoch()); + info!( + blocks_processed = self.validated_batches * T::EthSpec::slots_per_epoch(), + "Backfill sync completed" + ); self.set_state(BackFillState::Completed); Ok(ProcessResult::SyncCompleted) } else { @@ -619,10 +692,9 @@ impl BackFillSync { // repeatedly and are either malicious or faulty. 
We stop the backfill sync and // report all synced peers that have participated. warn!( - self.log, - "Backfill batch failed to download. Penalizing peers"; - "score_adjustment" => %penalty, - "batch_epoch"=> batch_id + score_adjustment = %penalty, + batch_epoch = %batch_id, + "Backfill batch failed to download. Penalizing peers" ); for peer in self.participating_peers.drain() { @@ -658,6 +730,12 @@ impl BackFillSync { } /// Processes the next ready batch. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn process_completed_batches( &mut self, network: &mut SyncNetworkContext, @@ -692,7 +770,10 @@ impl BackFillSync { BatchState::AwaitingValidation(_) => { // TODO: I don't think this state is possible, log a CRIT just in case. // If this is not observed, add it to the failed state branch above. - crit!(self.log, "Chain encountered a robust batch awaiting validation"; "batch" => self.processing_target); + crit!( + batch = ?self.processing_target, + "Chain encountered a robust batch awaiting validation" + ); self.processing_target -= BACKFILL_EPOCHS_PER_BATCH; if self.to_be_downloaded >= self.processing_target { @@ -718,6 +799,12 @@ impl BackFillSync { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch >= self.current_start { @@ -745,9 +832,12 @@ impl BackFillSync { // A different peer sent the correct batch, the previous peer did not // We negatively score the original peer. let action = PeerAction::LowToleranceError; - debug!(self.log, "Re-processed batch validated. 
Scoring original peer"; - "batch_epoch" => id, "score_adjustment" => %action, - "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + debug!( + batch_epoch = ?id, + score_adjustment = %action, + original_peer = %attempt.peer_id, + new_peer = %processed_attempt.peer_id, + "Re-processed batch validated. Scoring original peer" ); network.report_peer( attempt.peer_id, @@ -758,9 +848,12 @@ impl BackFillSync { // The same peer corrected it's previous mistake. There was an error, so we // negative score the original peer. let action = PeerAction::MidToleranceError; - debug!(self.log, "Re-processed batch validated by the same peer"; - "batch_epoch" => id, "score_adjustment" => %action, - "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + debug!( + batch_epoch = ?id, + score_adjustment = %action, + original_peer = %attempt.peer_id, + new_peer = %processed_attempt.peer_id, + "Re-processed batch validated by the same peer" ); network.report_peer( attempt.peer_id, @@ -778,14 +871,11 @@ impl BackFillSync { } } BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => { - crit!( - self.log, - "batch indicates inconsistent chain state while advancing chain" - ) + crit!("batch indicates inconsistent chain state while advancing chain") } BatchState::AwaitingProcessing(..) => {} BatchState::Processing(_) => { - debug!(self.log, "Advancing chain while processing a batch"; "batch" => id, batch); + debug!(batch = %id, %batch, "Advancing chain while processing a batch"); if let Some(processing_id) = self.current_processing_batch { if id >= processing_id { self.current_processing_batch = None; @@ -803,7 +893,7 @@ impl BackFillSync { // won't have this batch, so we need to request it. 
self.to_be_downloaded -= BACKFILL_EPOCHS_PER_BATCH; } - debug!(self.log, "Backfill advanced"; "validated_epoch" => validating_epoch, "processing_target" => self.processing_target); + debug!(?validating_epoch, processing_target = ?self.processing_target, "Backfill advanced"); } /// An invalid batch has been received that could not be processed, but that can be retried. @@ -811,6 +901,12 @@ impl BackFillSync { /// These events occur when a peer has successfully responded with blocks, but the blocks we /// have received are incorrect or invalid. This indicates the peer has not performed as /// intended and can result in downvoting a peer. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn handle_invalid_batch( &mut self, network: &mut SyncNetworkContext, @@ -862,6 +958,12 @@ impl BackFillSync { } /// Sends and registers the request of a batch awaiting download. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn retry_batch_download( &mut self, network: &mut SyncNetworkContext, @@ -896,13 +998,19 @@ impl BackFillSync { self.send_batch(network, batch_id, peer) } else { // If we are here the chain has no more synced peers - info!(self.log, "Backfill sync paused"; "reason" => "insufficient_synced_peers"); + info!(reason = "insufficient_synced_peers", "Backfill sync paused"); self.set_state(BackFillState::Paused); Err(BackFillError::Paused) } } /// Requests the batch assigned to the given id from a given peer. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn send_batch( &mut self, network: &mut SyncNetworkContext, @@ -922,7 +1030,7 @@ impl BackFillSync { if let Err(e) = batch.start_downloading_from_peer(peer, request_id) { return self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)); } - debug!(self.log, "Requesting batch"; "epoch" => batch_id, &batch); + debug!(epoch = %batch_id, %batch, "Requesting batch"); // register the batch for this peer self.active_requests @@ -933,8 +1041,7 @@ impl BackFillSync { } Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway - warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => ?e, &batch); + warn!(%batch_id, error = ?e, %batch,"Could not send batch request"); // register the failed download and check if the batch can be retried if let Err(e) = batch.start_downloading_from_peer(peer, 1) { return self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)); @@ -963,6 +1070,12 @@ impl BackFillSync { /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn resume_batches(&mut self, network: &mut SyncNetworkContext) -> Result<(), BackFillError> { let batch_ids_to_retry = self .batches @@ -987,6 +1100,12 @@ impl BackFillSync { /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn request_batches( &mut self, network: &mut SyncNetworkContext, @@ -1029,6 +1148,12 @@ impl BackFillSync { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond genesis; if self.last_batch_downloaded { @@ -1090,6 +1215,12 @@ impl BackFillSync { /// /// This errors if the beacon chain indicates that backfill sync has already completed or is /// not required. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> { let anchor_info = self.beacon_chain.store.get_anchor_info(); if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { @@ -1103,6 +1234,12 @@ impl BackFillSync { } /// Checks with the beacon chain if backfill sync has completed. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn check_completed(&mut self) -> bool { if self.would_complete(self.current_start) { // Check that the beacon chain agrees @@ -1111,13 +1248,19 @@ impl BackFillSync { if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { return true; } else { - error!(self.log, "Backfill out of sync with beacon chain"); + error!("Backfill out of sync with beacon chain"); } } false } /// Checks if backfill would complete by syncing to `start_epoch`. 
+ #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn would_complete(&self, start_epoch: Epoch) -> bool { start_epoch <= self @@ -1127,10 +1270,22 @@ impl BackFillSync { } /// Updates the global network state indicating the current state of a backfill sync. + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn set_state(&self, state: BackFillState) { *self.network_globals.backfill_state.write() = state; } + #[instrument(parent = None, + level = "info", + fields(service = "backfill_sync"), + name = "backfill_sync", + skip_all + )] fn state(&self) -> BackFillState { self.network_globals.backfill_state.read().clone() } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a29f9cf402..8c884f644e 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -41,11 +41,11 @@ use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; -use slog::{debug, error, warn, Logger}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use store::Hash256; +use tracing::{debug, error, instrument, warn}; use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; @@ -116,9 +116,6 @@ pub struct BlockLookups { // TODO: Why not index lookups by block_root? single_block_lookups: FnvHashMap>, - - /// The logger for the import manager. 
- log: Logger, } #[cfg(test)] @@ -130,27 +127,45 @@ use lighthouse_network::service::api_types::Id; pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec); impl BlockLookups { - pub fn new(log: Logger) -> Self { + #[instrument(parent = None,level = "info", fields(service = "lookup_sync"), name = "lookup_sync")] + pub fn new() -> Self { Self { failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), - log, } } #[cfg(test)] + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { self.failed_chains.insert(block_root); } #[cfg(test)] + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub(crate) fn get_failed_chains(&mut self) -> Vec { self.failed_chains.keys().cloned().collect() } #[cfg(test)] + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub(crate) fn active_single_lookups(&self) -> Vec { self.single_block_lookups .iter() @@ -159,6 +174,12 @@ impl BlockLookups { } /// Returns a vec of all parent lookup chains by tip, in descending slot order (tip first) + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub(crate) fn active_parent_lookups(&self) -> Vec { compute_parent_chains( &self @@ -173,6 +194,12 @@ impl BlockLookups { /// Creates a parent lookup for the block with the given `block_root` and immediately triggers it. /// If a parent lookup exists or is triggered, a current lookup will be created. 
+ #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn search_child_and_parent( &mut self, block_root: Hash256, @@ -202,6 +229,12 @@ impl BlockLookups { /// Seach a block whose parent root is unknown. /// Returns true if the lookup is created or already exists + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn search_unknown_block( &mut self, block_root: Hash256, @@ -217,6 +250,12 @@ impl BlockLookups { /// - `block_root_to_search` is a failed chain /// /// Returns true if the lookup is created or already exists + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn search_parent_of_child( &mut self, block_root_to_search: Hash256, @@ -238,7 +277,7 @@ impl BlockLookups { if (block_would_extend_chain || trigger_is_chain_tip) && parent_chain.len() >= PARENT_DEPTH_TOLERANCE { - debug!(self.log, "Parent lookup chain too long"; "block_root" => ?block_root_to_search); + debug!(block_root = ?block_root_to_search, "Parent lookup chain too long"); // Searching for this parent would extend a parent chain over the max // Insert the tip only to failed chains @@ -283,9 +322,10 @@ impl BlockLookups { }); } else { // Should never happen, log error and continue the lookup drop - error!(self.log, "Unable to transition lookup to range sync"; - "error" => "Parent chain tip lookup not found", - "block_root" => ?parent_chain_tip + error!( + error = "Parent chain tip lookup not found", + block_root = ?parent_chain_tip, + "Unable to transition lookup to range sync" ); } @@ -299,9 +339,10 @@ impl BlockLookups { self.drop_lookup_and_children(*lookup_id); } else { // Should never happen - error!(self.log, "Unable to transition lookup to range sync"; - "error" => "Block to drop lookup not found", - "block_root" => ?block_to_drop + error!( + error = "Block to drop 
lookup not found", + block_root = ?block_to_drop, + "Unable to transition lookup to range sync" ); } @@ -316,6 +357,12 @@ impl BlockLookups { /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. /// Returns true if the lookup is created or already exists + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn new_current_lookup( &mut self, block_root: Hash256, @@ -326,7 +373,7 @@ impl BlockLookups { ) -> bool { // If this block or it's parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&block_root) { - debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => ?block_root); + debug!(?block_root, "Block is from a past failed chain. Dropping"); for peer_id in peers { cx.report_peer(*peer_id, PeerAction::MidToleranceError, "failed_chain"); } @@ -343,12 +390,15 @@ impl BlockLookups { let component_type = block_component.get_type(); let imported = lookup.add_child_components(block_component); if !imported { - debug!(self.log, "Lookup child component ignored"; "block_root" => ?block_root, "type" => component_type); + debug!( + ?block_root, + component_type, "Lookup child component ignored" + ); } } if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers, cx) { - warn!(self.log, "Error adding peers to ancestor lookup"; "error" => ?e); + warn!(error = ?e, "Error adding peers to ancestor lookup"); } return true; @@ -361,7 +411,7 @@ impl BlockLookups { .iter() .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) { - warn!(self.log, "Ignoring child lookup parent lookup not found"; "block_root" => ?awaiting_parent); + warn!(block_root = ?awaiting_parent, "Ignoring child lookup parent lookup not found"); return false; } } @@ -369,7 +419,7 @@ impl BlockLookups { // Lookups contain untrusted data, bound the total count of lookups hold in memory to reduce // the risk of OOM in case of 
bugs of malicious activity. if self.single_block_lookups.len() > MAX_LOOKUPS { - warn!(self.log, "Dropping lookup reached max"; "block_root" => ?block_root); + warn!(?block_root, "Dropping lookup reached max"); return false; } @@ -387,18 +437,19 @@ impl BlockLookups { Entry::Vacant(entry) => entry.insert(lookup), Entry::Occupied(_) => { // Should never happen - warn!(self.log, "Lookup exists with same id"; "id" => id); + warn!(id, "Lookup exists with same id"); return false; } }; debug!( - self.log, - "Created block lookup"; - "peer_ids" => ?peers, - "block_root" => ?block_root, - "awaiting_parent" => awaiting_parent.map(|root| root.to_string()).unwrap_or("none".to_owned()), - "id" => lookup.id, + ?peers, + ?block_root, + awaiting_parent = awaiting_parent + .map(|root| root.to_string()) + .unwrap_or("none".to_owned()), + id = lookup.id, + "Created block lookup" ); metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); @@ -414,6 +465,12 @@ impl BlockLookups { /* Lookup responses */ /// Process a block or blob response received from a single lookup request. + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn on_download_response>( &mut self, id: SingleLookupReqId, @@ -437,7 +494,7 @@ impl BlockLookups { let Some(lookup) = self.single_block_lookups.get_mut(&id.lookup_id) else { // We don't have the ability to cancel in-flight RPC requests. So this can happen // if we started this RPC request, and later saw the block/blobs via gossip. 
- debug!(self.log, "Block returned for single block lookup not present"; "id" => ?id); + debug!(?id, "Block returned for single block lookup not present"); return Err(LookupRequestError::UnknownLookup); }; @@ -448,12 +505,12 @@ impl BlockLookups { match response { Ok((response, peer_group, seen_timestamp)) => { - debug!(self.log, - "Received lookup download success"; - "block_root" => ?block_root, - "id" => ?id, - "peer_group" => ?peer_group, - "response_type" => ?response_type, + debug!( + ?block_root, + ?id, + ?peer_group, + ?response_type, + "Received lookup download success" ); // Here we could check if response extends a parent chain beyond its max length. @@ -481,12 +538,12 @@ impl BlockLookups { Err(e) => { // No need to log peer source here. When sending a DataColumnsByRoot request we log // the peer and the request ID which is linked to this `id` value here. - debug!(self.log, - "Received lookup download failure"; - "block_root" => ?block_root, - "id" => ?id, - "response_type" => ?response_type, - "error" => ?e, + debug!( + ?block_root, + ?id, + ?response_type, + error = ?e, + "Received lookup download failure" ); request_state.on_download_failure(id.req_id)?; @@ -499,6 +556,12 @@ impl BlockLookups { /* Error responses */ + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn peer_disconnected(&mut self, peer_id: &PeerId) { for (_, lookup) in self.single_block_lookups.iter_mut() { lookup.remove_peer(peer_id); @@ -507,6 +570,12 @@ impl BlockLookups { /* Processing responses */ + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn on_processing_result( &mut self, process_type: BlockProcessType, @@ -527,6 +596,12 @@ impl BlockLookups { self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = 
"lookup_sync", + skip_all + )] pub fn on_processing_result_inner>( &mut self, lookup_id: SingleLookupId, @@ -534,7 +609,7 @@ impl BlockLookups { cx: &mut SyncNetworkContext, ) -> Result { let Some(lookup) = self.single_block_lookups.get_mut(&lookup_id) else { - debug!(self.log, "Unknown single block lookup"; "id" => lookup_id); + debug!(id = lookup_id, "Unknown single block lookup"); return Err(LookupRequestError::UnknownLookup); }; @@ -544,12 +619,11 @@ impl BlockLookups { .get_state_mut(); debug!( - self.log, - "Received lookup processing result"; - "component" => ?R::response_type(), - "block_root" => ?block_root, - "id" => lookup_id, - "result" => ?result, + component = ?R::response_type(), + ?block_root, + id = lookup_id, + ?result, + "Received lookup processing result" ); let action = match result { @@ -581,20 +655,15 @@ impl BlockLookups { BlockProcessingResult::Err(BlockError::DuplicateImportStatusUnknown(..)) => { // This is unreachable because RPC blocks do not undergo gossip verification, and // this error can *only* come from gossip verification. - error!( - self.log, - "Single block lookup hit unreachable condition"; - "block_root" => ?block_root - ); + error!(?block_root, "Single block lookup hit unreachable condition"); Action::Drop } BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. 
warn!( - self.log, - "Lookup component processing ignored, cpu might be overloaded"; - "component" => ?R::response_type(), + component = ?R::response_type(), + "Lookup component processing ignored, cpu might be overloaded" ); Action::Drop } @@ -602,7 +671,7 @@ impl BlockLookups { match e { BlockError::BeaconChainError(e) => { // Internal error - error!(self.log, "Beacon chain error processing lookup component"; "block_root" => %block_root, "error" => ?e); + error!(%block_root, error = ?e, "Beacon chain error processing lookup component"); Action::Drop } BlockError::ParentUnknown { parent_root, .. } => { @@ -618,10 +687,9 @@ impl BlockLookups { // These errors indicate that the execution layer is offline // and failed to validate the execution payload. Do not downscore peer. debug!( - self.log, - "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; - "block_root" => ?block_root, - "error" => ?e + ?block_root, + error = ?e, + "Single block lookup failed. Execution layer is offline / unsynced / misconfigured" ); Action::Drop } @@ -629,7 +697,7 @@ impl BlockLookups { if e.category() == AvailabilityCheckErrorCategory::Internal => { // There errors indicate internal problems and should not downscore the peer - warn!(self.log, "Internal availability check failure"; "block_root" => ?block_root, "error" => ?e); + warn!(?block_root, error = ?e, "Internal availability check failure"); // Here we choose *not* to call `on_processing_failure` because this could result in a bad // lookup state transition. 
This error invalidates both blob and block requests, and we don't know the @@ -638,7 +706,12 @@ impl BlockLookups { Action::Drop } other => { - debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); + debug!( + ?block_root, + component = ?R::response_type(), + error = ?other, + "Invalid lookup component" + ); let peer_group = request_state.on_processing_failure()?; let peers_to_penalize: Vec<_> = match other { // Note: currenlty only InvalidColumn errors have index granularity, @@ -685,7 +758,12 @@ impl BlockLookups { Action::ParentUnknown { parent_root } => { let peers = lookup.all_peers(); lookup.set_awaiting_parent(parent_root); - debug!(self.log, "Marking lookup as awaiting parent"; "id" => lookup.id, "block_root" => ?block_root, "parent_root" => ?parent_root); + debug!( + id = lookup.id, + ?block_root, + ?parent_root, + "Marking lookup as awaiting parent" + ); self.search_parent_of_child(parent_root, block_root, &peers, cx); Ok(LookupResult::Pending) } @@ -700,6 +778,12 @@ impl BlockLookups { } } + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn on_external_processing_result( &mut self, block_root: Hash256, @@ -725,13 +809,24 @@ impl BlockLookups { } /// Makes progress on the immediate children of `block_root` + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn continue_child_lookups(&mut self, block_root: Hash256, cx: &mut SyncNetworkContext) { let mut lookup_results = vec![]; // < need to buffer lookup results to not re-borrow &mut self for (id, lookup) in self.single_block_lookups.iter_mut() { if lookup.awaiting_parent() == Some(block_root) { lookup.resolve_awaiting_parent(); - debug!(self.log, "Continuing child lookup"; "parent_root" => ?block_root, "id" => id, "block_root" => ?lookup.block_root()); + debug!( + parent_root = 
?block_root, + id, + block_root = ?lookup.block_root(), + "Continuing child lookup" + ); let result = lookup.continue_requests(cx); lookup_results.push((*id, result)); } @@ -745,12 +840,19 @@ impl BlockLookups { /// Drops `dropped_id` lookup and all its children recursively. Lookups awaiting a parent need /// the parent to make progress to resolve, therefore we must drop them if the parent is /// dropped. + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId) { if let Some(dropped_lookup) = self.single_block_lookups.remove(&dropped_id) { - debug!(self.log, "Dropping lookup"; - "id" => ?dropped_id, - "block_root" => ?dropped_lookup.block_root(), - "awaiting_parent" => ?dropped_lookup.awaiting_parent(), + debug!( + id = ?dropped_id, + block_root = ?dropped_lookup.block_root(), + awaiting_parent = ?dropped_lookup.awaiting_parent(), + "Dropping lookup" ); let child_lookups = self @@ -768,6 +870,12 @@ impl BlockLookups { /// Common handler a lookup request error, drop it and update metrics /// Returns true if the lookup is created or already exists + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn on_lookup_result( &mut self, id: SingleLookupId, @@ -779,13 +887,13 @@ impl BlockLookups { Ok(LookupResult::Pending) => true, // no action Ok(LookupResult::Completed) => { if let Some(lookup) = self.single_block_lookups.remove(&id) { - debug!(self.log, "Dropping completed lookup"; "block" => ?lookup.block_root(), "id" => id); + debug!(block = ?lookup.block_root(), id, "Dropping completed lookup"); metrics::inc_counter(&metrics::SYNC_LOOKUP_COMPLETED); // Block imported, continue the requests of pending child blocks self.continue_child_lookups(lookup.block_root(), cx); self.update_metrics(); } else { - debug!(self.log, "Attempting to drop non-existent lookup"; "id" 
=> id); + debug!(id, "Attempting to drop non-existent lookup"); } false } @@ -793,7 +901,7 @@ impl BlockLookups { // update metrics because the lookup does not exist. Err(LookupRequestError::UnknownLookup) => false, Err(error) => { - debug!(self.log, "Dropping lookup on request error"; "id" => id, "source" => source, "error" => ?error); + debug!(id, source, ?error, "Dropping lookup on request error"); metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[error.into()]); self.drop_lookup_and_children(id); self.update_metrics(); @@ -805,12 +913,24 @@ impl BlockLookups { /* Helper functions */ /// Drops all the single block requests and returns how many requests were dropped. + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn drop_single_block_requests(&mut self) -> usize { let requests_to_drop = self.single_block_lookups.len(); self.single_block_lookups.clear(); requests_to_drop } + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn update_metrics(&self) { metrics::set_gauge( &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, @@ -819,6 +939,12 @@ impl BlockLookups { } /// Perform some prune operations on lookups on some interval + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] pub fn prune_lookups(&mut self) { self.drop_lookups_without_peers(); self.drop_stuck_lookups(); @@ -842,6 +968,12 @@ impl BlockLookups { /// /// Instead there's no negative for keeping lookups with no peers around for some time. If we /// regularly prune them, it should not be a memory concern (TODO: maybe yes!). 
+ #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn drop_lookups_without_peers(&mut self) { for (lookup_id, block_root) in self .single_block_lookups @@ -857,9 +989,10 @@ impl BlockLookups { .map(|lookup| (lookup.id, lookup.block_root())) .collect::>() { - debug!(self.log, "Dropping lookup with no peers"; - "id" => lookup_id, - "block_root" => ?block_root + debug!( + id = lookup_id, + %block_root, + "Dropping lookup with no peers" ); self.drop_lookup_and_children(lookup_id); } @@ -878,6 +1011,12 @@ impl BlockLookups { /// /// - One single clear warn level log per stuck incident /// - If the original bug is sporadic, it reduces the time a node is stuck from forever to 15 min + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn drop_stuck_lookups(&mut self) { // While loop to find and drop all disjoint trees of potentially stuck lookups. while let Some(stuck_lookup) = self.single_block_lookups.values().find(|lookup| { @@ -886,7 +1025,7 @@ impl BlockLookups { let ancestor_stuck_lookup = match self.find_oldest_ancestor_lookup(stuck_lookup) { Ok(lookup) => lookup, Err(e) => { - warn!(self.log, "Error finding oldest ancestor lookup"; "error" => ?e); + warn!(error = ?e,"Error finding oldest ancestor lookup"); // Default to dropping the lookup that exceeds the max duration so at least // eventually sync should be unstuck stuck_lookup @@ -894,16 +1033,18 @@ impl BlockLookups { }; if stuck_lookup.id == ancestor_stuck_lookup.id { - warn!(self.log, "Notify the devs a sync lookup is stuck"; - "block_root" => ?stuck_lookup.block_root(), - "lookup" => ?stuck_lookup, + warn!( + block_root = ?stuck_lookup.block_root(), + lookup = ?stuck_lookup, + "Notify the devs a sync lookup is stuck" ); } else { - warn!(self.log, "Notify the devs a sync lookup is stuck"; - "block_root" => ?stuck_lookup.block_root(), - "lookup" => ?stuck_lookup, - 
"ancestor_block_root" => ?ancestor_stuck_lookup.block_root(), - "ancestor_lookup" => ?ancestor_stuck_lookup, + warn!( + block_root = ?stuck_lookup.block_root(), + lookup = ?stuck_lookup, + ancestor_block_root = ?ancestor_stuck_lookup.block_root(), + ancestor_lookup = ?ancestor_stuck_lookup, + "Notify the devs a sync lookup is stuck" ); } @@ -913,6 +1054,12 @@ impl BlockLookups { } /// Recursively find the oldest ancestor lookup of another lookup + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn find_oldest_ancestor_lookup<'a>( &'a self, lookup: &'a SingleBlockLookup, @@ -937,6 +1084,12 @@ impl BlockLookups { /// Adds peers to a lookup and its ancestors recursively. /// Note: Takes a `lookup_id` as argument to allow recursion on mutable lookups, without having /// to duplicate the code to add peers to a lookup + #[instrument(parent = None, + level = "info", + fields(service = "lookup_sync"), + name = "lookup_sync", + skip_all + )] fn add_peers_to_lookup_and_ancestors( &mut self, lookup_id: SingleLookupId, @@ -952,9 +1105,10 @@ impl BlockLookups { for peer in peers { if lookup.add_peer(*peer) { added_some_peer = true; - debug!(self.log, "Adding peer to existing single block lookup"; - "block_root" => ?lookup.block_root(), - "peer" => ?peer + debug!( + block_root = ?lookup.block_root(), + ?peer, + "Adding peer to existing single block lookup" ); } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a9e5f646cc..671fa1e3b4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -63,12 +63,13 @@ use lighthouse_network::service::api_types::{ use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; +use logging::crit; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, info, o, trace, warn, Logger}; use 
std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; +use tracing::{debug, error, info, info_span, trace, warn, Instrument}; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, }; @@ -246,9 +247,6 @@ pub struct SyncManager { notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>, sampling: Sampling, - - /// The logger for the import manager. - log: Logger, } /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon @@ -261,7 +259,6 @@ pub fn spawn( beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, fork_context: Arc, - log: slog::Logger, ) { assert!( beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, @@ -276,12 +273,18 @@ pub fn spawn( sync_recv, SamplingConfig::Default, fork_context, - log.clone(), ); // spawn the sync manager thread - debug!(log, "Sync Manager started"); - executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync"); + debug!("Sync Manager started"); + executor.spawn( + async move { + Box::pin(sync_manager.main()) + .instrument(info_span!("", service = "sync")) + .await + }, + "sync", + ); } impl SyncManager { @@ -292,7 +295,6 @@ impl SyncManager { sync_recv: mpsc::UnboundedReceiver>, sampling_config: SamplingConfig, fork_context: Arc, - log: slog::Logger, ) -> Self { let network_globals = beacon_processor.network_globals.clone(); Self { @@ -303,23 +305,14 @@ impl SyncManager { beacon_processor.clone(), beacon_chain.clone(), fork_context.clone(), - log.clone(), ), - range_sync: RangeSync::new( - beacon_chain.clone(), - log.new(o!("service" => "range_sync")), - ), - backfill_sync: BackFillSync::new( - beacon_chain.clone(), - network_globals, - log.new(o!("service" => "backfill_sync")), - ), - block_lookups: BlockLookups::new(log.new(o!("service"=> "lookup_sync"))), + range_sync: RangeSync::new(beacon_chain.clone()), + backfill_sync: 
BackFillSync::new(beacon_chain.clone(), network_globals), + block_lookups: BlockLookups::new(), notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, )), - sampling: Sampling::new(sampling_config, log.new(o!("service" => "sampling"))), - log: log.clone(), + sampling: Sampling::new(sampling_config), } } @@ -461,10 +454,10 @@ impl SyncManager { }; let head_slot = head_slot.unwrap_or_else(|| { - debug!(self.log, - "On add peers force range sync assuming local head_slot"; - "local_head_slot" => local.head_slot, - "head_root" => ?head_root + debug!( + local_head_slot = %local.head_slot, + ?head_root, + "On add peers force range sync assuming local head_slot" ); local.head_slot }); @@ -485,7 +478,7 @@ impl SyncManager { /// Handles RPC errors related to requests that were emitted from the sync manager. fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { - trace!(self.log, "Sync manager received a failed RPC"); + trace!("Sync manager received a failed RPC"); match request_id { SyncRequestId::SingleBlock { id } => { self.on_single_block_response(id, peer_id, RpcEvent::RPCError(error)) @@ -565,15 +558,14 @@ impl SyncManager { let is_connected = self.network_globals().peers.read().is_connected(peer_id); if was_updated { debug!( - self.log, - "Peer transitioned sync state"; - "peer_id" => %peer_id, - "new_state" => rpr, - "our_head_slot" => local_sync_info.head_slot, - "our_finalized_epoch" => local_sync_info.finalized_epoch, - "their_head_slot" => remote_sync_info.head_slot, - "their_finalized_epoch" => remote_sync_info.finalized_epoch, - "is_connected" => is_connected + %peer_id, + new_state = rpr, + our_head_slot = %local_sync_info.head_slot, + our_finalized_epoch = %local_sync_info.finalized_epoch, + their_head_slot = %remote_sync_info.head_slot, + their_finalized_epoch = %remote_sync_info.finalized_epoch, + is_connected, + "Peer transitioned sync state" ); // A peer has transitioned its 
sync state. If the new state is "synced" we @@ -584,7 +576,7 @@ impl SyncManager { } is_connected } else { - error!(self.log, "Status'd peer is unknown"; "peer_id" => %peer_id); + error!(%peer_id, "Status'd peer is unknown"); false } } @@ -603,7 +595,7 @@ impl SyncManager { fn update_sync_state(&mut self) { let new_state: SyncState = match self.range_sync.state() { Err(e) => { - crit!(self.log, "Error getting range sync state"; "error" => %e); + crit!(error = %e, "Error getting range sync state"); return; } Ok(state) => match state { @@ -652,7 +644,7 @@ impl SyncManager { } Ok(SyncStart::NotSyncing) => {} // Ignore updating the state if the backfill sync state didn't start. Err(e) => { - error!(self.log, "Backfill sync failed to start"; "error" => ?e); + error!(error = ?e, "Backfill sync failed to start"); } } } @@ -686,7 +678,7 @@ impl SyncManager { let old_state = self.network_globals().set_sync_state(new_state); let new_state = self.network_globals().sync_state.read().clone(); if !new_state.eq(&old_state) { - info!(self.log, "Sync state updated"; "old_state" => %old_state, "new_state" => %new_state); + info!(%old_state, %new_state, "Sync state updated"); // If we have become synced - Subscribe to all the core subnet topics // We don't need to subscribe if the old state is a state that would have already // invoked this call. 
@@ -781,7 +773,7 @@ impl SyncManager { SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); - debug!(self.log, "Received unknown parent block message"; "block_root" => %block_root, "parent_root" => %parent_root); + debug!(%block_root, %parent_root, "Received unknown parent block message"); self.handle_unknown_parent( peer_id, block_root, @@ -799,7 +791,7 @@ impl SyncManager { let blob_slot = blob.slot(); let block_root = blob.block_root(); let parent_root = blob.block_parent_root(); - debug!(self.log, "Received unknown parent blob message"; "block_root" => %block_root, "parent_root" => %parent_root); + debug!(%block_root, %parent_root, "Received unknown parent blob message"); self.handle_unknown_parent( peer_id, block_root, @@ -817,7 +809,7 @@ impl SyncManager { let data_column_slot = data_column.slot(); let block_root = data_column.block_root(); let parent_root = data_column.block_parent_root(); - debug!(self.log, "Received unknown parent data column message"; "block_root" => %block_root, "parent_root" => %parent_root); + debug!(%block_root, %parent_root, "Received unknown parent data column message"); self.handle_unknown_parent( peer_id, block_root, @@ -834,12 +826,12 @@ impl SyncManager { SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { self.notified_unknown_roots.insert((peer_id, block_root)); - debug!(self.log, "Received unknown block hash message"; "block_root" => ?block_root, "peer" => ?peer_id); + debug!(?block_root, ?peer_id, "Received unknown block hash message"); self.handle_unknown_block_root(peer_id, block_root); } } SyncMessage::SampleBlock(block_root, block_slot) => { - debug!(self.log, "Received SampleBlock message"; "block_root" => %block_root, "slot" => block_slot); + debug!(%block_root, slot = %block_slot, "Received SampleBlock message"); if let Some((requester, result)) = self 
.sampling .on_new_sample_request(block_root, &mut self.network) @@ -848,7 +840,7 @@ impl SyncManager { } } SyncMessage::Disconnect(peer_id) => { - debug!(self.log, "Received disconnected message"; "peer_id" => %peer_id); + debug!(%peer_id, "Received disconnected message"); self.peer_disconnect(&peer_id); } SyncMessage::RpcError { @@ -889,7 +881,7 @@ impl SyncManager { Ok(ProcessResult::Successful) => {} Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Err(error) => { - error!(self.log, "Backfill sync failed"; "error" => ?error); + error!(error = ?error, "Backfill sync failed"); // Update the global status self.update_sync_state(); } @@ -925,7 +917,7 @@ impl SyncManager { ); } Err(reason) => { - debug!(self.log, "Ignoring unknown parent request"; "block_root" => %block_root, "parent_root" => %parent_root, "reason" => reason); + debug!(%block_root, %parent_root, reason, "Ignoring unknown parent request"); } } } @@ -937,7 +929,7 @@ impl SyncManager { .search_unknown_block(block_root, &[peer_id], &mut self.network); } Err(reason) => { - debug!(self.log, "Ignoring unknown block request"; "block_root" => %block_root, "reason" => reason); + debug!(%block_root, reason, "Ignoring unknown block request"); } } } @@ -1015,8 +1007,9 @@ impl SyncManager { // Some logs. if dropped_single_blocks_requests > 0 { - debug!(self.log, "Execution engine not online. Dropping active requests."; - "dropped_single_blocks_requests" => dropped_single_blocks_requests, + debug!( + dropped_single_blocks_requests, + "Execution engine not online. Dropping active requests." 
); } } @@ -1042,7 +1035,7 @@ impl SyncManager { RpcEvent::from_chunk(block, seen_timestamp), ), _ => { - crit!(self.log, "bad request id for block"; "peer_id" => %peer_id ); + crit!(%peer_id, "bad request id for block"); } } } @@ -1084,7 +1077,7 @@ impl SyncManager { RpcEvent::from_chunk(blob, seen_timestamp), ), _ => { - crit!(self.log, "bad request id for blob"; "peer_id" => %peer_id); + crit!(%peer_id, "bad request id for blob"); } } } @@ -1110,7 +1103,7 @@ impl SyncManager { RpcEvent::from_chunk(data_column, seen_timestamp), ), _ => { - crit!(self.log, "bad request id for data_column"; "peer_id" => %peer_id); + crit!(%peer_id, "bad request id for data_column"); } } } @@ -1228,7 +1221,7 @@ impl SyncManager { fn on_sampling_result(&mut self, requester: SamplingRequester, result: SamplingResult) { match requester { SamplingRequester::ImportedBlock(block_root) => { - debug!(self.log, "Sampling result"; "block_root" => %block_root, "result" => ?result); + debug!(%block_root, ?result, "Sampling result"); match result { Ok(_) => { @@ -1239,11 +1232,11 @@ impl SyncManager { .beacon_processor() .send_sampling_completed(block_root) { - warn!(self.log, "Error sending sampling result"; "block_root" => ?block_root, "reason" => ?e); + warn!(?block_root, reason = ?e, "Error sending sampling result"); } } Err(e) => { - warn!(self.log, "Sampling failed"; "block_root" => %block_root, "reason" => ?e); + warn!(?block_root, reason = ?e, "Sampling failed"); } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 968a9bcddd..68a963dd41 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -34,13 +34,13 @@ use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, }; -use slog::{debug, error, warn}; use 
std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; +use tracing::{debug, error, span, warn, Level}; use types::blob_sidecar::FixedBlobSidecarList; use types::{ BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, ForkContext, @@ -74,10 +74,10 @@ pub type CustodyByRootResult = #[derive(Debug)] pub enum RpcResponseError { - RpcError(RPCError), + RpcError(#[allow(dead_code)] RPCError), VerifyError(LookupVerifyError), - CustodyRequestError(CustodyRequestError), - BlockComponentCouplingError(String), + CustodyRequestError(#[allow(dead_code)] CustodyRequestError), + BlockComponentCouplingError(#[allow(dead_code)] String), } #[derive(Debug, PartialEq, Eq)] @@ -89,6 +89,19 @@ pub enum RpcRequestSendError { SlotClockError, } +impl std::fmt::Display for RpcRequestSendError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + RpcRequestSendError::NetworkSendError => write!(f, "Network send error"), + RpcRequestSendError::NoCustodyPeers => write!(f, "No custody peers"), + RpcRequestSendError::CustodyRequestError(e) => { + write!(f, "Custody request error: {:?}", e) + } + RpcRequestSendError::SlotClockError => write!(f, "Slot clock error"), + } + } +} + #[derive(Debug, PartialEq, Eq)] pub enum SendErrorProcessor { SendError, @@ -201,9 +214,6 @@ pub struct SyncNetworkContext { pub chain: Arc>, fork_context: Arc, - - /// Logger for the `SyncNetworkContext`. - pub log: slog::Logger, } /// Small enumeration to make dealing with block and blob requests easier. 
@@ -219,8 +229,13 @@ impl SyncNetworkContext { network_beacon_processor: Arc>, chain: Arc>, fork_context: Arc, - log: slog::Logger, ) -> Self { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start @@ -236,7 +251,6 @@ impl SyncNetworkContext { network_beacon_processor, chain, fork_context, - log, } } @@ -267,7 +281,6 @@ impl SyncNetworkContext { network_beacon_processor: _, chain: _, fork_context: _, - log: _, } = self; let blocks_by_root_ids = blocks_by_root_requests @@ -330,17 +343,23 @@ impl SyncNetworkContext { } pub fn status_peers(&self, chain: &C, peers: impl Iterator) { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let status_message = chain.status_message(); for peer_id in peers { debug!( - self.log, - "Sending Status Request"; - "peer" => %peer_id, - "fork_digest" => ?status_message.fork_digest, - "finalized_root" => ?status_message.finalized_root, - "finalized_epoch" => ?status_message.finalized_epoch, - "head_root" => %status_message.head_root, - "head_slot" => %status_message.head_slot, + peer = %peer_id, + fork_digest = ?status_message.fork_digest, + finalized_root = ?status_message.finalized_root, + finalized_epoch = ?status_message.finalized_epoch, + head_root = %status_message.head_root, + head_slot = %status_message.head_slot, + "Sending Status Request" ); let request = RequestType::Status(status_message.clone()); @@ -385,7 +404,6 @@ impl SyncNetworkContext { let (expects_columns, data_column_requests) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { let column_indexes = self.network_globals().sampling_columns.clone(); - let data_column_requests = self .make_columns_by_range_requests(request, &column_indexes)? 
.into_iter() @@ -518,6 +536,13 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::Pending("no peers")); }; + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + match self.chain.get_block_process_status(&block_root) { // Unknown block, continue request to download BlockProcessStatus::Unknown => {} @@ -560,12 +585,11 @@ impl SyncNetworkContext { .map_err(|_| RpcRequestSendError::NetworkSendError)?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "BlocksByRoot", - "block_root" => ?block_root, - "peer" => %peer_id, - "id" => %id + method = "BlocksByRoot", + ?block_root, + peer = %peer_id, + %id, + "Sync RPC request sent" ); self.blocks_by_root_requests.insert( @@ -608,6 +632,13 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::Pending("no peers")); }; + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let imported_blob_indexes = self .chain .data_availability_checker @@ -643,13 +674,12 @@ impl SyncNetworkContext { .map_err(|_| RpcRequestSendError::NetworkSendError)?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "BlobsByRoot", - "block_root" => ?block_root, - "blob_indices" => ?indices, - "peer" => %peer_id, - "id" => %id + method = "BlobsByRoot", + ?block_root, + blob_indices = ?indices, + peer = %peer_id, + %id, + "Sync RPC request sent" ); self.blobs_by_root_requests.insert( @@ -673,6 +703,13 @@ impl SyncNetworkContext { request: DataColumnsByRootSingleBlockRequest, expect_max_responses: bool, ) -> Result, &'static str> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let id = DataColumnsByRootRequestId { id: self.next_id(), requester, @@ -685,13 +722,12 @@ impl SyncNetworkContext { })?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "DataColumnsByRoot", - "block_root" => 
?request.block_root, - "indices" => ?request.indices, - "peer" => %peer_id, - "id" => %id, + method = "DataColumnsByRoot", + block_root = ?request.block_root, + indices = ?request.indices, + peer = %peer_id, + %id, + "Sync RPC request sent" ); self.data_columns_by_root_requests.insert( @@ -714,6 +750,13 @@ impl SyncNetworkContext { block_root: Hash256, lookup_peers: Arc>>, ) -> Result { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let custody_indexes_imported = self .chain .data_availability_checker @@ -740,11 +783,10 @@ impl SyncNetworkContext { }; debug!( - self.log, - "Starting custody columns request"; - "block_root" => ?block_root, - "indices" => ?custody_indexes_to_fetch, - "id" => %id + ?block_root, + indices = ?custody_indexes_to_fetch, + %id, + "Starting custody columns request" ); let requester = CustodyRequester(id); @@ -753,7 +795,6 @@ impl SyncNetworkContext { CustodyId { requester }, &custody_indexes_to_fetch, lookup_peers, - self.log.clone(), ); // Note that you can only send, but not handle a response here @@ -788,13 +829,12 @@ impl SyncNetworkContext { .map_err(|_| RpcRequestSendError::NetworkSendError)?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "BlocksByRange", - "slots" => request.count(), - "epoch" => Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), - "peer" => %peer_id, - "id" => %id, + method = "BlocksByRange", + slots = request.count(), + epoch = %Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), + peer = %peer_id, + %id, + "Sync RPC request sent" ); self.blocks_by_range_requests.insert( @@ -830,13 +870,12 @@ impl SyncNetworkContext { .map_err(|_| RpcRequestSendError::NetworkSendError)?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "BlobsByRange", - "slots" => request.count, - "epoch" => request_epoch, - "peer" => %peer_id, - "id" => %id, + method = "BlobsByRange", + slots = 
request.count, + epoch = %request_epoch, + peer = %peer_id, + %id, + "Sync RPC request sent" ); let max_blobs_per_block = self.chain.spec.max_blobs_per_block(request_epoch); @@ -870,14 +909,13 @@ impl SyncNetworkContext { .map_err(|_| RpcRequestSendError::NetworkSendError)?; debug!( - self.log, - "Sync RPC request sent"; - "method" => "DataColumnsByRange", - "slots" => request.count, - "epoch" => Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), - "columns" => ?request.columns, - "peer" => %peer_id, - "id" => %id, + method = "DataColumnsByRange", + slots = request.count, + epoch = %Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + columns = ?request.columns, + peer = %peer_id, + %id, + "Sync RPC request sent" ); self.data_columns_by_range_requests.insert( @@ -896,13 +934,26 @@ impl SyncNetworkContext { } pub fn update_execution_engine_state(&mut self, engine_state: EngineState) { - debug!(self.log, "Sync's view on execution engine state updated"; - "past_state" => ?self.execution_engine_state, "new_state" => ?engine_state); + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + + debug!(past_state = ?self.execution_engine_state, new_state = ?engine_state, "Sync's view on execution engine state updated"); self.execution_engine_state = engine_state; } /// Terminates the connection with the peer and bans them. pub fn goodbye_peer(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + self.network_send .send(NetworkMessage::GoodbyePeer { peer_id, @@ -910,13 +961,20 @@ impl SyncNetworkContext { source: ReportSource::SyncService, }) .unwrap_or_else(|_| { - warn!(self.log, "Could not report peer: channel failed"); + warn!("Could not report peer: channel failed"); }); } /// Reports to the scoring algorithm the behaviour of a peer. 
pub fn report_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { - debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action, "msg" => %msg); + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + + debug!(%peer_id, %action, %msg, "Sync reporting peer"); self.network_send .send(NetworkMessage::ReportPeer { peer_id, @@ -925,23 +983,37 @@ impl SyncNetworkContext { msg, }) .unwrap_or_else(|e| { - warn!(self.log, "Could not report peer: channel failed"; "error"=> %e); + warn!(error = %e, "Could not report peer: channel failed"); }); } /// Subscribes to core topics. pub fn subscribe_core_topics(&self) { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + self.network_send .send(NetworkMessage::SubscribeCoreTopics) .unwrap_or_else(|e| { - warn!(self.log, "Could not subscribe to core topics."; "error" => %e); + warn!(error = %e, "Could not subscribe to core topics."); }); } /// Sends an arbitrary network message. 
fn send_network_msg(&self, msg: NetworkMessage) -> Result<(), &'static str> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + self.network_send.send(msg).map_err(|_| { - debug!(self.log, "Could not send message to the network service"); + debug!("Could not send message to the network service"); "Network channel send Failed" }) } @@ -1128,20 +1200,18 @@ impl SyncNetworkContext { None => {} Some(Ok((v, _))) => { debug!( - self.log, - "Sync RPC request completed"; - "id" => %id, - "method" => method, - "count" => get_count(v) + %id, + method, + count = get_count(v), + "Sync RPC request completed" ); } Some(Err(e)) => { debug!( - self.log, - "Sync RPC request error"; - "id" => %id, - "method" => method, - "error" => ?e + %id, + method, + error = ?e, + "Sync RPC request error" ); } } @@ -1166,11 +1236,18 @@ impl SyncNetworkContext { peer_id: PeerId, resp: RpcResponseResult>>>, ) -> Option> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + // Note: need to remove the request to borrow self again below. Otherwise we can't // do nested requests let Some(mut request) = self.custody_by_root_requests.remove(&id.requester) else { // TOOD(das): This log can happen if the request is error'ed early and dropped - debug!(self.log, "Custody column downloaded event for unknown request"; "id" => ?id); + debug!(?id, "Custody column downloaded event for unknown request"); return None; }; @@ -1185,6 +1262,13 @@ impl SyncNetworkContext { request: ActiveCustodyRequest, result: CustodyRequestResult, ) -> Option> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let result = result .map_err(RpcResponseError::CustodyRequestError) .transpose(); @@ -1193,10 +1277,10 @@ impl SyncNetworkContext { // an Option first to use in an `if let Some() { act on result }` block. 
match result.as_ref() { Some(Ok((columns, peer_group, _))) => { - debug!(self.log, "Custody request success, removing"; "id" => ?id, "count" => columns.len(), "peers" => ?peer_group) + debug!(?id, count = columns.len(), peers = ?peer_group, "Custody request success, removing") } Some(Err(e)) => { - debug!(self.log, "Custody request failure, removing"; "id" => ?id, "error" => ?e) + debug!(?id, error = ?e, "Custody request failure, removing" ) } None => { self.custody_by_root_requests.insert(id, request); @@ -1212,11 +1296,18 @@ impl SyncNetworkContext { block: RpcBlock, seen_timestamp: Duration, ) -> Result<(), SendErrorProcessor> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let beacon_processor = self .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - debug!(self.log, "Sending block for processing"; "block" => ?block_root, "id" => id); + debug!(block = ?block_root, id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor @@ -1228,9 +1319,8 @@ impl SyncNetworkContext { ) .map_err(|e| { error!( - self.log, - "Failed to send sync block to processor"; - "error" => ?e + error = ?e, + "Failed to send sync block to processor" ); SendErrorProcessor::SendError }) @@ -1243,11 +1333,18 @@ impl SyncNetworkContext { blobs: FixedBlobSidecarList, seen_timestamp: Duration, ) -> Result<(), SendErrorProcessor> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let beacon_processor = self .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - debug!(self.log, "Sending blobs for processing"; "block" => ?block_root, "id" => id); + debug!(?block_root, ?id, "Sending blobs for processing"); // 
Lookup sync event safety: If `beacon_processor.send_rpc_blobs` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type beacon_processor @@ -1259,9 +1356,8 @@ impl SyncNetworkContext { ) .map_err(|e| { error!( - self.log, - "Failed to send sync blobs to processor"; - "error" => ?e + error = ?e, + "Failed to send sync blobs to processor" ); SendErrorProcessor::SendError }) @@ -1275,19 +1371,29 @@ impl SyncNetworkContext { seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), SendErrorProcessor> { + let span = span!( + Level::INFO, + "SyncNetworkContext", + service = "network_context" + ); + let _enter = span.enter(); + let beacon_processor = self .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - debug!(self.log, "Sending custody columns for processing"; "block" => ?block_root, "process_type" => ?process_type); + debug!( + ?block_root, + ?process_type, + "Sending custody columns for processing" + ); beacon_processor .send_rpc_custody_columns(block_root, custody_columns, seen_timestamp, process_type) .map_err(|e| { error!( - self.log, - "Failed to send sync custody columns to processor"; - "error" => ?e + error = ?e, + "Failed to send sync custody columns to processor" ); SendErrorProcessor::SendError }) diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 38353d3ea2..018381a850 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -9,10 +9,10 @@ use lighthouse_network::PeerId; use lru_cache::LRUTimeCache; use parking_lot::RwLock; use rand::Rng; -use slog::{debug, warn}; use std::collections::HashSet; use std::time::{Duration, Instant}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use tracing::{debug, warn}; use types::EthSpec; use types::{data_column_sidecar::ColumnIndex, 
DataColumnSidecar, Hash256}; @@ -36,8 +36,7 @@ pub struct ActiveCustodyRequest { failed_peers: LRUTimeCache, /// Set of peers that claim to have imported this block and their custody columns lookup_peers: Arc>>, - /// Logger for the `SyncNetworkContext`. - pub log: slog::Logger, + _phantom: PhantomData, } @@ -70,7 +69,6 @@ impl ActiveCustodyRequest { custody_id: CustodyId, column_indices: &[ColumnIndex], lookup_peers: Arc>>, - log: slog::Logger, ) -> Self { Self { block_root, @@ -83,7 +81,6 @@ impl ActiveCustodyRequest { active_batch_columns_requests: <_>::default(), failed_peers: LRUTimeCache::new(Duration::from_secs(FAILED_PEERS_CACHE_EXPIRY_SECONDS)), lookup_peers, - log, _phantom: PhantomData, } } @@ -104,24 +101,24 @@ impl ActiveCustodyRequest { cx: &mut SyncNetworkContext, ) -> CustodyRequestResult { let Some(batch_request) = self.active_batch_columns_requests.get_mut(&req_id) else { - warn!(self.log, - "Received custody column response for unrequested index"; - "id" => ?self.custody_id, - "block_root" => ?self.block_root, - "req_id" => %req_id, + warn!( + id = ?self.custody_id, + block_root = ?self.block_root, + %req_id, + "Received custody column response for unrequested index" ); return Ok(None); }; match resp { Ok((data_columns, seen_timestamp)) => { - debug!(self.log, - "Custody column download success"; - "id" => ?self.custody_id, - "block_root" => ?self.block_root, - "req_id" => %req_id, - "peer" => %peer_id, - "count" => data_columns.len() + debug!( + id = ?self.custody_id, + block_root = ?self.block_root, + %req_id, + %peer_id, + count = data_columns.len(), + "Custody column download success" ); // Map columns by index as an optimization to not loop the returned list on each @@ -163,27 +160,27 @@ impl ActiveCustodyRequest { if !missing_column_indexes.is_empty() { // Note: Batch logging that columns are missing to not spam logger - debug!(self.log, - "Custody column peer claims to not have some data"; - "id" => ?self.custody_id, - "block_root" => 
?self.block_root, - "req_id" => %req_id, - "peer" => %peer_id, + debug!( + id = ?self.custody_id, + block_root = ?self.block_root, + %req_id, + %peer_id, // TODO(das): this property can become very noisy, being the full range 0..128 - "missing_column_indexes" => ?missing_column_indexes + ?missing_column_indexes, + "Custody column peer claims to not have some data" ); self.failed_peers.insert(peer_id); } } Err(err) => { - debug!(self.log, - "Custody column download error"; - "id" => ?self.custody_id, - "block_root" => ?self.block_root, - "req_id" => %req_id, - "peer" => %peer_id, - "error" => ?err + debug!( + id = ?self.custody_id, + block_root = ?self.block_root, + %req_id, + %peer_id, + error = ?err, + "Custody column download error" ); // TODO(das): Should mark peer as failed and try from another peer diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 289ed73cdd..59b751787e 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -12,11 +12,11 @@ use lighthouse_network::service::api_types::{ }; use lighthouse_network::{PeerAction, PeerId}; use rand::{seq::SliceRandom, thread_rng}; -use slog::{debug, error, warn}; use std::{ collections::hash_map::Entry, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration, }; +use tracing::{debug, error, instrument, warn}; use types::{data_column_sidecar::ColumnIndex, ChainSpec, DataColumnSidecar, Hash256}; pub type SamplingResult = Result<(), SamplingError>; @@ -26,24 +26,35 @@ type DataColumnSidecarList = Vec>>; pub struct Sampling { requests: HashMap>, sampling_config: SamplingConfig, - log: slog::Logger, } impl Sampling { - pub fn new(sampling_config: SamplingConfig, log: slog::Logger) -> Self { + #[instrument(parent = None,level = "info", fields(service = "sampling"), name = "sampling")] + pub fn new(sampling_config: SamplingConfig) -> Self { Self { requests: <_>::default(), sampling_config, 
- log, } } #[cfg(test)] + #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] pub fn active_sampling_requests(&self) -> Vec { self.requests.values().map(|r| r.block_root).collect() } #[cfg(test)] + #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] pub fn get_request_status( &self, block_root: Hash256, @@ -61,6 +72,12 @@ impl Sampling { /// /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. /// - `None`: Request still active, requester should do no action + #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] pub fn on_new_sample_request( &mut self, block_root: Hash256, @@ -73,7 +90,6 @@ impl Sampling { block_root, id, &self.sampling_config, - self.log.clone(), &cx.chain.spec, )), Entry::Occupied(_) => { @@ -82,15 +98,15 @@ impl Sampling { // TODO(das): Should track failed sampling request for some time? Otherwise there's // a risk of a loop with multiple triggers creating the request, then failing, // and repeat. - debug!(self.log, "Ignoring duplicate sampling request"; "id" => ?id); + debug!(?id, "Ignoring duplicate sampling request"); return None; } }; - debug!(self.log, - "Created new sample request"; - "id" => ?id, - "column_selection" => ?request.column_selection() + debug!( + ?id, + column_selection = ?request.column_selection(), + "Created new sample request" ); // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough @@ -107,6 +123,12 @@ impl Sampling { /// /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. 
/// - `None`: Request still active, requester should do no action + #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] pub fn on_sample_downloaded( &mut self, id: SamplingId, @@ -116,7 +138,7 @@ impl Sampling { ) -> Option<(SamplingRequester, SamplingResult)> { let Some(request) = self.requests.get_mut(&id.id) else { // TOOD(das): This log can happen if the request is error'ed early and dropped - debug!(self.log, "Sample downloaded event for unknown request"; "id" => ?id); + debug!(?id, "Sample downloaded event for unknown request"); return None; }; @@ -131,6 +153,12 @@ impl Sampling { /// /// - `Some`: Request completed, won't make more progress. Expect requester to act on the result. /// - `None`: Request still active, requester should do no action + #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] pub fn on_sample_verified( &mut self, id: SamplingId, @@ -139,7 +167,7 @@ impl Sampling { ) -> Option<(SamplingRequester, SamplingResult)> { let Some(request) = self.requests.get_mut(&id.id) else { // TOOD(das): This log can happen if the request is error'ed early and dropped - debug!(self.log, "Sample verified event for unknown request"; "id" => ?id); + debug!(?id, "Sample verified event for unknown request"); return None; }; @@ -150,6 +178,12 @@ impl Sampling { /// Converts a result from the internal format of `ActiveSamplingRequest` (error first to use ? /// conveniently), to an Option first format to use an `if let Some() { act on result }` pattern /// in the sync manager. 
+ #[instrument(parent = None, + level = "info", + fields(service = "sampling"), + name = "sampling", + skip_all + )] fn handle_sampling_result( &mut self, result: Result, SamplingError>, @@ -157,7 +191,7 @@ impl Sampling { ) -> Option<(SamplingRequester, SamplingResult)> { let result = result.transpose(); if let Some(result) = result { - debug!(self.log, "Sampling request completed, removing"; "id" => ?id, "result" => ?result); + debug!(?id, ?result, "Sampling request completed, removing"); metrics::inc_counter_vec( &metrics::SAMPLING_REQUEST_RESULT, &[metrics::from_result(&result)], @@ -180,8 +214,6 @@ pub struct ActiveSamplingRequest { current_sampling_request_id: SamplingRequestId, column_shuffle: Vec, required_successes: Vec, - /// Logger for the `SyncNetworkContext`. - pub log: slog::Logger, _phantom: PhantomData, } @@ -212,7 +244,6 @@ impl ActiveSamplingRequest { block_root: Hash256, requester_id: SamplingRequester, sampling_config: &SamplingConfig, - log: slog::Logger, spec: &ChainSpec, ) -> Self { // Select ahead of time the full list of to-sample columns @@ -232,7 +263,6 @@ impl ActiveSamplingRequest { SamplingConfig::Default => REQUIRED_SUCCESSES.to_vec(), SamplingConfig::Custom { required_successes } => required_successes.clone(), }, - log, _phantom: PhantomData, } } @@ -275,9 +305,9 @@ impl ActiveSamplingRequest { .column_indexes_by_sampling_request .get(&sampling_request_id) else { - error!(self.log, - "Column indexes for the sampling request ID not found"; - "sampling_request_id" => ?sampling_request_id + error!( + ?sampling_request_id, + "Column indexes for the sampling request ID not found" ); return Ok(None); }; @@ -288,11 +318,11 @@ impl ActiveSamplingRequest { .iter() .map(|r| r.index) .collect::>(); - debug!(self.log, - "Sample download success"; - "block_root" => %self.block_root, - "column_indexes" => ?resp_column_indexes, - "count" => resp_data_columns.len() + debug!( + block_root = %self.block_root, + column_indexes = ?resp_column_indexes, + 
count = resp_data_columns.len(), + "Sample download success" ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); @@ -300,10 +330,10 @@ impl ActiveSamplingRequest { let mut data_columns = vec![]; for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!(self.log, - "Active column sample request not found"; - "block_root" => %self.block_root, - "column_index" => column_index + warn!( + block_root = %self.block_root, + column_index, + "Active column sample request not found" ); continue; }; @@ -314,10 +344,10 @@ impl ActiveSamplingRequest { else { // Peer does not have the requested data, mark peer as "dont have" and try // again with a different peer. - debug!(self.log, - "Sampling peer claims to not have the data"; - "block_root" => %self.block_root, - "column_index" => column_index + debug!( + block_root = %self.block_root, + column_index, + "Sampling peer claims to not have the data" ); request.on_sampling_error()?; continue; @@ -331,16 +361,16 @@ impl ActiveSamplingRequest { .iter() .map(|d| d.index) .collect::>(); - debug!(self.log, - "Received data that was not requested"; - "block_root" => %self.block_root, - "column_indexes" => ?resp_column_indexes + debug!( + block_root = %self.block_root, + column_indexes = ?resp_column_indexes, + "Received data that was not requested" ); } // Handle the downloaded data columns. 
if data_columns.is_empty() { - debug!(self.log, "Received empty response"; "block_root" => %self.block_root); + debug!(block_root = %self.block_root, "Received empty response"); self.column_indexes_by_sampling_request .remove(&sampling_request_id); } else { @@ -351,17 +381,17 @@ impl ActiveSamplingRequest { // Peer has data column, send to verify let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { // If processor is not available, error the entire sampling - debug!(self.log, - "Dropping sampling"; - "block" => %self.block_root, - "reason" => "beacon processor unavailable" + debug!( + block = %self.block_root, + reason = "beacon processor unavailable", + "Dropping sampling" ); return Err(SamplingError::ProcessorUnavailable); }; - debug!(self.log, - "Sending data_column for verification"; - "block" => ?self.block_root, - "column_indexes" => ?column_indexes + debug!( + block = ?self.block_root, + ?column_indexes, + "Sending data_column for verification" ); if let Err(e) = beacon_processor.send_rpc_validate_data_columns( self.block_root, @@ -375,20 +405,21 @@ impl ActiveSamplingRequest { // Beacon processor is overloaded, drop sampling attempt. Failing to sample // is not a permanent state so we should recover once the node has capacity // and receives a descendant block. - error!(self.log, - "Dropping sampling"; - "block" => %self.block_root, - "reason" => e.to_string() + error!( + block = %self.block_root, + reason = e.to_string(), + "Dropping sampling" ); return Err(SamplingError::SendFailed("beacon processor send failure")); } } } Err(err) => { - debug!(self.log, "Sample download error"; - "block_root" => %self.block_root, - "column_indexes" => ?column_indexes, - "error" => ?err + debug!( + block_root = %self.block_root, + ?column_indexes, + error = ?err, + "Sample download error" ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::FAILURE]); @@ -396,10 +427,10 @@ impl ActiveSamplingRequest { // reaching this function. 
Mark the peer as failed and try again with another. for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { - warn!(self.log, - "Active column sample request not found"; - "block_root" => %self.block_root, - "column_index" => column_index + warn!( + block_root = %self.block_root, + column_index, + "Active column sample request not found" ); continue; }; @@ -429,21 +460,24 @@ impl ActiveSamplingRequest { .column_indexes_by_sampling_request .get(&sampling_request_id) else { - error!(self.log, "Column indexes for the sampling request ID not found"; "sampling_request_id" => ?sampling_request_id); + error!( + ?sampling_request_id, + "Column indexes for the sampling request ID not found" + ); return Ok(None); }; match result { Ok(_) => { - debug!(self.log, "Sample verification success"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes); + debug!(block_root = %self.block_root,?column_indexes, "Sample verification success"); metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::SUCCESS]); // Valid, continue_sampling will maybe consider sampling succees for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + block_root = %self.block_root, column_index, + "Active column sample request not found" ); continue; }; @@ -451,7 +485,7 @@ impl ActiveSamplingRequest { } } Err(err) => { - debug!(self.log, "Sample verification failure"; "block_root" => %self.block_root, "column_indexes" => ?column_indexes, "reason" => ?err); + debug!(block_root = %self.block_root, ?column_indexes, reason = ?err, "Sample verification failure"); metrics::inc_counter_vec(&metrics::SAMPLE_VERIFY_RESULT, &[metrics::FAILURE]); // Peer sent invalid data, penalize and try again from different peer @@ -459,8 +493,9 @@ impl ActiveSamplingRequest { 
for column_index in column_indexes { let Some(request) = self.column_requests.get_mut(column_index) else { warn!( - self.log, - "Active column sample request not found"; "block_root" => %self.block_root, "column_index" => column_index + block_root = %self.block_root, + column_index, + "Active column sample request not found" ); continue; }; @@ -570,7 +605,7 @@ impl ActiveSamplingRequest { // request was sent, loop to increase the required_successes until the sampling fails if // there are no peers. if ongoings == 0 && !sent_request { - debug!(self.log, "Sampling request stalled"; "block_root" => %self.block_root); + debug!(block_root = %self.block_root, "Sampling request stalled"); } Ok(None) diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 912287a8a4..c1ad550376 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -3,6 +3,7 @@ use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use std::collections::HashSet; +use std::fmt; use std::hash::{Hash, Hasher}; use std::ops::Sub; use std::time::{Duration, Instant}; @@ -61,6 +62,7 @@ pub trait BatchConfig { fn batch_attempt_hash(blocks: &[RpcBlock]) -> u64; } +#[derive(Debug)] pub struct RangeSyncBatchConfig {} impl BatchConfig for RangeSyncBatchConfig { @@ -93,6 +95,7 @@ pub enum BatchProcessingResult { NonFaultyFailure, } +#[derive(Debug)] /// A segment of a chain. pub struct BatchInfo { /// Start slot of the batch. 
@@ -113,6 +116,17 @@ pub struct BatchInfo { marker: std::marker::PhantomData, } +impl fmt::Display for BatchInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Start Slot: {}, End Slot: {}, State: {}", + self.start_slot, self.end_slot, self.state + ) + } +} + +#[derive(Display)] /// Current state of a batch pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. @@ -190,15 +204,6 @@ impl BatchInfo { peers } - /// Return the number of times this batch has failed downloading and failed processing, in this - /// order. - pub fn failed_attempts(&self) -> (usize, usize) { - ( - self.failed_download_attempts.len(), - self.failed_processing_attempts.len(), - ) - } - /// Verifies if an incoming block belongs to this batch. pub fn is_expecting_block(&self, request_id: &Id) -> bool { if let BatchState::Downloading(_, expected_id) = &self.state { @@ -456,39 +461,6 @@ impl Attempt { } } -impl slog::KV for &mut BatchInfo { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - slog::KV::serialize(*self, record, serializer) - } -} - -impl slog::KV for BatchInfo { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - use slog::Value; - Value::serialize(&self.start_slot, record, "start_slot", serializer)?; - Value::serialize( - &(self.end_slot - 1), // NOTE: The -1 shows inclusive blocks - record, - "end_slot", - serializer, - )?; - serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; - serializer.emit_usize("processed", self.failed_processing_attempts.len())?; - serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; - serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; - serializer.emit_arguments("batch_ty", &format_args!("{}", self.batch_type))?; - slog::Result::Ok(()) - } -} - impl std::fmt::Debug for 
BatchState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index cab08dd278..70c7b6f98f 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -9,11 +9,13 @@ use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; +use logging::crit; use rand::seq::SliceRandom; use rand::Rng; -use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; +use std::fmt; use strum::IntoStaticStr; +use tracing::{debug, instrument, warn}; use types::{Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -37,6 +39,7 @@ pub type ProcessingResult = Result; /// Reasons for removing a chain #[derive(Debug)] +#[allow(dead_code)] pub enum RemoveChain { EmptyPeerPool, ChainCompleted, @@ -66,6 +69,7 @@ pub enum SyncingChainType { /// A chain of blocks that need to be downloaded. Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the /// chain. +#[derive(Debug)] pub struct SyncingChain { /// A random id used to identify this chain. id: ChainId, @@ -110,9 +114,16 @@ pub struct SyncingChain { /// The current processing batch, if any. current_processing_batch: Option, +} - /// The chain's log. 
- log: slog::Logger, +impl fmt::Display for SyncingChain { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.chain_type { + SyncingChainType::Head => write!(f, "Head"), + SyncingChainType::Finalized => write!(f, "Finalized"), + SyncingChainType::Backfill => write!(f, "Backfill"), + } + } } #[derive(PartialEq, Debug)] @@ -132,7 +143,6 @@ impl SyncingChain { target_head_root: Hash256, peer_id: PeerId, chain_type: SyncingChainType, - log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); peers.insert(peer_id, Default::default()); @@ -151,7 +161,6 @@ impl SyncingChain { attempted_optimistic_starts: HashSet::default(), state: ChainSyncingState::Stopped, current_processing_batch: None, - log: log.new(o!("chain" => id)), } } @@ -161,21 +170,25 @@ impl SyncingChain { } /// Check if the chain has peers from which to process batches. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn available_peers(&self) -> usize { self.peers.len() } /// Get the chain's id. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn get_id(&self) -> ChainId { self.id } /// Peers currently syncing this chain. 
+ #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn peers(&self) -> impl Iterator + '_ { self.peers.keys().cloned() } /// Progress in epochs made by the chain + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn processed_epochs(&self) -> u64 { self.processing_target .saturating_sub(self.start_epoch) @@ -183,6 +196,7 @@ impl SyncingChain { } /// Returns the total count of pending blocks in all the batches of this chain + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn pending_blocks(&self) -> usize { self.batches .values() @@ -192,6 +206,7 @@ impl SyncingChain { /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn remove_peer( &mut self, peer_id: &PeerId, @@ -211,8 +226,7 @@ impl SyncingChain { } self.retry_batch_download(network, id)?; } else { - debug!(self.log, "Batch not found while removing peer"; - "peer" => %peer_id, "batch" => id) + debug!(%peer_id, batch = ?id, "Batch not found while removing peer") } } } @@ -225,6 +239,7 @@ impl SyncingChain { } /// Returns the latest slot number that has been processed. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn current_processed_slot(&self) -> Slot { // the last slot we processed was included in the previous batch, and corresponds to the // first slot of the current target epoch @@ -234,6 +249,7 @@ impl SyncingChain { /// A block has been received for a batch on this chain. /// If the block correctly completes the batch it will be processed if possible. 
+ #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn on_block_response( &mut self, network: &mut SyncNetworkContext, @@ -245,7 +261,7 @@ impl SyncingChain { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { None => { - debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); + debug!(epoch = %batch_id, "Received a block for unknown batch"); // A batch might get removed when the chain advances, so this is non fatal. return Ok(KeepChain); } @@ -273,7 +289,7 @@ impl SyncingChain { let awaiting_batches = batch_id .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) / EPOCHS_PER_BATCH; - debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); + debug!(epoch = %batch_id, blocks = received, batch_state = self.visualize_batch_state(), %awaiting_batches,"Batch downloaded"); // pre-emptively request more blocks from peers whilst we process current blocks, self.request_batches(network)?; @@ -282,6 +298,7 @@ impl SyncingChain { /// Processes the batch with the given id. 
/// The batch must exist and be ready for processing + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn process_batch( &mut self, network: &mut SyncNetworkContext, @@ -317,8 +334,7 @@ impl SyncingChain { self.current_processing_batch = Some(batch_id); if let Err(e) = beacon_processor.send_chain_segment(process_id, blocks) { - crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", - "error" => %e, "batch" => self.processing_target); + crit!(msg = "process_batch",error = %e, batch = ?self.processing_target, "Failed to send chain segment to processor."); // This is unlikely to happen but it would stall syncing since the batch now has no // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is @@ -330,6 +346,7 @@ impl SyncingChain { } /// Processes the next ready batch, prioritizing optimistic batches over the processing target. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn process_completed_batches( &mut self, network: &mut SyncNetworkContext, @@ -349,7 +366,7 @@ impl SyncingChain { match state { BatchState::AwaitingProcessing(..) => { // this batch is ready - debug!(self.log, "Processing optimistic start"; "epoch" => epoch); + debug!(%epoch, "Processing optimistic start"); return self.process_batch(network, epoch); } BatchState::Downloading(..) => { @@ -377,7 +394,7 @@ impl SyncingChain { // batch has been requested and processed we can land here. 
We drop the // optimistic candidate since we can't conclude whether the batch included // blocks or not at this point - debug!(self.log, "Dropping optimistic candidate"; "batch" => epoch); + debug!(batch = %epoch, "Dropping optimistic candidate"); self.optimistic_start = None; } } @@ -411,7 +428,10 @@ impl SyncingChain { // inside the download buffer (between `self.processing_target` and // `self.to_be_downloaded`). In this case, eventually the chain advances to the // batch (`self.processing_target` reaches this point). - debug!(self.log, "Chain encountered a robust batch awaiting validation"; "batch" => self.processing_target); + debug!( + batch = %self.processing_target, + "Chain encountered a robust batch awaiting validation" + ); self.processing_target += EPOCHS_PER_BATCH; if self.to_be_downloaded <= self.processing_target { @@ -436,6 +456,7 @@ impl SyncingChain { /// The block processor has completed processing a batch. This function handles the result /// of the batch processor. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn on_batch_process_result( &mut self, network: &mut SyncNetworkContext, @@ -447,13 +468,11 @@ impl SyncingChain { let batch_state = self.visualize_batch_state(); let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { - debug!(self.log, "Unexpected batch result"; - "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); + debug!(batch_epoch = %batch_id, expected_batch_epoch = %processing_id,"Unexpected batch result"); return Ok(KeepChain); } None => { - debug!(self.log, "Chain was not expecting a batch result"; - "batch_epoch" => batch_id); + debug!(batch_epoch = %batch_id,"Chain was not expecting a batch result"); return Ok(KeepChain); } _ => { @@ -476,8 +495,14 @@ impl SyncingChain { })?; // Log the process result and the batch for debugging purposes. 
- debug!(self.log, "Batch processing result"; "result" => ?result, &batch, - "batch_epoch" => batch_id, "client" => %network.client_type(&peer), "batch_state" => batch_state); + debug!( + result = ?result, + batch_epoch = %batch_id, + client = %network.client_type(&peer), + batch_state = ?batch_state, + ?batch, + "Batch processing result" + ); // We consider three cases. Batch was successfully processed, Batch failed processing due // to a faulty peer, or batch failed processing but the peer can't be deemed faulty. @@ -563,10 +588,9 @@ impl SyncingChain { // There are some edge cases with forks that could land us in this situation. // This should be unlikely, so we tolerate these errors, but not often. warn!( - self.log, - "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %penalty, - "batch_epoch"=> batch_id, + score_adjustment = %penalty, + batch_epoch = %batch_id, + "Batch failed to download. Dropping chain scoring peers" ); for (peer, _) in self.peers.drain() { @@ -587,6 +611,7 @@ impl SyncingChain { } } + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn reject_optimistic_batch( &mut self, network: &mut SyncNetworkContext, @@ -599,13 +624,13 @@ impl SyncingChain { // it. 
NOTE: this is done to prevent non-sequential batches coming from optimistic // starts from filling up the buffer size if epoch < self.to_be_downloaded { - debug!(self.log, "Rejected optimistic batch left for future use"; "epoch" => %epoch, "reason" => reason); + debug!(%epoch, reason, "Rejected optimistic batch left for future use"); // this batch is now treated as any other batch, and re-requested for future use if redownload { return self.retry_batch_download(network, epoch); } } else { - debug!(self.log, "Rejected optimistic batch"; "epoch" => %epoch, "reason" => reason); + debug!(%epoch, reason, "Rejected optimistic batch"); self.batches.remove(&epoch); } } @@ -621,6 +646,7 @@ impl SyncingChain { /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. #[allow(clippy::modulo_one)] + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { @@ -629,7 +655,7 @@ impl SyncingChain { // safety check for batch boundaries if validating_epoch % EPOCHS_PER_BATCH != self.start_epoch % EPOCHS_PER_BATCH { - crit!(self.log, "Validating Epoch is not aligned"); + crit!("Validating Epoch is not aligned"); return; } @@ -651,9 +677,10 @@ impl SyncingChain { // A different peer sent the correct batch, the previous peer did not // We negatively score the original peer. let action = PeerAction::LowToleranceError; - debug!(self.log, "Re-processed batch validated. Scoring original peer"; - "batch_epoch" => id, "score_adjustment" => %action, - "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + debug!( + batch_epoch = %id, score_adjustment = %action, + original_peer = %attempt.peer_id, new_peer = %processed_attempt.peer_id, + "Re-processed batch validated. 
Scoring original peer" ); network.report_peer( attempt.peer_id, @@ -664,9 +691,12 @@ impl SyncingChain { // The same peer corrected it's previous mistake. There was an error, so we // negative score the original peer. let action = PeerAction::MidToleranceError; - debug!(self.log, "Re-processed batch validated by the same peer"; - "batch_epoch" => id, "score_adjustment" => %action, - "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id + debug!( + batch_epoch = %id, + score_adjustment = %action, + original_peer = %attempt.peer_id, + new_peer = %processed_attempt.peer_id, + "Re-processed batch validated by the same peer" ); network.report_peer( attempt.peer_id, @@ -683,13 +713,12 @@ impl SyncingChain { active_batches.remove(&id); } } - BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => crit!( - self.log, - "batch indicates inconsistent chain state while advancing chain" - ), + BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => { + crit!("batch indicates inconsistent chain state while advancing chain") + } BatchState::AwaitingProcessing(..) => {} BatchState::Processing(_) => { - debug!(self.log, "Advancing chain while processing a batch"; "batch" => id, batch); + debug!(batch = %id, %batch, "Advancing chain while processing a batch"); if let Some(processing_id) = self.current_processing_batch { if id <= processing_id { self.current_processing_batch = None; @@ -713,8 +742,12 @@ impl SyncingChain { self.optimistic_start = None; } } - debug!(self.log, "Chain advanced"; "previous_start" => old_start, - "new_start" => self.start_epoch, "processing_target" => self.processing_target); + debug!( + previous_start = %old_start, + new_start = %self.start_epoch, + processing_target = %self.processing_target, + "Chain advanced" + ); } /// An invalid batch has been received that could not be processed, but that can be retried. 
@@ -722,6 +755,7 @@ impl SyncingChain { /// These events occur when a peer has successfully responded with blocks, but the blocks we /// have received are incorrect or invalid. This indicates the peer has not performed as /// intended and can result in downvoting a peer. + #[instrument(parent = None,level = "info", fields(service = self.id, network), skip_all)] fn handle_invalid_batch( &mut self, network: &mut SyncNetworkContext, @@ -781,6 +815,7 @@ impl SyncingChain { /// This chain has been requested to start syncing. /// /// This could be new chain, or an old chain that is being resumed. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn start_syncing( &mut self, network: &mut SyncNetworkContext, @@ -819,6 +854,7 @@ impl SyncingChain { /// Add a peer to the chain. /// /// If the chain is active, this starts requesting batches from this peer. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn add_peer( &mut self, network: &mut SyncNetworkContext, @@ -836,6 +872,7 @@ impl SyncingChain { /// An RPC error has occurred. /// /// If the batch exists it is re-requested. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn inject_error( &mut self, network: &mut SyncNetworkContext, @@ -852,24 +889,21 @@ impl SyncingChain { // columns. if !batch.is_expecting_block(&request_id) { debug!( - self.log, - "Batch not expecting block"; - "batch_epoch" => batch_id, - "batch_state" => ?batch.state(), - "peer_id" => %peer_id, - "request_id" => %request_id, - "batch_state" => batch_state + batch_epoch = %batch_id, + batch_state = ?batch.state(), + %peer_id, + %request_id, + ?batch_state, + "Batch not expecting block" ); return Ok(KeepChain); } debug!( - self.log, - "Batch failed. 
RPC Error"; - "batch_epoch" => batch_id, - "batch_state" => ?batch.state(), - "peer_id" => %peer_id, - "request_id" => %request_id, - "batch_state" => batch_state + batch_epoch = %batch_id, + batch_state = ?batch.state(), + %peer_id, + %request_id, + "Batch failed. RPC Error" ); if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); @@ -883,12 +917,11 @@ impl SyncingChain { self.retry_batch_download(network, batch_id) } else { debug!( - self.log, - "Batch not found"; - "batch_epoch" => batch_id, - "peer_id" => %peer_id, - "request_id" => %request_id, - "batch_state" => batch_state + batch_epoch = %batch_id, + %peer_id, + %request_id, + batch_state, + "Batch not found" ); // this could be an error for an old batch, removed when the chain advances Ok(KeepChain) @@ -896,6 +929,7 @@ impl SyncingChain { } /// Sends and registers the request of a batch awaiting download. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn retry_batch_download( &mut self, network: &mut SyncNetworkContext, @@ -932,6 +966,7 @@ impl SyncingChain { } /// Requests the batch assigned to the given id from a given peer. 
+ #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn send_batch( &mut self, network: &mut SyncNetworkContext, @@ -958,9 +993,9 @@ impl SyncingChain { .map(|epoch| epoch == batch_id) .unwrap_or(false) { - debug!(self.log, "Requesting optimistic batch"; "epoch" => batch_id, &batch, "batch_state" => batch_state); + debug!(epoch = %batch_id, %batch, %batch_state, "Requesting optimistic batch"); } else { - debug!(self.log, "Requesting batch"; "epoch" => batch_id, &batch, "batch_state" => batch_state); + debug!(epoch = %batch_id, %batch, %batch_state, "Requesting batch"); } // register the batch for this peer return self @@ -979,8 +1014,7 @@ impl SyncingChain { } Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway - warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => ?e, &batch); + warn!(%batch_id, error = %e, %batch, "Could not send batch request"); // register the failed download and check if the batch can be retried batch.start_downloading_from_peer(peer, 1)?; // fake request_id is not relevant self.peers @@ -1005,6 +1039,7 @@ impl SyncingChain { } /// Returns true if this chain is currently syncing. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn is_syncing(&self) -> bool { match self.state { ChainSyncingState::Syncing => true, @@ -1014,6 +1049,7 @@ impl SyncingChain { /// Kickstarts the chain by sending for processing batches that are ready and requesting more /// batches if needed. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] pub fn resume( &mut self, network: &mut SyncNetworkContext, @@ -1026,6 +1062,7 @@ impl SyncingChain { /// Attempts to request the next required batches from the peer pool if the chain is syncing. 
It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn request_batches(&mut self, network: &mut SyncNetworkContext) -> ProcessingResult { if !matches!(self.state, ChainSyncingState::Syncing) { return Ok(KeepChain); @@ -1052,10 +1089,7 @@ impl SyncingChain { // We wait for this batch before requesting any other batches. if let Some(epoch) = self.optimistic_start { if !self.good_peers_on_sampling_subnets(epoch, network) { - debug!( - self.log, - "Waiting for peers to be available on sampling column subnets" - ); + debug!("Waiting for peers to be available on sampling column subnets"); return Ok(KeepChain); } @@ -1114,6 +1148,7 @@ impl SyncingChain { /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. + #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { // don't request batches beyond the target head slot if self @@ -1147,10 +1182,7 @@ impl SyncingChain { // block and data column requests are currently coupled. This can be removed once we find a // way to decouple the requests and do retries individually, see issue #6258. if !self.good_peers_on_sampling_subnets(self.to_be_downloaded, network) { - debug!( - self.log, - "Waiting for peers to be available on custody column subnets" - ); + debug!("Waiting for peers to be available on custody column subnets"); return None; } @@ -1177,6 +1209,7 @@ impl SyncingChain { /// This produces a string of the form: [D,E,E,E,E] /// to indicate the current buffer state of the chain. The symbols are defined on each of the /// batch states. See [BatchState::visualize] for symbol definitions. 
+ #[instrument(parent = None,level = "info", fields(chain = self.id , service = "range_sync"), skip_all)] fn visualize_batch_state(&self) -> String { let mut visualization_string = String::with_capacity((BATCH_BUFFER_SIZE * 3) as usize); @@ -1212,45 +1245,6 @@ impl SyncingChain { } } -impl slog::KV for &mut SyncingChain { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - slog::KV::serialize(*self, record, serializer) - } -} - -impl slog::KV for SyncingChain { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - use slog::Value; - serializer.emit_u32("id", self.id)?; - Value::serialize(&self.start_epoch, record, "from", serializer)?; - Value::serialize( - &self.target_head_slot.epoch(T::EthSpec::slots_per_epoch()), - record, - "to", - serializer, - )?; - serializer.emit_arguments("end_root", &format_args!("{}", self.target_head_root))?; - Value::serialize( - &self.processing_target, - record, - "current_target", - serializer, - )?; - serializer.emit_usize("batches", self.batches.len())?; - serializer.emit_usize("peers", self.peers.len())?; - serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; - slog::Result::Ok(()) - } -} - use super::batch::WrongState as WrongBatchState; impl From for RemoveChain { fn from(err: WrongBatchState) -> Self { diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 4028530946..c6be3de576 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -12,11 +12,12 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; -use slog::{crit, debug, error}; +use logging::crit; use smallvec::SmallVec; use std::collections::hash_map::Entry; use 
std::collections::HashMap; use std::sync::Arc; +use tracing::{debug, error}; use types::EthSpec; use types::{Epoch, Hash256, Slot}; @@ -50,18 +51,15 @@ pub struct ChainCollection { head_chains: FnvHashMap>, /// The current sync state of the process. state: RangeSyncState, - /// Logger for the collection. - log: slog::Logger, } impl ChainCollection { - pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, - log, } } @@ -295,9 +293,8 @@ impl ChainCollection { .expect("Chain exists"); match old_id { - Some(Some(old_id)) => debug!(self.log, "Switching finalized chains"; - "old_id" => old_id, &chain), - None => debug!(self.log, "Syncing new finalized chain"; &chain), + Some(Some(old_id)) => debug!(old_id, %chain, "Switching finalized chains"), + None => debug!(%chain, "Syncing new finalized chain"), Some(None) => { // this is the same chain. We try to advance it. 
} @@ -309,10 +306,10 @@ impl ChainCollection { if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { if remove_reason.is_critical() { - crit!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); + crit!(chain = new_id, reason = ?remove_reason, "Chain removed while switching chains"); } else { // this happens only if sending a batch over the `network` fails a lot - error!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); + error!(chain = new_id, reason = ?remove_reason, "Chain removed while switching chains"); } self.finalized_chains.remove(&new_id); self.on_chain_removed(&new_id, true, RangeSyncType::Finalized); @@ -330,7 +327,7 @@ impl ChainCollection { ) { // Include the awaiting head peers for (peer_id, peer_sync_info) in awaiting_head_peers.drain() { - debug!(self.log, "including head peer"); + debug!("including head peer"); self.add_peer_or_create_chain( local_epoch, peer_sync_info.head_root, @@ -362,16 +359,16 @@ impl ChainCollection { if syncing_chains.len() < PARALLEL_HEAD_CHAINS { // start this chain if it's not already syncing if !chain.is_syncing() { - debug!(self.log, "New head chain started syncing"; &chain); + debug!(%chain, "New head chain started syncing"); } if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { self.head_chains.remove(&id); if remove_reason.is_critical() { - crit!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); + crit!(chain = id, reason = ?remove_reason, "Chain removed while switching head chains"); } else { - error!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); + error!(chain = id, reason = ?remove_reason, "Chain removed while switching head chains"); } } else { syncing_chains.push(id); @@ -407,7 +404,6 @@ impl ChainCollection { 
.start_slot(T::EthSpec::slots_per_epoch()); let beacon_chain = &self.beacon_chain; - let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { target_slot <= &local_finalized_slot @@ -425,7 +421,7 @@ impl ChainCollection { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { - debug!(log_ref, "Purging out of finalized chain"; &chain); + debug!(%chain, "Purging out of finalized chain"); Some((*id, chain.is_syncing(), RangeSyncType::Finalized)) } else { None @@ -436,7 +432,7 @@ impl ChainCollection { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { - debug!(log_ref, "Purging out of date head chain"; &chain); + debug!(%chain, "Purging out of date head chain"); Some((*id, chain.is_syncing(), RangeSyncType::Head)) } else { None @@ -477,14 +473,14 @@ impl ChainCollection { .find(|(_, chain)| chain.has_same_target(target_head_slot, target_head_root)) { Some((&id, chain)) => { - debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, "id" => id); + debug!(peer_id = %peer, ?sync_type, id, "Adding peer to known chain"); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); if let Err(remove_reason) = chain.add_peer(network, peer) { if remove_reason.is_critical() { - crit!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); + crit!(chain = %id, reason = ?remove_reason, "Chain removed after adding peer"); } else { - error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); + error!(chain = %id, reason = ?remove_reason, "Chain removed after adding peer"); } let is_syncing = chain.is_syncing(); collection.remove(&id); @@ -501,9 +497,9 @@ impl ChainCollection { target_head_root, peer, sync_type.into(), - &self.log, ); - debug!(self.log, "New chain added to sync"; "peer_id" => 
peer_rpr, "sync_type" => ?sync_type, &new_chain); + + debug!(peer_id = peer_rpr, ?sync_type, %new_chain, "New chain added to sync"); collection.insert(id, new_chain); metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_ADDED, &[sync_type.as_str()]); self.update_metrics(); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 38b032136c..e4a20f6349 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -51,10 +51,11 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerId, SyncInfo}; +use logging::crit; use lru_cache::LRUTimeCache; -use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; +use tracing::{debug, instrument, trace, warn}; use types::{Epoch, EthSpec, Hash256}; /// For how long we store failed finalized chains to prevent retries. @@ -74,23 +75,26 @@ pub struct RangeSync { chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, - /// The syncing logger. 
- log: slog::Logger, } impl RangeSync where T: BeaconChainTypes, { - pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] + pub fn new(beacon_chain: Arc>) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), - chains: ChainCollection::new(beacon_chain, log.clone()), + chains: ChainCollection::new(beacon_chain), failed_chains: LRUTimeCache::new(std::time::Duration::from_secs( FAILED_CHAINS_EXPIRY_SECONDS, )), awaiting_head_peers: HashMap::new(), - log, } } @@ -99,6 +103,12 @@ where self.failed_chains.keys().copied().collect() } + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn state(&self) -> SyncChainStatus { self.chains.state() } @@ -108,6 +118,12 @@ where /// may need to be synced as a result. A new peer, may increase the peer pool of a finalized /// chain, this may result in a different finalized chain from syncing as finalized chains are /// prioritised by peer-pool size. 
+ #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn add_peer( &mut self, network: &mut SyncNetworkContext, @@ -133,14 +149,13 @@ where RangeSyncType::Finalized => { // Make sure we have not recently tried this chain if self.failed_chains.contains(&remote_info.finalized_root) { - debug!(self.log, "Disconnecting peer that belongs to previously failed chain"; - "failed_root" => %remote_info.finalized_root, "peer_id" => %peer_id); + debug!(failed_root = ?remote_info.finalized_root, %peer_id,"Disconnecting peer that belongs to previously failed chain"); network.goodbye_peer(peer_id, GoodbyeReason::IrrelevantNetwork); return; } // Finalized chain search - debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); + debug!(%peer_id, "Finalization sync peer joined"); self.awaiting_head_peers.remove(&peer_id); // Because of our change in finalized sync batch size from 2 to 1 and our transition @@ -171,8 +186,7 @@ where if self.chains.is_finalizing_sync() { // If there are finalized chains to sync, finish these first, before syncing head // chains. - trace!(self.log, "Waiting for finalized sync to complete"; - "peer_id" => %peer_id, "awaiting_head_peers" => &self.awaiting_head_peers.len()); + trace!(%peer_id, awaiting_head_peers = &self.awaiting_head_peers.len(),"Waiting for finalized sync to complete"); self.awaiting_head_peers.insert(peer_id, remote_info); return; } @@ -204,6 +218,12 @@ where /// /// This function finds the chain that made this request. Once found, processes the result. /// This request could complete a chain or simply add to its progress. 
+ #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn blocks_by_range_response( &mut self, network: &mut SyncNetworkContext, @@ -229,11 +249,17 @@ where } } Err(_) => { - trace!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + trace!(%chain_id, "BlocksByRange response for removed chain") } } } + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn handle_block_process_result( &mut self, network: &mut SyncNetworkContext, @@ -259,13 +285,19 @@ where } Err(_) => { - trace!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + trace!(%chain_id, "BlocksByRange response for removed chain") } } } /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. A /// disconnected peer could remove a chain + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { // if the peer is in the awaiting head mapping, remove it self.awaiting_head_peers.remove(peer_id); @@ -278,6 +310,12 @@ where /// which pool the peer is in. The chain may also have a batch or batches awaiting /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain. + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { for (removed_chain, sync_type, remove_reason) in self .chains @@ -297,6 +335,12 @@ where /// /// Check to see if the request corresponds to a pending batch. If so, re-request it if possible, if there have /// been too many failed attempts for the batch, remove the chain. 
+ #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn inject_error( &mut self, network: &mut SyncNetworkContext, @@ -321,11 +365,17 @@ where } } Err(_) => { - trace!(self.log, "BlocksByRange response for removed chain"; "chain" => chain_id) + trace!(%chain_id, "BlocksByRange response for removed chain") } } } + #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] fn on_chain_removed( &mut self, chain: SyncingChain, @@ -335,14 +385,18 @@ where op: &'static str, ) { if remove_reason.is_critical() { - crit!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); + crit!(?sync_type, %chain, reason = ?remove_reason,op, "Chain removed"); } else { - debug!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); + debug!(?sync_type, %chain, reason = ?remove_reason,op, "Chain removed"); } if let RemoveChain::ChainFailed { blacklist, .. } = remove_reason { if RangeSyncType::Finalized == sync_type && blacklist { - warn!(self.log, "Chain failed! Syncing to its head won't be retried for at least the next {} seconds", FAILED_CHAINS_EXPIRY_SECONDS; &chain); + warn!( + %chain, + "Chain failed! Syncing to its head won't be retried for at least the next {} seconds", + FAILED_CHAINS_EXPIRY_SECONDS + ); self.failed_chains.insert(chain.target_head_root); } } @@ -369,6 +423,12 @@ where } /// Kickstarts sync. 
+ #[instrument(parent = None, + level = "info", + fields(component = "range_sync"), + name = "range_sync", + skip_all + )] pub fn resume(&mut self, network: &mut SyncNetworkContext) { for (removed_chain, sync_type, remove_reason) in self.chains.call_all(|chain| chain.resume(network)) diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 10117285eb..f79dd6de96 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -19,8 +19,8 @@ use beacon_chain::{ block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::Availability, test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, NumBlobs, }, validator_monitor::timestamp_now, AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, @@ -37,9 +37,9 @@ use lighthouse_network::{ types::SyncState, NetworkConfig, NetworkGlobals, PeerId, }; -use slog::info; use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; +use tracing::info; use types::{ data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, @@ -55,22 +55,12 @@ type DCByRootId = (SyncRequestId, Vec); impl TestRig { pub fn test_setup() -> Self { - let logger_type = if cfg!(feature = "test_logger") { - LoggerType::Test - } else if cfg!(feature = "ci_logger") { - LoggerType::CI - } else { - LoggerType::Null - }; - let log = build_log(slog::Level::Trace, logger_type); - // Use `fork_from_env` logic to set correct fork epochs let spec = test_spec::(); // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) .spec(Arc::new(spec)) - .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() 
.mock_execution_layer() @@ -95,7 +85,6 @@ impl TestRig { let network_config = Arc::new(NetworkConfig::default()); let globals = Arc::new(NetworkGlobals::new_test_globals( Vec::new(), - &log, network_config, chain.spec.clone(), )); @@ -104,7 +93,6 @@ impl TestRig { sync_tx, chain.clone(), harness.runtime.task_executor.clone(), - log.clone(), ); let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); @@ -137,11 +125,9 @@ impl TestRig { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, fork_context, - log.clone(), ), harness, fork_name, - log, spec, } } @@ -165,7 +151,7 @@ impl TestRig { } pub fn log(&self, msg: &str) { - info!(self.log, "TEST_RIG"; "msg" => msg); + info!(msg, "TEST_RIG"); } pub fn after_deneb(&self) -> bool { @@ -2318,11 +2304,6 @@ mod deneb_only { }) } - fn log(self, msg: &str) -> Self { - self.rig.log(msg); - self - } - fn trigger_unknown_block_from_attestation(mut self) -> Self { let block_root = self.block.canonical_root(); self.rig @@ -2626,6 +2607,11 @@ mod deneb_only { .block_imported() } + fn log(self, msg: &str) -> Self { + self.rig.log(msg); + self + } + fn parent_block_then_empty_parent_blobs(self) -> Self { self.log( " Return empty blobs for parent, block errors with missing components, downscore", diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index ef2bec80b8..ec24ddb036 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -8,7 +8,6 @@ use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_processor::WorkEvent; use lighthouse_network::NetworkGlobals; use rand_chacha::ChaCha20Rng; -use slog::Logger; use slot_clock::ManualSlotClock; use std::sync::Arc; use store::MemoryStore; @@ -64,6 +63,5 @@ struct TestRig { /// `rng` for generating test blocks and blobs. 
rng: ChaCha20Rng, fork_name: ForkName, - log: Logger, spec: Arc, } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 523d8a8755..bcebc06c9c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -18,7 +18,6 @@ use http_api::TlsConfig; use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; -use slog::{info, warn, Logger}; use std::cmp::max; use std::fmt::Debug; use std::fs; @@ -29,6 +28,7 @@ use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; +use tracing::{error, info, warn}; use types::graffiti::GraffitiString; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; @@ -46,7 +46,6 @@ pub fn get_config( context: &RuntimeContext, ) -> Result { let spec = &context.eth2_config.spec; - let log = context.log(); let mut client_config = ClientConfig::default(); @@ -64,12 +63,10 @@ pub fn get_config( let stdin_inputs = cfg!(windows) || cli_args.get_flag(STDIN_INPUTS_FLAG); if std::io::stdin().is_terminal() || stdin_inputs { info!( - log, "You are about to delete the chain database. This is irreversable \ and you will need to resync the chain." ); info!( - log, "Type 'confirm' to delete the database. Any other input will leave \ the database intact and Lighthouse will exit." ); @@ -80,14 +77,13 @@ pub fn get_config( let freezer_db = client_config.get_freezer_db_path(); let blobs_db = client_config.get_blobs_db_path(); purge_db(chain_db, freezer_db, blobs_db)?; - info!(log, "Database was deleted."); + info!("Database was deleted."); } else { - info!(log, "Database was not deleted. Lighthouse will now close."); + info!("Database was not deleted. Lighthouse will now close."); std::process::exit(1); } } else { warn!( - log, "The `--purge-db` flag was passed, but Lighthouse is not running \ interactively. The database was not purged. 
Use `--purge-db-force` \ to purge the database without requiring confirmation." @@ -104,7 +100,7 @@ pub fn get_config( let mut log_dir = client_config.data_dir().clone(); // remove /beacon from the end log_dir.pop(); - info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string")); + info!(datadir = %log_dir.into_os_string().into_string().expect("Datadir should be a valid os string"), "Data directory initialised"); /* * Networking @@ -112,7 +108,7 @@ pub fn get_config( let data_dir_ref = client_config.data_dir().clone(); - set_network_config(&mut client_config.network, cli_args, &data_dir_ref, log)?; + set_network_config(&mut client_config.network, cli_args, &data_dir_ref)?; /* * Staking flag @@ -178,7 +174,6 @@ pub fn get_config( if cli_args.get_flag("light-client-server") { warn!( - log, "The --light-client-server flag is deprecated. The light client server is enabled \ by default" ); @@ -259,8 +254,8 @@ pub fn get_config( // (e.g. using the --staking flag). if cli_args.get_flag("staking") { warn!( - log, - "Running HTTP server on port {}", client_config.http_api.listen_port + "Running HTTP server on port {}", + client_config.http_api.listen_port ); } @@ -274,11 +269,11 @@ pub fn get_config( */ if cli_args.get_flag("dummy-eth1") { - warn!(log, "The --dummy-eth1 flag is deprecated"); + warn!("The --dummy-eth1 flag is deprecated"); } if cli_args.get_flag("eth1") { - warn!(log, "The --eth1 flag is deprecated"); + warn!("The --eth1 flag is deprecated"); } if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { @@ -306,18 +301,14 @@ pub fn get_config( endpoints.as_str(), SensitiveUrl::parse, "--execution-endpoint", - log, )?; // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via // file_path or directly as string. - let secret_file: PathBuf; // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. 
if let Some(secret_files) = cli_args.get_one::("execution-jwt") { - secret_file = - parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; - + secret_file = parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt")?; // Check if the JWT secret key is passed directly via cli flag and persist it to the default // file location. } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") { @@ -340,8 +331,7 @@ pub fn get_config( // Parse and set the payload builder, if any. if let Some(endpoint) = cli_args.get_one::("builder") { - let payload_builder = - parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder")?; el_config.builder_url = Some(payload_builder); el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; @@ -442,7 +432,7 @@ pub fn get_config( } if clap_utils::parse_optional::(cli_args, "slots-per-restore-point")?.is_some() { - warn!(log, "The slots-per-restore-point flag is deprecated"); + warn!("The slots-per-restore-point flag is deprecated"); } if let Some(backend) = clap_utils::parse_optional(cli_args, "beacon-node-backend")? { @@ -515,10 +505,9 @@ pub fn get_config( client_config.eth1.set_block_cache_truncation::(spec); info!( - log, - "Deposit contract"; - "deploy_block" => client_config.eth1.deposit_contract_deploy_block, - "address" => &client_config.eth1.deposit_contract_address + deploy_block = client_config.eth1.deposit_contract_deploy_block, + address = &client_config.eth1.deposit_contract_address, + "Deposit contract" ); // Only append network config bootnodes if discovery is not disabled @@ -912,10 +901,7 @@ pub fn get_config( } /// Gets the listening_addresses for lighthouse based on the cli options. 
-pub fn parse_listening_addresses( - cli_args: &ArgMatches, - log: &Logger, -) -> Result { +pub fn parse_listening_addresses(cli_args: &ArgMatches) -> Result { let listen_addresses_str = cli_args .get_many::("listen-address") .unwrap_or_default(); @@ -1018,7 +1004,7 @@ pub fn parse_listening_addresses( (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports if cli_args.value_source("port6") == Some(ValueSource::CommandLine) { - warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); + warn!("When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } // If we are only listening on ipv6 and the user has specified --port6, lets just use @@ -1032,11 +1018,11 @@ pub fn parse_listening_addresses( .unwrap_or(port); if maybe_disc6_port.is_some() { - warn!(log, "When listening only over IPv6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") + warn!("When listening only over IPv6, use the --discovery-port flag. The value of --discovery-port6 will be ignored.") } if maybe_quic6_port.is_some() { - warn!(log, "When listening only over IPv6, use the --quic-port flag. The value of --quic-port6 will be ignored.") + warn!("When listening only over IPv6, use the --quic-port flag. The value of --quic-port6 will be ignored.") } // use zero ports if required. If not, use the specific udp port. If none given, use @@ -1158,7 +1144,6 @@ pub fn set_network_config( config: &mut NetworkConfig, cli_args: &ArgMatches, data_dir: &Path, - log: &Logger, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. 
if let Some(dir) = cli_args.get_one::("network-dir") { @@ -1183,7 +1168,7 @@ pub fn set_network_config( config.shutdown_after_sync = true; } - config.set_listening_addr(parse_listening_addresses(cli_args, log)?); + config.set_listening_addr(parse_listening_addresses(cli_args)?); // A custom target-peers command will overwrite the --proposer-only default. if let Some(target_peers_str) = cli_args.get_one::("target-peers") { @@ -1211,10 +1196,10 @@ pub fn set_network_config( .parse() .map_err(|_| format!("Not valid as ENR nor Multiaddr: {}", addr))?; if !multi.iter().any(|proto| matches!(proto, Protocol::Udp(_))) { - slog::error!(log, "Missing UDP in Multiaddr {}", multi.to_string()); + error!(multiaddr = multi.to_string(), "Missing UDP in Multiaddr"); } if !multi.iter().any(|proto| matches!(proto, Protocol::P2p(_))) { - slog::error!(log, "Missing P2P in Multiaddr {}", multi.to_string()); + error!(multiaddr = multi.to_string(), "Missing P2P in Multiaddr"); } multiaddrs.push(multi); } @@ -1249,7 +1234,7 @@ pub fn set_network_config( }) .collect::, _>>()?; if config.trusted_peers.len() >= config.target_peers { - slog::warn!(log, "More trusted peers than the target peer limit. This will prevent efficient peer selection criteria."; "target_peers" => config.target_peers, "trusted_peers" => config.trusted_peers.len()); + warn!( target_peers = config.target_peers, trusted_peers = config.trusted_peers.len(),"More trusted peers than the target peer limit. 
This will prevent efficient peer selection criteria."); } } @@ -1349,14 +1334,14 @@ pub fn set_network_config( match addr.parse::() { Ok(IpAddr::V4(v4_addr)) => { if let Some(used) = enr_ip4.as_ref() { - warn!(log, "More than one Ipv4 ENR address provided"; "used" => %used, "ignored" => %v4_addr) + warn!(used = %used, ignored = %v4_addr, "More than one Ipv4 ENR address provided") } else { enr_ip4 = Some(v4_addr) } } Ok(IpAddr::V6(v6_addr)) => { if let Some(used) = enr_ip6.as_ref() { - warn!(log, "More than one Ipv6 ENR address provided"; "used" => %used, "ignored" => %v6_addr) + warn!(used = %used, ignored = %v6_addr,"More than one Ipv6 ENR address provided") } else { enr_ip6 = Some(v6_addr) } @@ -1422,13 +1407,13 @@ pub fn set_network_config( } if parse_flag(cli_args, "disable-packet-filter") { - warn!(log, "Discv5 packet filter is disabled"); + warn!("Discv5 packet filter is disabled"); config.discv5_config.enable_packet_filter = false; } if parse_flag(cli_args, "disable-discovery") { config.disable_discovery = true; - warn!(log, "Discovery is disabled. New peers will not be found"); + warn!("Discovery is disabled. New peers will not be found"); } if parse_flag(cli_args, "disable-quic") { @@ -1475,7 +1460,10 @@ pub fn set_network_config( config.target_peers = 15; } config.proposer_only = true; - warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); + warn!( + info = "Proposer-only mode enabled", + "Do not connect a validator client to this node unless via the --proposer-nodes flag" + ); } // The inbound rate limiter is enabled by default unless `disabled` via the // `disable-inbound-rate-limiter` flag. 
@@ -1533,7 +1521,6 @@ pub fn parse_only_one_value( cli_value: &str, parser: F, flag_name: &str, - log: &Logger, ) -> Result where F: Fn(&str) -> Result, @@ -1547,11 +1534,10 @@ where if values.len() > 1 { warn!( - log, - "Multiple values provided"; - "info" => "multiple values are deprecated, only the first value will be used", - "count" => values.len(), - "flag" => flag_name + info = "Multiple values provided", + count = values.len(), + flag = flag_name, + "multiple values are deprecated, only the first value will be used" ); } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index e3802c837c..a7f92434ce 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -12,10 +12,10 @@ pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; use slasher::{DatabaseBackendOverride, Slasher}; -use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use store::database::interface::BeaconNodeBackend; +use tracing::{info, warn}; use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. 
@@ -63,7 +63,6 @@ impl ProductionBeaconNode { let spec = context.eth2_config().spec.clone(); let client_genesis = client_config.genesis.clone(); let store_config = client_config.store.clone(); - let log = context.log().clone(); let _datadir = client_config.create_data_dir()?; let db_path = client_config.create_db_path()?; let freezer_db_path = client_config.create_freezer_db_path()?; @@ -72,20 +71,18 @@ impl ProductionBeaconNode { if let Some(legacy_dir) = client_config.get_existing_legacy_data_dir() { warn!( - log, - "Legacy datadir location"; - "msg" => "this occurs when using relative paths for a datadir location", - "location" => ?legacy_dir, + msg = "this occurs when using relative paths for a datadir location", + location = ?legacy_dir, + "Legacy datadir location" ) } if let Err(misaligned_forks) = validator_fork_epochs(&spec) { warn!( - log, - "Fork boundaries are not well aligned / multiples of 256"; - "info" => "This may cause issues as fork boundaries do not align with the \ - start of sync committee period.", - "misaligned_forks" => ?misaligned_forks, + info = "This may cause issues as fork boundaries do not align with the \ + start of sync committee period.", + ?misaligned_forks, + "Fork boundaries are not well aligned / multiples of 256" ); } @@ -94,42 +91,30 @@ impl ProductionBeaconNode { .chain_spec(spec.clone()) .beacon_processor(client_config.beacon_processor.clone()) .http_api_config(client_config.http_api.clone()) - .disk_store( - &db_path, - &freezer_db_path, - &blobs_db_path, - store_config, - log.clone(), - )?; + .disk_store(&db_path, &freezer_db_path, &blobs_db_path, store_config)?; let builder = if let Some(mut slasher_config) = client_config.slasher.clone() { match slasher_config.override_backend() { DatabaseBackendOverride::Success(old_backend) => { info!( - log, - "Slasher backend overridden"; - "reason" => "database exists", - "configured_backend" => %old_backend, - "override_backend" => %slasher_config.backend, + reason = "database 
exists", + configured_backend = %old_backend, + override_backend = %slasher_config.backend, + "Slasher backend overridden" ); } DatabaseBackendOverride::Failure(path) => { warn!( - log, - "Slasher backend override failed"; - "advice" => "delete old MDBX database or enable MDBX backend", - "path" => path.display() + advice = "delete old MDBX database or enable MDBX backend", + path = %path.display(), + "Slasher backend override failed" ); } _ => {} } let slasher = Arc::new( - Slasher::open( - slasher_config, - spec, - log.new(slog::o!("service" => "slasher")), - ) - .map_err(|e| format!("Slasher open error: {:?}", e))?, + Slasher::open(slasher_config, spec) + .map_err(|e| format!("Slasher open error: {:?}", e))?, ); builder.slasher(slasher) } else { @@ -149,19 +134,17 @@ impl ProductionBeaconNode { .await?; let builder = if client_config.sync_eth1_chain { info!( - log, - "Block production enabled"; - "endpoint" => format!("{:?}", &client_config.eth1.endpoint), - "method" => "json rpc via http" + endpoint = ?client_config.eth1.endpoint, + method = "json rpc via http", + "Block production enabled" ); builder .caching_eth1_backend(client_config.eth1.clone()) .await? } else { info!( - log, - "Block production disabled"; - "reason" => "no eth1 backend configured" + reason = "no eth1 backend configured", + "Block production disabled" ); builder.no_eth1_backend()? 
}; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index d2f3a5c562..d17a8f04d6 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -30,12 +30,12 @@ parking_lot = { workspace = true } redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } serde = { workspace = true } -slog = { workspace = true } -sloggers = { workspace = true } smallvec = { workspace = true } state_processing = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 8f6682e758..f2821286ec 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -1,6 +1,6 @@ use crate::chunked_vector::{chunk_key, Chunk, Field}; use crate::{HotColdDB, ItemStore}; -use slog::error; +use tracing::error; use types::{ChainSpec, EthSpec, Slot}; /// Iterator over the values of a `BeaconState` vector field (like `block_roots`). @@ -82,9 +82,8 @@ where .cloned() .or_else(|| { error!( - self.store.log, - "Missing chunk value in forwards iterator"; - "vector index" => vindex + vector_index = vindex, + "Missing chunk value in forwards iterator" ); None })?; @@ -100,19 +99,17 @@ where ) .map_err(|e| { error!( - self.store.log, - "Database error in forwards iterator"; - "chunk index" => self.next_cindex, - "error" => format!("{:?}", e) + chunk_index = self.next_cindex, + error = ?e, + "Database error in forwards iterator" ); e }) .ok()? 
.or_else(|| { error!( - self.store.log, - "Missing chunk in forwards iterator"; - "chunk index" => self.next_cindex + chunk_index = self.next_cindex, + "Missing chunk in forwards iterator" ); None })?; diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 06393f2d21..586db44c89 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -2,7 +2,7 @@ use crate::database::interface::BeaconNodeBackend; use crate::hot_cold_store::HotColdDB; use crate::{DBColumn, Error}; -use slog::debug; +use tracing::debug; use types::EthSpec; impl HotColdDB, BeaconNodeBackend> @@ -24,11 +24,7 @@ where } }); if !ops.is_empty() { - debug!( - self.log, - "Garbage collecting {} temporary states", - ops.len() - ); + debug!("Garbage collecting {} temporary states", ops.len()); self.delete_batch(DBColumn::BeaconState, ops.clone())?; self.delete_batch(DBColumn::BeaconStateSummary, ops.clone())?; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6dee0dc180..0a545529ca 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -22,7 +22,6 @@ use lru::LruCache; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ @@ -37,6 +36,7 @@ use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; +use tracing::{debug, error, info, trace, warn}; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; @@ -81,8 +81,6 @@ pub struct HotColdDB, Cold: ItemStore> { historic_state_cache: Mutex>, /// Chain spec. pub(crate) spec: Arc, - /// Logger. - pub log: Logger, /// Mere vessel for E. 
_phantom: PhantomData, } @@ -203,7 +201,6 @@ impl HotColdDB, MemoryStore> { pub fn open_ephemeral( config: StoreConfig, spec: Arc, - log: Logger, ) -> Result, MemoryStore>, Error> { config.verify::()?; @@ -226,7 +223,6 @@ impl HotColdDB, MemoryStore> { config, hierarchy, spec, - log, _phantom: PhantomData, }; @@ -246,7 +242,6 @@ impl HotColdDB, BeaconNodeBackend> { migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, spec: Arc, - log: Logger, ) -> Result, Error> { config.verify::()?; @@ -272,10 +267,8 @@ impl HotColdDB, BeaconNodeBackend> { config, hierarchy, spec, - log, _phantom: PhantomData, }; - // Load the config from disk but don't error on a failed read because the config itself may // need migrating. let _ = db.load_config(); @@ -287,10 +280,9 @@ impl HotColdDB, BeaconNodeBackend> { *db.split.write() = split; info!( - db.log, - "Hot-Cold DB initialized"; - "split_slot" => split.slot, - "split_state" => ?split.state_root + %split.slot, + split_state = ?split.state_root, + "Hot-Cold DB initialized" ); } @@ -352,11 +344,10 @@ impl HotColdDB, BeaconNodeBackend> { )?; info!( - db.log, - "Blob DB initialized"; - "path" => ?blobs_db_path, - "oldest_blob_slot" => ?new_blob_info.oldest_blob_slot, - "oldest_data_column_slot" => ?new_data_column_info.oldest_data_column_slot, + path = ?blobs_db_path, + oldest_blob_slot = ?new_blob_info.oldest_blob_slot, + oldest_data_column_slot = ?new_data_column_info.oldest_data_column_slot, + "Blob DB initialized" ); // Ensure that the schema version of the on-disk database matches the software. @@ -364,10 +355,9 @@ impl HotColdDB, BeaconNodeBackend> { let db = Arc::new(db); if let Some(schema_version) = db.load_schema_version()? 
{ debug!( - db.log, - "Attempting schema migration"; - "from_version" => schema_version.as_u64(), - "to_version" => CURRENT_SCHEMA_VERSION.as_u64(), + from_version = schema_version.as_u64(), + to_version = CURRENT_SCHEMA_VERSION.as_u64(), + "Attempting schema migration" ); migrate_schema(db.clone(), schema_version, CURRENT_SCHEMA_VERSION)?; } else { @@ -385,10 +375,9 @@ impl HotColdDB, BeaconNodeBackend> { if let Ok(hierarchy_config) = disk_config.hierarchy_config() { if &db.config.hierarchy_config != hierarchy_config { info!( - db.log, - "Updating historic state config"; - "previous_config" => %hierarchy_config, - "new_config" => %db.config.hierarchy_config, + previous_config = %hierarchy_config, + new_config = %db.config.hierarchy_config, + "Updating historic state config" ); } } @@ -400,9 +389,9 @@ impl HotColdDB, BeaconNodeBackend> { // If configured, run a foreground compaction pass. if db.config.compact_on_init { - info!(db.log, "Running foreground compaction"); + info!("Running foreground compaction"); db.compact()?; - info!(db.log, "Foreground compaction complete"); + info!("Foreground compaction complete"); } Ok(db) @@ -991,12 +980,7 @@ impl, Cold: ItemStore> HotColdDB let split = self.split.read_recursive(); if state_root != split.state_root { - warn!( - self.log, - "State cache missed"; - "state_root" => ?state_root, - "block_root" => ?block_root, - ); + warn!(?state_root, ?block_root, "State cache missed"); } // Sanity check max-slot against the split slot. 
@@ -1025,10 +1009,9 @@ impl, Cold: ItemStore> HotColdDB .lock() .put_state(*state_root, block_root, state)?; debug!( - self.log, - "Cached state"; - "state_root" => ?state_root, - "slot" => state.slot(), + ?state_root, + slot = %state.slot(), + "Cached state" ); } drop(split); @@ -1308,9 +1291,9 @@ impl, Cold: ItemStore> HotColdDB Ok(BlobSidecarListFromRoot::NoBlobs | BlobSidecarListFromRoot::NoRoot) => {} Err(e) => { error!( - self.log, "Error getting blobs"; - "block_root" => %block_root, - "error" => ?e + %block_root, + error = ?e, + "Error getting blobs" ); } } @@ -1333,9 +1316,9 @@ impl, Cold: ItemStore> HotColdDB } Err(e) => { error!( - self.log, "Error getting data columns"; - "block_root" => %block_root, - "error" => ?e + %block_root, + error = ?e, + "Error getting data columns" ); } } @@ -1363,10 +1346,9 @@ impl, Cold: ItemStore> HotColdDB // Rollback on failure if let Err(e) = tx_res { error!( - self.log, - "Database write failed"; - "error" => ?e, - "action" => "reverting blob DB changes" + error = ?e, + action = "reverting blob DB changes", + "Database write failed" ); let mut blob_cache_ops = blob_cache_ops; for op in blob_cache_ops.iter_mut() { @@ -1475,10 +1457,9 @@ impl, Cold: ItemStore> HotColdDB .put_state(*state_root, block_root, state)? { debug!( - self.log, - "Skipping storage of cached state"; - "slot" => state.slot(), - "state_root" => ?state_root + slot = %state.slot(), + ?state_root, + "Skipping storage of cached state" ); return Ok(()); } @@ -1486,10 +1467,9 @@ impl, Cold: ItemStore> HotColdDB // On the epoch boundary, store the full state. 
if state.slot() % E::slots_per_epoch() == 0 { trace!( - self.log, - "Storing full state on epoch boundary"; - "slot" => state.slot().as_u64(), - "state_root" => format!("{:?}", state_root) + slot = %state.slot().as_u64(), + ?state_root, + "Storing full state on epoch boundary" ); store_full_state(state_root, state, ops)?; } @@ -1512,11 +1492,7 @@ impl, Cold: ItemStore> HotColdDB if *state_root != self.get_split_info().state_root { // Do not warn on start up when loading the split state. - warn!( - self.log, - "State cache missed"; - "state_root" => ?state_root, - ); + warn!(?state_root, "State cache missed"); } let state_from_disk = self.load_hot_state(state_root)?; @@ -1528,10 +1504,9 @@ impl, Cold: ItemStore> HotColdDB .lock() .put_state(*state_root, block_root, &state)?; debug!( - self.log, - "Cached state"; - "state_root" => ?state_root, - "slot" => state.slot(), + ?state_root, + slot = %state.slot(), + "Cached state" ); Ok(Some(state)) } else { @@ -1595,10 +1570,9 @@ impl, Cold: ItemStore> HotColdDB .put_state(state_root, latest_block_root, state)? { debug!( - self.log, - "Cached ancestor state"; - "state_root" => ?state_root, - "slot" => slot, + ?state_root, + %slot, + "Cached ancestor state" ); } Ok(()) @@ -1650,35 +1624,31 @@ impl, Cold: ItemStore> HotColdDB match self.hierarchy.storage_strategy(slot)? 
{ StorageStrategy::ReplayFrom(from) => { debug!( - self.log, - "Storing cold state"; - "strategy" => "replay", - "from_slot" => from, - "slot" => state.slot(), + strategy = "replay", + from_slot = %from, + %slot, + "Storing cold state", ); // Already have persisted the state summary, don't persist anything else } StorageStrategy::Snapshot => { debug!( - self.log, - "Storing cold state"; - "strategy" => "snapshot", - "slot" => state.slot(), + strategy = "snapshot", + %slot, + "Storing cold state" ); self.store_cold_state_as_snapshot(state, ops)?; } StorageStrategy::DiffFrom(from) => { debug!( - self.log, - "Storing cold state"; - "strategy" => "diff", - "from_slot" => from, - "slot" => state.slot(), + strategy = "diff", + from_slot = %from, + %slot, + "Storing cold state" ); self.store_cold_state_as_diff(state, from, ops)?; } } - Ok(()) } @@ -1837,10 +1807,9 @@ impl, Cold: ItemStore> HotColdDB metrics::start_timer(&metrics::STORE_BEACON_COLD_BUILD_BEACON_CACHES_TIME); base_state.build_all_caches(&self.spec)?; debug!( - self.log, - "Built caches for historic state"; - "target_slot" => slot, - "build_time_ms" => metrics::stop_timer_with_duration(cache_timer).as_millis() + target_slot = %slot, + build_time_ms = metrics::stop_timer_with_duration(cache_timer).as_millis(), + "Built caches for historic state" ); self.historic_state_cache .lock() @@ -1862,10 +1831,9 @@ impl, Cold: ItemStore> HotColdDB })?; let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; debug!( - self.log, - "Replayed blocks for historic state"; - "target_slot" => slot, - "replay_time_ms" => metrics::stop_timer_with_duration(replay_timer).as_millis() + target_slot = %slot, + replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), + "Replayed blocks for historic state" ); self.historic_state_cache @@ -1893,9 +1861,8 @@ impl, Cold: ItemStore> HotColdDB fn load_hdiff_buffer_for_slot(&self, slot: Slot) -> Result<(Slot, HDiffBuffer), Error> { if let 
Some(buffer) = self.historic_state_cache.lock().get_hdiff_buffer(slot) { debug!( - self.log, - "Hit hdiff buffer cache"; - "slot" => slot + %slot, + "Hit hdiff buffer cache" ); metrics::inc_counter(&metrics::STORE_BEACON_HDIFF_BUFFER_CACHE_HIT); return Ok((slot, buffer)); @@ -1919,10 +1886,9 @@ impl, Cold: ItemStore> HotColdDB let load_time_ms = t.elapsed().as_millis(); debug!( - self.log, - "Cached state and hdiff buffer"; - "load_time_ms" => load_time_ms, - "slot" => slot + load_time_ms, + %slot, + "Cached state and hdiff buffer" ); Ok((slot, buffer)) @@ -1945,10 +1911,9 @@ impl, Cold: ItemStore> HotColdDB let load_time_ms = t.elapsed().as_millis(); debug!( - self.log, - "Cached hdiff buffer"; - "load_time_ms" => load_time_ms, - "slot" => slot + load_time_ms, + %slot, + "Cached hdiff buffer" ); Ok((slot, buffer)) @@ -2052,9 +2017,8 @@ impl, Cold: ItemStore> HotColdDB .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( - self.log, - "State root cache miss during block replay"; - "slot" => target_slot, + slot = %target_slot, + "State root cache miss during block replay" ); } block_replayer.into_state() @@ -2180,11 +2144,6 @@ impl, Cold: ItemStore> HotColdDB &self.spec } - /// Get a reference to the `Logger` used by the database. - pub fn logger(&self) -> &Logger { - &self.log - } - /// Fetch a copy of the current split slot from memory. 
pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot @@ -2579,17 +2538,9 @@ impl, Cold: ItemStore> HotColdDB columns.extend(previous_schema_columns); for column in columns { - info!( - self.log, - "Starting compaction"; - "column" => ?column - ); + info!(?column, "Starting compaction"); self.cold_db.compact_column(column)?; - info!( - self.log, - "Finishing compaction"; - "column" => ?column - ); + info!(?column, "Finishing compaction"); } Ok(()) } @@ -2690,16 +2641,15 @@ impl, Cold: ItemStore> HotColdDB })??; if already_pruned && !force { - info!(self.log, "Execution payloads are pruned"); + info!("Execution payloads are pruned"); return Ok(()); } // Iterate block roots backwards to the Bellatrix fork or the anchor slot, whichever comes // first. warn!( - self.log, - "Pruning finalized payloads"; - "info" => "you may notice degraded I/O performance while this runs" + info = "you may notice degraded I/O performance while this runs", + "Pruning finalized payloads" ); let anchor_info = self.get_anchor_info(); @@ -2713,58 +2663,41 @@ impl, Cold: ItemStore> HotColdDB Ok(tuple) => tuple, Err(e) => { warn!( - self.log, - "Stopping payload pruning early"; - "error" => ?e, + error = ?e, + "Stopping payload pruning early" ); break; } }; if slot < bellatrix_fork_slot { - info!( - self.log, - "Payload pruning reached Bellatrix boundary"; - ); + info!("Payload pruning reached Bellatrix boundary"); break; } if Some(block_root) != last_pruned_block_root && self.execution_payload_exists(&block_root)? 
{ - debug!( - self.log, - "Pruning execution payload"; - "slot" => slot, - "block_root" => ?block_root, - ); + debug!(%slot, ?block_root, "Pruning execution payload"); last_pruned_block_root = Some(block_root); ops.push(StoreOp::DeleteExecutionPayload(block_root)); } if slot <= anchor_info.oldest_block_slot { - info!( - self.log, - "Payload pruning reached anchor oldest block slot"; - "slot" => slot - ); + info!(%slot, "Payload pruning reached anchor oldest block slot"); break; } } let payloads_pruned = ops.len(); self.do_atomically_with_block_and_blobs_cache(ops)?; - info!( - self.log, - "Execution payload pruning complete"; - "payloads_pruned" => payloads_pruned, - ); + info!(%payloads_pruned, "Execution payload pruning complete"); Ok(()) } /// Try to prune blobs, approximating the current epoch from the split slot. pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> { let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else { - debug!(self.log, "Deneb fork is disabled"); + debug!("Deneb fork is disabled"); return Ok(()); }; // The current epoch is >= split_epoch + 2. 
It could be greater if the database is @@ -2795,7 +2728,7 @@ impl, Cold: ItemStore> HotColdDB data_availability_boundary: Epoch, ) -> Result<(), Error> { if self.spec.deneb_fork_epoch.is_none() { - debug!(self.log, "Deneb fork is disabled"); + debug!("Deneb fork is disabled"); return Ok(()); } @@ -2804,17 +2737,13 @@ impl, Cold: ItemStore> HotColdDB let epochs_per_blob_prune = self.get_config().epochs_per_blob_prune; if !force && !pruning_enabled { - debug!( - self.log, - "Blob pruning is disabled"; - "prune_blobs" => pruning_enabled - ); + debug!(prune_blobs = pruning_enabled, "Blob pruning is disabled"); return Ok(()); } let blob_info = self.get_blob_info(); let Some(oldest_blob_slot) = blob_info.oldest_blob_slot else { - error!(self.log, "Slot of oldest blob is not known"); + error!("Slot of oldest blob is not known"); return Err(HotColdDBError::BlobPruneLogicError.into()); }; @@ -2837,13 +2766,12 @@ impl, Cold: ItemStore> HotColdDB if !force && !should_prune || !can_prune { debug!( - self.log, - "Blobs are pruned"; - "oldest_blob_slot" => oldest_blob_slot, - "data_availability_boundary" => data_availability_boundary, - "split_slot" => split.slot, - "end_epoch" => end_epoch, - "start_epoch" => start_epoch, + %oldest_blob_slot, + %data_availability_boundary, + %split.slot, + %end_epoch, + %start_epoch, + "Blobs are pruned" ); return Ok(()); } @@ -2852,21 +2780,19 @@ impl, Cold: ItemStore> HotColdDB let anchor = self.get_anchor_info(); if oldest_blob_slot < anchor.oldest_block_slot { error!( - self.log, - "Oldest blob is older than oldest block"; - "oldest_blob_slot" => oldest_blob_slot, - "oldest_block_slot" => anchor.oldest_block_slot + %oldest_blob_slot, + oldest_block_slot = %anchor.oldest_block_slot, + "Oldest blob is older than oldest block" ); return Err(HotColdDBError::BlobPruneLogicError.into()); } // Iterate block roots forwards from the oldest blob slot. 
debug!( - self.log, - "Pruning blobs"; - "start_epoch" => start_epoch, - "end_epoch" => end_epoch, - "data_availability_boundary" => data_availability_boundary, + %start_epoch, + %end_epoch, + %data_availability_boundary, + "Pruning blobs" ); // We collect block roots of deleted blobs in memory. Even for 10y of blob history this @@ -2922,10 +2848,7 @@ impl, Cold: ItemStore> HotColdDB let op = self.compare_and_set_blob_info(blob_info, new_blob_info)?; self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::KeyValueOp(op)])?; - debug!( - self.log, - "Blob pruning complete"; - ); + debug!("Blob pruning complete"); Ok(()) } @@ -2995,18 +2918,13 @@ impl, Cold: ItemStore> HotColdDB // If we just deleted the genesis state, re-store it using the current* schema. if self.get_split_slot() > 0 { info!( - self.log, - "Re-storing genesis state"; - "state_root" => ?genesis_state_root, + state_root = ?genesis_state_root, + "Re-storing genesis state" ); self.store_cold_state(&genesis_state_root, genesis_state, &mut cold_ops)?; } - info!( - self.log, - "Deleting historic states"; - "delete_ops" => delete_ops, - ); + info!(delete_ops, "Deleting historic states"); self.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. 
@@ -3022,9 +2940,8 @@ impl, Cold: ItemStore> HotColdDB pub fn prune_old_hot_states(&self) -> Result<(), Error> { let split = self.get_split_info(); debug!( - self.log, - "Database state pruning started"; - "split_slot" => split.slot, + %split.slot, + "Database state pruning started" ); let mut state_delete_batch = vec![]; for res in self @@ -3046,11 +2963,10 @@ impl, Cold: ItemStore> HotColdDB "non-canonical" }; debug!( - self.log, - "Deleting state"; - "state_root" => ?state_root, - "slot" => summary.slot, - "reason" => reason, + ?state_root, + slot = %summary.slot, + %reason, + "Deleting state" ); state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); } @@ -3058,11 +2974,7 @@ impl, Cold: ItemStore> HotColdDB } let num_deleted_states = state_delete_batch.len(); self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?; - debug!( - self.log, - "Database state pruning complete"; - "num_deleted_states" => num_deleted_states, - ); + debug!(%num_deleted_states, "Database state pruning complete"); Ok(()) } } @@ -3075,9 +2987,8 @@ pub fn migrate_database, Cold: ItemStore>( finalized_state: &BeaconState, ) -> Result<(), Error> { debug!( - store.log, - "Freezer migration started"; - "slot" => finalized_state.slot() + slot = %finalized_state.slot(), + "Freezer migration started" ); // 0. Check that the migration is sensible. @@ -3153,7 +3064,7 @@ pub fn migrate_database, Cold: ItemStore>( // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state // which always needs to be copied from the hot DB to the freezer and should not be deleted. if slot != 0 && slot < anchor_info.state_upper_limit { - debug!(store.log, "Pruning finalized state"; "slot" => slot); + debug!(%slot, "Pruning finalized state"); continue; } @@ -3213,10 +3124,9 @@ pub fn migrate_database, Cold: ItemStore>( // place in code. 
if latest_split_slot != current_split_slot { error!( - store.log, - "Race condition detected: Split point changed while moving states to the freezer"; - "previous split slot" => current_split_slot, - "current split slot" => latest_split_slot, + previous_split_slot = %current_split_slot, + current_split_slot = %latest_split_slot, + "Race condition detected: Split point changed while moving states to the freezer" ); // Assume the freezing procedure will be retried in case this happens. @@ -3252,9 +3162,8 @@ pub fn migrate_database, Cold: ItemStore>( )?; debug!( - store.log, - "Freezer migration complete"; - "slot" => finalized_state.slot() + slot = %finalized_state.slot(), + "Freezer migration complete" ); Ok(()) diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 97a88c01c8..0d12bbdd60 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -382,7 +382,6 @@ mod test { use crate::StoreConfig as Config; use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::{ChainSpec, MainnetEthSpec}; - use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; use types::FixedBytesExtended; @@ -398,10 +397,8 @@ mod test { #[test] fn block_root_iter() { - let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) - .unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal())).unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -447,10 +444,8 @@ mod test { #[test] fn state_root_iter() { - let log = NullLoggerBuilder.build().unwrap(); let store = - HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal()), log) - .unwrap(); + HotColdDB::open_ephemeral(Config::default(), Arc::new(ChainSpec::minimal())).unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut 
state_a: BeaconState = get_state(); diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 2a3b208aae..30df552b7b 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -4,12 +4,12 @@ use crate::metadata::ANCHOR_FOR_ARCHIVE_NODE; use crate::metrics; use crate::{Error, ItemStore}; use itertools::{process_results, Itertools}; -use slog::{debug, info}; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; use std::sync::Arc; +use tracing::{debug, info}; use types::EthSpec; impl HotColdDB @@ -37,9 +37,8 @@ where } debug!( - self.log, - "Starting state reconstruction batch"; - "start_slot" => anchor.state_lower_limit, + start_slot = %anchor.state_lower_limit, + "Starting state reconstruction batch" ); let _t = metrics::start_timer(&metrics::STORE_BEACON_RECONSTRUCTION_TIME); @@ -124,10 +123,9 @@ where || reconstruction_complete { info!( - self.log, - "State reconstruction in progress"; - "slot" => slot, - "remaining" => upper_limit_slot - 1 - slot + %slot, + remaining = %(upper_limit_slot - 1 - slot), + "State reconstruction in progress" ); self.cold_db.do_atomically(std::mem::take(&mut io_batch))?; @@ -164,10 +162,9 @@ where // batch when there is idle capacity. 
if batch_complete { debug!( - self.log, - "Finished state reconstruction batch"; - "start_slot" => lower_limit_slot, - "end_slot" => slot, + start_slot = %lower_limit_slot, + end_slot = %slot, + "Finished state reconstruction batch" ); return Ok(()); } diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 0738b12ec0..0d448e6c06 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -25,8 +25,6 @@ fn build_node(env: &mut Environment) -> LocalBeaconNode { #[test] fn http_server_genesis_state() { let mut env = env_builder() - .test_logger() - .expect("should build env logger") .multi_threaded_tokio_runtime() .expect("should start tokio runtime") .build() diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index 546cc2ed41..53fa2c0132 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 7c2db69604..1bd1c1e8ea 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,22 +3,21 @@ //! This service allows task execution on the beacon node for various functionality. 
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::time::sleep; +use tracing::{info, warn}; /// Spawns a timer service which periodically executes tasks for the beacon chain pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, ) -> Result<(), &'static str> { - let log = executor.log().clone(); let timer_future = async move { loop { let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else { - warn!(log, "Unable to determine duration to next slot"); + warn!("Unable to determine duration to next slot"); return; }; @@ -28,7 +27,7 @@ pub fn spawn_timer( }; executor.spawn(timer_future, "timer"); - info!(executor.log(), "Timer service started"); + info!("Timer service started"); Ok(()) } diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 79c8d8ead8..4c76647c0c 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -245,15 +245,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -517,8 +513,13 @@ Flags: all attestations are received for import. --light-client-server DEPRECATED - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. 
Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. diff --git a/book/src/help_general.md b/book/src/help_general.md index 996b048d10..4d0d4104d4 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -56,15 +56,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -93,8 +89,13 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. 
diff --git a/book/src/help_vc.md b/book/src/help_vc.md index f3ccdf2ae3..7fb655910f 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -77,15 +77,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -240,8 +236,13 @@ Flags: database will have been initialized when you imported your validator keys. If you misplace your database and then run with this flag you risk being slashed. - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. diff --git a/book/src/help_vm.md b/book/src/help_vm.md index 50c204f371..0d9d2a2e4b 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -53,15 +53,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. 
Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -88,8 +84,13 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 2743117eae..4f3774df10 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -60,15 +60,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. 
[default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -118,8 +114,13 @@ Flags: address. This is not recommended. -h, --help Prints help information - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 68aab768ae..28690d3a11 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -45,15 +45,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -104,8 +100,13 @@ Flags: directly cause slashable conditions, it might be an indicator that something is amiss. Users should also be careful to avoid submitting duplicate deposits for validators that already exist on the VC. 
- --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 99eee32c78..af4a1a4d6d 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -49,15 +49,11 @@ Options: --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - --logfile - File path where the log file will be stored. Once it grows to the - value specified in `--logfile-max-size` a new log file is generated - where future logs are stored. Once the number of log files exceeds the - value specified in `--logfile-max-number` the oldest log file will be - overwritten. --logfile-debug-level The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-dir + Directory path where the log file will be stored --logfile-format Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] @@ -100,8 +96,13 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - --log-color - Force outputting colors when emitting logs to the terminal. + --log-color [] + Enables/Disables colors for logs in terminal. Set it to false to + disable colors. [default: true] [possible values: true, false] + --log-extra-info + If present, show module,file,line in logs + --logfile-color + Enables colors in logfile. --logfile-compress If present, compress old log files. This can help reduce the space needed to store old logs. 
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 94dcfac5e1..362b598c9f 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -16,9 +16,7 @@ lighthouse_network = { workspace = true } log = { workspace = true } logging = { workspace = true } serde = { workspace = true } -slog = { workspace = true } -slog-async = { workspace = true } -slog-scope = "4.3.0" -slog-term = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index bb7678631f..c43a8b397b 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -53,9 +53,7 @@ impl BootNodeConfig { let mut network_config = NetworkConfig::default(); - let logger = slog_scope::logger(); - - set_network_config(&mut network_config, matches, &data_dir, &logger)?; + set_network_config(&mut network_config, matches, &data_dir)?; // Set the Enr Discovery ports to the listening ports if not present. if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { @@ -85,7 +83,7 @@ impl BootNodeConfig { network_config.discv5_config.enr_update = false; } - let private_key = load_private_key(&network_config, &logger); + let private_key = load_private_key(&network_config); let local_key = CombinedKey::from_libp2p(private_key)?; let local_enr = if let Some(dir) = matches.get_one::("network-dir") { @@ -104,7 +102,7 @@ impl BootNodeConfig { if eth2_network_config.genesis_state_is_known() { let mut genesis_state = eth2_network_config - .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger).await? + .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout).await? 
.ok_or_else(|| { "The genesis state for this network is not known, this is an unsupported mode" .to_string() @@ -113,7 +111,7 @@ impl BootNodeConfig { let genesis_state_root = genesis_state .canonical_root() .map_err(|e| format!("Error hashing genesis state: {e:?}"))?; - slog::info!(logger, "Genesis state found"; "root" => ?genesis_state_root); + tracing::info!(root = ?genesis_state_root, "Genesis state found"); let enr_fork = spec.enr_fork_id::( types::Slot::from(0u64), genesis_state.genesis_validators_root(), @@ -121,10 +119,7 @@ impl BootNodeConfig { Some(enr_fork.as_ssz_bytes()) } else { - slog::warn!( - logger, - "No genesis state provided. No Eth2 field added to the ENR" - ); + tracing::warn!("No genesis state provided. No Eth2 field added to the ENR"); None } }; @@ -160,7 +155,7 @@ impl BootNodeConfig { .map_err(|e| format!("Failed to build ENR: {:?}", e))? }; - use_or_load_enr(&local_key, &mut local_enr, &network_config, &logger)?; + use_or_load_enr(&local_key, &mut local_enr, &network_config)?; local_enr }; diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index 669b126bd3..70a45b2f92 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -1,6 +1,5 @@ //! Creates a simple DISCV5 server which can be used to bootstrap an Eth2 network. use clap::ArgMatches; -use slog::{o, Drain, Level, Logger}; use eth2_network_config::Eth2NetworkConfig; mod cli; @@ -8,10 +7,9 @@ pub mod config; mod server; pub use cli::cli_app; use config::BootNodeConfig; +use tracing_subscriber::EnvFilter; use types::{EthSpec, EthSpecId}; -const LOG_CHANNEL_SIZE: usize = 2048; - /// Run the bootnode given the CLI configuration. 
pub fn run( lh_matches: &ArgMatches, @@ -20,49 +18,27 @@ pub fn run( eth2_network_config: &Eth2NetworkConfig, debug_level: String, ) { - let debug_level = match debug_level.as_str() { - "trace" => log::Level::Trace, - "debug" => log::Level::Debug, - "info" => log::Level::Info, - "warn" => log::Level::Warn, - "error" => log::Level::Error, - "crit" => log::Level::Error, - _ => unreachable!(), - }; + let filter_layer = EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new(debug_level.to_string().to_lowercase())) + .unwrap(); - // Setting up the initial logger format and building it. - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - slog_async::Async::new(drain) - .chan_size(LOG_CHANNEL_SIZE) - .build() - }; - - let drain = match debug_level { - log::Level::Info => drain.filter_level(Level::Info), - log::Level::Debug => drain.filter_level(Level::Debug), - log::Level::Trace => drain.filter_level(Level::Trace), - log::Level::Warn => drain.filter_level(Level::Warning), - log::Level::Error => drain.filter_level(Level::Error), - }; - - let log = Logger::root(drain.fuse(), o!()); + tracing_subscriber::fmt() + .with_env_filter(filter_layer) + .init(); // Run the main function emitting any errors if let Err(e) = match eth_spec_id { EthSpecId::Minimal => { - main::(lh_matches, bn_matches, eth2_network_config, log) + main::(lh_matches, bn_matches, eth2_network_config) } EthSpecId::Mainnet => { - main::(lh_matches, bn_matches, eth2_network_config, log) + main::(lh_matches, bn_matches, eth2_network_config) } EthSpecId::Gnosis => { - main::(lh_matches, bn_matches, eth2_network_config, log) + main::(lh_matches, bn_matches, eth2_network_config) } } { - slog::crit!(slog_scope::logger(), "{}", e); + logging::crit!(?e); } } @@ -70,7 +46,6 @@ fn main( lh_matches: &ArgMatches, bn_matches: 
&ArgMatches, eth2_network_config: &Eth2NetworkConfig, - log: slog::Logger, ) -> Result<(), String> { // Builds a custom executor for the bootnode let runtime = tokio::runtime::Builder::new_multi_thread() @@ -83,7 +58,6 @@ fn main( lh_matches, bn_matches, eth2_network_config, - log, ))?; Ok(()) diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 96032dddcc..d96ac0c726 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -8,14 +8,13 @@ use lighthouse_network::{ discv5::{self, enr::NodeId, Discv5}, EnrExt, Eth2Enr, }; -use slog::info; +use tracing::{info, warn}; use types::EthSpec; pub async fn run( lh_matches: &ArgMatches, bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, - log: slog::Logger, ) -> Result<(), String> { // parse the CLI args into a useable config let config: BootNodeConfig = BootNodeConfig::new(bn_matches, eth2_network_config).await?; @@ -52,19 +51,19 @@ pub async fn run( let pretty_v4_socket = enr_v4_socket.as_ref().map(|addr| addr.to_string()); let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string()); info!( - log, "Configuration parameters"; - "listening_address" => ?discv5_config.listen_config, - "advertised_v4_address" => ?pretty_v4_socket, - "advertised_v6_address" => ?pretty_v6_socket, - "eth2" => eth2_field + listening_address = ?discv5_config.listen_config, + advertised_v4_address = ?pretty_v4_socket, + advertised_v6_address = ?pretty_v6_socket, + eth2 = eth2_field, + "Configuration parameters" ); - info!(log, "Identity established"; "peer_id" => %local_enr.peer_id(), "node_id" => %local_enr.node_id()); + info!(peer_id = %local_enr.peer_id(), node_id = %local_enr.node_id(), "Identity established"); // build the contactable multiaddr list, adding the p2p protocol - info!(log, "Contact information"; "enr" => local_enr.to_base64()); - info!(log, "Enr details"; "enr" => ?local_enr); - info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p()); + info!(enr = 
local_enr.to_base64(), "Contact information"); + info!(enr = ?local_enr, "Enr details"); + info!(multiaddrs = ?local_enr.multiaddr_p2p(), "Contact information"); // construct the discv5 server let mut discv5: Discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap(); @@ -72,16 +71,15 @@ pub async fn run( // If there are any bootnodes add them to the routing table for enr in boot_nodes { info!( - log, - "Adding bootnode"; - "ipv4_address" => ?enr.udp4_socket(), - "ipv6_address" => ?enr.udp6_socket(), - "peer_id" => ?enr.peer_id(), - "node_id" => ?enr.node_id() + ipv4_address = ?enr.udp4_socket(), + ipv6_address = ?enr.udp6_socket(), + peer_id = ?enr.peer_id(), + node_id = ?enr.node_id(), + "Adding bootnode" ); if enr != local_enr { if let Err(e) = discv5.add_enr(enr) { - slog::warn!(log, "Failed adding ENR"; "error" => ?e); + warn!(error = ?e, "Failed adding ENR"); } } } @@ -93,7 +91,7 @@ pub async fn run( // if there are peers in the local routing table, establish a session by running a query if !discv5.table_entries_id().is_empty() { - info!(log, "Executing bootstrap query..."); + info!("Executing bootstrap query..."); let _ = discv5.find_node(NodeId::random()).await; } @@ -131,14 +129,14 @@ pub async fn run( // display server metrics let metrics = discv5.metrics(); info!( - log, "Server metrics"; - "connected_peers" => discv5.connected_peers(), - "active_sessions" => metrics.active_sessions, - "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second), - "ipv4_nodes" => ipv4_only_reachable, - "ipv6_only_nodes" => ipv6_only_reachable, - "dual_stack_nodes" => ipv4_ipv6_reachable, - "unreachable_nodes" => unreachable_nodes, + connected_peers = discv5.connected_peers(), + active_sessions = metrics.active_sessions, + "requests/s" = format_args!("{:.2}", metrics.unsolicited_requests_per_second), + ipv4_nodes = ipv4_only_reachable, + ipv6_only_nodes = ipv6_only_reachable, + dual_stack_nodes = ipv4_ipv6_reachable, + unreachable_nodes, 
+ "Server metrics", ); } @@ -149,7 +147,7 @@ pub async fn run( // Ignore these events here } discv5::Event::SocketUpdated(socket_addr) => { - info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr); + info!(%socket_addr, "Advertised socket address updated"); } _ => {} // Ignore } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 3ab6034688..00c74a1303 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -14,7 +14,7 @@ regex = { workspace = true } rpassword = "5.0.0" serde = { workspace = true } serde_yaml = { workspace = true } -slog = { workspace = true } +tracing = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } zeroize = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 25cf368c90..4c253283fe 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -7,11 +7,11 @@ use crate::{default_keystore_password_path, read_password_string, write_file_via use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; -use slog::{error, Logger}; use std::collections::HashSet; use std::fs::{self, create_dir_all, File}; use std::io; use std::path::{Path, PathBuf}; +use tracing::error; use types::{graffiti::GraffitiString, Address, PublicKey}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; @@ -266,7 +266,6 @@ impl ValidatorDefinitions { &mut self, validators_dir: P, secrets_dir: P, - log: &Logger, ) -> Result { let mut keystore_paths = vec![]; recursively_find_voting_keystores(validators_dir, &mut keystore_paths) @@ -311,10 +310,9 @@ impl ValidatorDefinitions { Ok(keystore) => keystore, Err(e) => { error!( - log, - "Unable to read validator keystore"; - "error" => e, - "keystore" => format!("{:?}", voting_keystore_path) + error = ?e, + keystore = 
?voting_keystore_path, + "Unable to read validator keystore" ); return None; } @@ -336,9 +334,8 @@ impl ValidatorDefinitions { } None => { error!( - log, - "Invalid keystore public key"; - "keystore" => format!("{:?}", voting_keystore_path) + keystore = ?voting_keystore_path, + "Invalid keystore public key" ); return None; } diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index a255e04229..da6c4dfd95 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -20,12 +20,11 @@ bytes = { workspace = true } discv5 = { workspace = true } eth2_config = { workspace = true } kzg = { workspace = true } -logging = { workspace = true } pretty_reqwest_error = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } serde_yaml = { workspace = true } sha2 = { workspace = true } -slog = { workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 5d5a50574b..0bb12c4187 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -19,12 +19,12 @@ use pretty_reqwest_error::PrettyReqwestError; use reqwest::{Client, Error}; use sensitive_url::SensitiveUrl; use sha2::{Digest, Sha256}; -use slog::{info, warn, Logger}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; +use tracing::{info, warn}; use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; use url::Url; @@ -198,7 +198,6 @@ impl Eth2NetworkConfig { &self, genesis_state_url: Option<&str>, timeout: Duration, - log: &Logger, ) -> Result>, String> { let spec = self.chain_spec::()?; match &self.genesis_state_source { @@ -217,9 +216,9 @@ impl Eth2NetworkConfig { format!("Unable to parse genesis state bytes checksum: {:?}", e) })?; let 
bytes = if let Some(specified_url) = genesis_state_url { - download_genesis_state(&[specified_url], timeout, checksum, log).await + download_genesis_state(&[specified_url], timeout, checksum).await } else { - download_genesis_state(built_in_urls, timeout, checksum, log).await + download_genesis_state(built_in_urls, timeout, checksum).await }?; let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e) @@ -387,7 +386,6 @@ async fn download_genesis_state( urls: &[&str], timeout: Duration, checksum: Hash256, - log: &Logger, ) -> Result, String> { if urls.is_empty() { return Err( @@ -407,11 +405,10 @@ async fn download_genesis_state( .unwrap_or_else(|_| "".to_string()); info!( - log, - "Downloading genesis state"; - "server" => &redacted_url, - "timeout" => ?timeout, - "info" => "this may take some time on testnets with large validator counts" + server = &redacted_url, + timeout = ?timeout, + info = "this may take some time on testnets with large validator counts", + "Downloading genesis state" ); let client = Client::new(); @@ -424,10 +421,9 @@ async fn download_genesis_state( return Ok(bytes.into()); } else { warn!( - log, - "Genesis state download failed"; - "server" => &redacted_url, - "timeout" => ?timeout, + server = &redacted_url, + timeout = ?timeout, + "Genesis state download failed" ); errors.push(format!( "Response from {} did not match local checksum", @@ -505,7 +501,7 @@ mod tests { async fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); config - .genesis_state::(None, Duration::from_secs(1), &logging::test_logger()) + .genesis_state::(None, Duration::from_secs(1)) .await .expect("beacon state can decode"); } diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 164e3e47a7..cb4a43e407 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -1,7 
+1,7 @@ [package] name = "lighthouse_version" version = "0.1.0" -authors = ["Paul Hauner "] +authors = ["Sigma Prime "] edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index b2829a48d8..a69bc6ab23 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,14 +9,12 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } +logroller = { workspace = true } metrics = { workspace = true } +once_cell = "1.17.1" parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -slog = { workspace = true } -slog-term = { workspace = true } -sloggers = { workspace = true } -take_mut = "0.2.2" tokio = { workspace = true, features = [ "time" ] } tracing = "0.1" tracing-appender = { workspace = true } diff --git a/common/logging/src/async_record.rs b/common/logging/src/async_record.rs deleted file mode 100644 index 7a97fa1a75..0000000000 --- a/common/logging/src/async_record.rs +++ /dev/null @@ -1,307 +0,0 @@ -//! An object that can be used to pass through a channel and be cloned. It can therefore be used -//! via the broadcast channel. - -use parking_lot::Mutex; -use serde::ser::SerializeMap; -use serde::serde_if_integer128; -use serde::Serialize; -use slog::{BorrowedKV, Key, Level, OwnedKVList, Record, RecordStatic, Serializer, SingleKV, KV}; -use std::cell::RefCell; -use std::fmt; -use std::fmt::Write; -use std::sync::Arc; -use take_mut::take; - -thread_local! { - static TL_BUF: RefCell = RefCell::new(String::with_capacity(128)) -} - -/// Serialized record. 
-#[derive(Clone)] -pub struct AsyncRecord { - msg: String, - level: Level, - location: Box, - tag: String, - logger_values: OwnedKVList, - kv: Arc>, -} - -impl AsyncRecord { - /// Serializes a `Record` and an `OwnedKVList`. - pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { - let mut ser = ToSendSerializer::new(); - record - .kv() - .serialize(record, &mut ser) - .expect("`ToSendSerializer` can't fail"); - - AsyncRecord { - msg: fmt::format(*record.msg()), - level: record.level(), - location: Box::new(*record.location()), - tag: String::from(record.tag()), - logger_values: logger_values.clone(), - kv: Arc::new(Mutex::new(ser.finish())), - } - } - - pub fn to_json_string(&self) -> Result { - serde_json::to_string(&self).map_err(|e| format!("{:?}", e)) - } -} - -pub struct ToSendSerializer { - kv: Box, -} - -impl ToSendSerializer { - fn new() -> Self { - ToSendSerializer { kv: Box::new(()) } - } - - fn finish(self) -> Box { - self.kv - } -} - -impl Serializer for ToSendSerializer { - fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_unit(&mut self, key: Key) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); - Ok(()) - } - fn emit_none(&mut self, key: Key) -> slog::Result { - let val: Option<()> = None; - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_char(&mut self, key: Key, val: char) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - 
fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { - let val = val.to_owned(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { - let val = fmt::format(*val); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } -} - -impl Serialize for AsyncRecord { - fn serialize(&self, serializer: S) -> Result - where - S: 
serde::Serializer, - { - // Get the current time - let dt = chrono::Local::now().format("%b %e %T").to_string(); - - let rs = RecordStatic { - location: &self.location, - level: self.level, - tag: &self.tag, - }; - let mut map_serializer = SerdeSerializer::new(serializer)?; - - // Serialize the time and log level first - map_serializer.serialize_entry("time", &dt)?; - map_serializer.serialize_entry("level", self.level.as_short_str())?; - - let kv = self.kv.lock(); - - // Convoluted pattern to avoid binding `format_args!` to a temporary. - // See: https://stackoverflow.com/questions/56304313/cannot-use-format-args-due-to-temporary-value-is-freed-at-the-end-of-this-state - let mut f = |msg: std::fmt::Arguments| { - map_serializer.serialize_entry("msg", msg.to_string())?; - - let record = Record::new(&rs, &msg, BorrowedKV(&(*kv))); - self.logger_values - .serialize(&record, &mut map_serializer) - .map_err(serde::ser::Error::custom)?; - record - .kv() - .serialize(&record, &mut map_serializer) - .map_err(serde::ser::Error::custom) - }; - f(format_args!("{}", self.msg))?; - map_serializer.end() - } -} - -struct SerdeSerializer { - /// Current state of map serializing: `serde::Serializer::MapState` - ser_map: S::SerializeMap, -} - -impl SerdeSerializer { - fn new(ser: S) -> Result { - let ser_map = ser.serialize_map(None)?; - Ok(SerdeSerializer { ser_map }) - } - - fn serialize_entry(&mut self, key: K, value: V) -> Result<(), S::Error> - where - K: serde::Serialize, - V: serde::Serialize, - { - self.ser_map.serialize_entry(&key, &value) - } - - /// Finish serialization, and return the serializer - fn end(self) -> Result { - self.ser_map.end() - } -} - -// NOTE: This is borrowed from slog_json -macro_rules! 
impl_m( - ($s:expr, $key:expr, $val:expr) => ({ - let k_s: &str = $key.as_ref(); - $s.ser_map.serialize_entry(k_s, $val) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("serde serialization error: {}", e)))?; - Ok(()) - }); -); - -impl slog::Serializer for SerdeSerializer -where - S: serde::Serializer, -{ - fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { - impl_m!(self, key, &val) - } - - fn emit_unit(&mut self, key: Key) -> slog::Result { - impl_m!(self, key, &()) - } - - fn emit_char(&mut self, key: Key, val: char) -> slog::Result { - impl_m!(self, key, &val) - } - - fn emit_none(&mut self, key: Key) -> slog::Result { - let val: Option<()> = None; - impl_m!(self, key, &val) - } - fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { - impl_m!(self, key, &val) - } - serde_if_integer128! 
{ - fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { - impl_m!(self, key, &val) - } - } - fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { - impl_m!(self, key, &val) - } - fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { - TL_BUF.with(|buf| { - let mut buf = buf.borrow_mut(); - - buf.write_fmt(*val).unwrap(); - - let res = { || impl_m!(self, key, &*buf) }(); - buf.clear(); - res - }) - } -} diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 0ddd867c2f..39615cd656 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,20 +1,20 @@ -use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult}; -use slog::Logger; -use slog_term::Decorator; -use std::io::{Result, Write}; +use chrono::Local; +use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize}; +use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult}; +use std::io::Write; use std::path::PathBuf; use std::sync::LazyLock; use std::time::{Duration, Instant}; -use tracing_appender::non_blocking::NonBlocking; -use tracing_appender::rolling::{RollingFileAppender, Rotation}; -use tracing_logging_layer::LoggingLayer; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use tracing::Subscriber; +use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::layer::Context; +use tracing_subscriber::{EnvFilter, Layer}; pub const MAX_MESSAGE_WIDTH: usize = 40; -pub mod async_record; +pub mod macros; mod sse_logging_components; -mod tracing_logging_layer; +pub mod tracing_logging_layer; mod tracing_metrics_layer; pub use sse_logging_components::SSELoggingComponents; @@ -32,169 +32,6 @@ pub static ERRORS_TOTAL: LazyLock> = pub static CRITS_TOTAL: LazyLock> = LazyLock::new(|| try_create_int_counter("crit_total", "Count of crits 
logged")); -pub struct AlignedTermDecorator { - wrapped: D, - message_width: usize, -} - -impl AlignedTermDecorator { - pub fn new(decorator: D, message_width: usize) -> Self { - AlignedTermDecorator { - wrapped: decorator, - message_width, - } - } -} - -impl Decorator for AlignedTermDecorator { - fn with_record( - &self, - record: &slog::Record, - _logger_values: &slog::OwnedKVList, - f: F, - ) -> Result<()> - where - F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()>, - { - match record.level() { - slog::Level::Info => inc_counter(&INFOS_TOTAL), - slog::Level::Warning => inc_counter(&WARNS_TOTAL), - slog::Level::Error => inc_counter(&ERRORS_TOTAL), - slog::Level::Critical => inc_counter(&CRITS_TOTAL), - _ => (), - } - - self.wrapped.with_record(record, _logger_values, |deco| { - f(&mut AlignedRecordDecorator::new(deco, self.message_width)) - }) - } -} - -struct AlignedRecordDecorator<'a> { - wrapped: &'a mut dyn slog_term::RecordDecorator, - message_count: usize, - message_active: bool, - ignore_comma: bool, - message_width: usize, -} - -impl<'a> AlignedRecordDecorator<'a> { - fn new( - decorator: &'a mut dyn slog_term::RecordDecorator, - message_width: usize, - ) -> AlignedRecordDecorator<'a> { - AlignedRecordDecorator { - wrapped: decorator, - message_count: 0, - ignore_comma: false, - message_active: false, - message_width, - } - } - - fn filtered_write(&mut self, buf: &[u8]) -> Result { - if self.ignore_comma { - //don't write comma - self.ignore_comma = false; - Ok(buf.len()) - } else if self.message_active { - self.wrapped.write(buf).inspect(|n| self.message_count += n) - } else { - self.wrapped.write(buf) - } - } -} - -impl Write for AlignedRecordDecorator<'_> { - fn write(&mut self, buf: &[u8]) -> Result { - if buf.iter().any(u8::is_ascii_control) { - let filtered = buf - .iter() - .cloned() - .map(|c| if !is_ascii_control(&c) { c } else { b'_' }) - .collect::>(); - self.filtered_write(&filtered) - } else { - self.filtered_write(buf) - 
} - } - - fn flush(&mut self) -> Result<()> { - self.wrapped.flush() - } -} - -impl slog_term::RecordDecorator for AlignedRecordDecorator<'_> { - fn reset(&mut self) -> Result<()> { - self.message_active = false; - self.message_count = 0; - self.ignore_comma = false; - self.wrapped.reset() - } - - fn start_whitespace(&mut self) -> Result<()> { - self.wrapped.start_whitespace() - } - - fn start_msg(&mut self) -> Result<()> { - self.message_active = true; - self.ignore_comma = false; - self.wrapped.start_msg() - } - - fn start_timestamp(&mut self) -> Result<()> { - self.wrapped.start_timestamp() - } - - fn start_level(&mut self) -> Result<()> { - self.wrapped.start_level() - } - - fn start_comma(&mut self) -> Result<()> { - if self.message_active && self.message_count + 1 < self.message_width { - self.ignore_comma = true; - } - self.wrapped.start_comma() - } - - fn start_key(&mut self) -> Result<()> { - if self.message_active && self.message_count + 1 < self.message_width { - write!( - self, - "{}", - " ".repeat(self.message_width - self.message_count) - )?; - self.message_active = false; - self.message_count = 0; - self.ignore_comma = false; - } - self.wrapped.start_key() - } - - fn start_value(&mut self) -> Result<()> { - self.wrapped.start_value() - } - - fn start_separator(&mut self) -> Result<()> { - self.wrapped.start_separator() - } -} - -/// Function to filter out ascii control codes. -/// -/// This helps to keep log formatting consistent. -/// Whitespace and padding control codes are excluded. -fn is_ascii_control(character: &u8) -> bool { - matches!( - character, - b'\x00'..=b'\x08' | - b'\x0b'..=b'\x0c' | - b'\x0e'..=b'\x1f' | - b'\x7f' | - b'\x81'..=b'\x9f' - ) -} - /// Provides de-bounce functionality for logging. 
#[derive(Default)] pub struct TimeLatch(Option); @@ -214,75 +51,127 @@ impl TimeLatch { } } -pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { - let mut tracing_log_path = PathBuf::new(); +pub struct Libp2pDiscv5TracingLayer { + pub libp2p_non_blocking_writer: NonBlocking, + pub _libp2p_guard: WorkerGuard, + pub discv5_non_blocking_writer: NonBlocking, + pub _discv5_guard: WorkerGuard, +} - // Ensure that `tracing_log_path` only contains directories. - for p in base_tracing_log_path.iter() { - tracing_log_path = tracing_log_path.join(p); - if let Ok(metadata) = tracing_log_path.metadata() { - if !metadata.is_dir() { - tracing_log_path.pop(); - break; - } +impl Layer for Libp2pDiscv5TracingLayer +where + S: Subscriber, +{ + fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { + let meta = event.metadata(); + let log_level = meta.level(); + let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + + let target = match meta.target().split_once("::") { + Some((crate_name, _)) => crate_name, + None => "unknown", + }; + + let mut writer = match target { + "gossipsub" => self.libp2p_non_blocking_writer.clone(), + "discv5" => self.discv5_non_blocking_writer.clone(), + _ => return, + }; + + let mut visitor = LogMessageExtractor { + message: String::default(), + }; + + event.record(&mut visitor); + let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); + + if let Err(e) = writer.write_all(message.as_bytes()) { + eprintln!("Failed to write log: {}", e); } } - - let filter_layer = match tracing_subscriber::EnvFilter::try_from_default_env() - .or_else(|_| tracing_subscriber::EnvFilter::try_new("warn")) - { - Ok(filter) => filter, - Err(e) => { - eprintln!("Failed to initialize dependency logging {e}"); - return; - } - }; - - let Ok(libp2p_writer) = RollingFileAppender::builder() - .rotation(Rotation::DAILY) - .max_log_files(2) - .filename_prefix("libp2p") - .filename_suffix("log") - .build(tracing_log_path.clone()) - 
else { - eprintln!("Failed to initialize libp2p rolling file appender"); - return; - }; - - let Ok(discv5_writer) = RollingFileAppender::builder() - .rotation(Rotation::DAILY) - .max_log_files(2) - .filename_prefix("discv5") - .filename_suffix("log") - .build(tracing_log_path) - else { - eprintln!("Failed to initialize discv5 rolling file appender"); - return; - }; - - let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); - let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); - - let custom_layer = LoggingLayer { - libp2p_non_blocking_writer, - _libp2p_guard, - discv5_non_blocking_writer, - _discv5_guard, - }; - - if let Err(e) = tracing_subscriber::fmt() - .with_env_filter(filter_layer) - .with_writer(std::io::sink) - .finish() - .with(MetricsLayer) - .with(custom_layer) - .try_init() - { - eprintln!("Failed to initialize dependency logging {e}"); - } } -/// Return a logger suitable for test usage. +struct LogMessageExtractor { + message: String, +} + +impl tracing_core::field::Visit for LogMessageExtractor { + fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { + self.message = format!("{} {:?}", self.message, value); + } +} + +pub fn create_libp2p_discv5_tracing_layer( + base_tracing_log_path: Option, + max_log_size: u64, + compression: bool, + max_log_number: usize, +) -> Libp2pDiscv5TracingLayer { + if let Some(mut tracing_log_path) = base_tracing_log_path { + // Ensure that `tracing_log_path` only contains directories. 
+ for p in tracing_log_path.clone().iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + + let mut libp2p_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { + eprintln!("Failed to convert max_log_number to u64: {}", e); + 10 + })); + + let mut discv5_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { + eprintln!("Failed to convert max_log_number to u64: {}", e); + 10 + })); + + if compression { + libp2p_writer = libp2p_writer.compression(Compression::Gzip); + discv5_writer = discv5_writer.compression(Compression::Gzip); + } + + let Ok(libp2p_writer) = libp2p_writer.build() else { + eprintln!("Failed to initialize libp2p rolling file appender"); + std::process::exit(1); + }; + + let Ok(discv5_writer) = discv5_writer.build() else { + eprintln!("Failed to initialize discv5 rolling file appender"); + std::process::exit(1); + }; + + let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); + let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); + + Libp2pDiscv5TracingLayer { + libp2p_non_blocking_writer, + _libp2p_guard, + discv5_non_blocking_writer, + _discv5_guard, + } + } else { + let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink()); + let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink()); + Libp2pDiscv5TracingLayer { + libp2p_non_blocking_writer, + _libp2p_guard, + discv5_non_blocking_writer, + _discv5_guard, + } + } +} + +/// Return a tracing subscriber suitable for test usage. 
/// /// By default no logs will be printed, but they can be enabled via /// the `test_logger` feature. This feature can be enabled for any @@ -290,17 +179,10 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { /// ```bash /// cargo test -p beacon_chain --features logging/test_logger /// ``` -pub fn test_logger() -> Logger { - use sloggers::Build; - +pub fn create_test_tracing_subscriber() { if cfg!(feature = "test_logger") { - sloggers::terminal::TerminalLoggerBuilder::new() - .level(sloggers::types::Severity::Debug) - .build() - .expect("Should build TerminalLoggerBuilder") - } else { - sloggers::null::NullLoggerBuilder - .build() - .expect("Should build NullLoggerBuilder") + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::try_new("debug").unwrap()) + .try_init(); } } diff --git a/common/logging/src/macros.rs b/common/logging/src/macros.rs new file mode 100644 index 0000000000..eb25eba56c --- /dev/null +++ b/common/logging/src/macros.rs @@ -0,0 +1,6 @@ +#[macro_export] +macro_rules! crit { + ($($arg:tt)*) => { + tracing::error!(error_type = "crit", $($arg)*); + }; +} diff --git a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs index 244d09fbd1..e358fde6c6 100644 --- a/common/logging/src/sse_logging_components.rs +++ b/common/logging/src/sse_logging_components.rs @@ -1,46 +1,108 @@ //! This module provides an implementation of `slog::Drain` that optionally writes to a channel if //! there are subscribers to a HTTP SSE stream. -use crate::async_record::AsyncRecord; -use slog::{Drain, OwnedKVList, Record}; -use std::panic::AssertUnwindSafe; +use serde_json::json; +use serde_json::Value; use std::sync::Arc; use tokio::sync::broadcast::Sender; +use tracing::field::{Field, Visit}; +use tracing::{Event, Subscriber}; +use tracing_subscriber::layer::{Context, Layer}; /// Default log level for SSE Events. // NOTE: Made this a constant. Debug level seems to be pretty intense. 
Can make this // configurable later if needed. -const LOG_LEVEL: slog::Level = slog::Level::Info; +const LOG_LEVEL: tracing::Level = tracing::Level::INFO; /// The components required in the HTTP API task to receive logged events. #[derive(Clone)] pub struct SSELoggingComponents { /// The channel to receive events from. - pub sender: Arc>>, + pub sender: Arc>>, } impl SSELoggingComponents { - /// Create a new SSE drain. pub fn new(channel_size: usize) -> Self { let (sender, _receiver) = tokio::sync::broadcast::channel(channel_size); - let sender = Arc::new(AssertUnwindSafe(sender)); - SSELoggingComponents { sender } + SSELoggingComponents { + sender: Arc::new(sender), + } } } -impl Drain for SSELoggingComponents { - type Ok = (); - type Err = &'static str; +impl Layer for SSELoggingComponents { + fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { + if *event.metadata().level() > LOG_LEVEL { + return; + } - fn log(&self, record: &Record, logger_values: &OwnedKVList) -> Result { - if record.level().is_at_least(LOG_LEVEL) { - // Attempt to send the logs - match self.sender.send(AsyncRecord::from(record, logger_values)) { - Ok(_num_sent) => {} // Everything got sent - Err(_err) => {} // There are no subscribers, do nothing + let mut visitor = TracingEventVisitor::new(); + event.record(&mut visitor); + let mut log_entry = visitor.finish(event.metadata()); + + if let Some(error_type) = log_entry + .get("fields") + .and_then(|fields| fields.get("error_type")) + .and_then(|val| val.as_str()) + { + if error_type.eq_ignore_ascii_case("crit") { + log_entry["level"] = json!("CRIT"); + + if let Some(Value::Object(ref mut map)) = log_entry.get_mut("fields") { + map.remove("error_type"); + } } } - Ok(()) + + let _ = self.sender.send(Arc::new(log_entry)); + } +} +struct TracingEventVisitor { + fields: serde_json::Map, +} + +impl TracingEventVisitor { + fn new() -> Self { + TracingEventVisitor { + fields: serde_json::Map::new(), + } + } + + fn finish(self, metadata: 
&tracing::Metadata<'_>) -> Value { + let mut log_entry = serde_json::Map::new(); + log_entry.insert( + "time".to_string(), + json!(chrono::Local::now() + .format("%b %d %H:%M:%S%.3f") + .to_string()), + ); + log_entry.insert("level".to_string(), json!(metadata.level().to_string())); + log_entry.insert("target".to_string(), json!(metadata.target())); + log_entry.insert("fields".to_string(), Value::Object(self.fields)); + Value::Object(log_entry) + } +} + +impl Visit for TracingEventVisitor { + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + self.fields + .insert(field.name().to_string(), json!(format!("{:?}", value))); + } + + fn record_str(&mut self, field: &Field, value: &str) { + self.fields.insert(field.name().to_string(), json!(value)); + } + + fn record_i64(&mut self, field: &Field, value: i64) { + self.fields.insert(field.name().to_string(), json!(value)); + } + + fn record_u64(&mut self, field: &Field, value: u64) { + self.fields.insert(field.name().to_string(), json!(value)); + } + + fn record_bool(&mut self, field: &Field, value: bool) { + self.fields.insert(field.name().to_string(), json!(value)); } } diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index a9ddae828a..4478e1facb 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -1,56 +1,531 @@ use chrono::prelude::*; +use serde_json::{Map, Value}; +use std::collections::HashMap; use std::io::Write; +use std::sync::{Arc, Mutex}; +use tracing::field::Field; +use tracing::span::Id; use tracing::Subscriber; use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; use tracing_subscriber::layer::Context; +use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::Layer; pub struct LoggingLayer { - pub libp2p_non_blocking_writer: NonBlocking, - pub _libp2p_guard: WorkerGuard, - pub discv5_non_blocking_writer: NonBlocking, - pub _discv5_guard: WorkerGuard, + 
pub non_blocking_writer: NonBlocking, + pub guard: WorkerGuard, + pub disable_log_timestamp: bool, + pub log_color: bool, + pub logfile_color: bool, + pub log_format: Option, + pub logfile_format: Option, + pub extra_info: bool, + pub dep_logs: bool, + span_fields: Arc>>, +} + +impl LoggingLayer { + #[allow(clippy::too_many_arguments)] + pub fn new( + non_blocking_writer: NonBlocking, + guard: WorkerGuard, + disable_log_timestamp: bool, + log_color: bool, + logfile_color: bool, + log_format: Option, + logfile_format: Option, + extra_info: bool, + dep_logs: bool, + ) -> Self { + Self { + non_blocking_writer, + guard, + disable_log_timestamp, + log_color, + logfile_color, + log_format, + logfile_format, + extra_info, + dep_logs, + span_fields: Arc::new(Mutex::new(HashMap::new())), + } + } } impl Layer for LoggingLayer where - S: Subscriber, + S: Subscriber + for<'a> LookupSpan<'a>, { - fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { + fn on_new_span(&self, attrs: &tracing::span::Attributes<'_>, id: &Id, _ctx: Context) { + let metadata = attrs.metadata(); + let span_name = metadata.name(); + + let mut visitor = SpanFieldsExtractor::default(); + attrs.record(&mut visitor); + + let span_data = SpanData { + name: span_name.to_string(), + fields: visitor.fields, + }; + + let mut span_fields = match self.span_fields.lock() { + Ok(guard) => guard, + Err(poisoned) => poisoned.into_inner(), + }; + span_fields.insert(id.clone(), span_data); + } + + fn on_event(&self, event: &tracing::Event<'_>, ctx: Context) { let meta = event.metadata(); let log_level = meta.level(); - let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); - - let target = match meta.target().split_once("::") { - Some((crate_name, _)) => crate_name, - None => "unknown", + let timestamp = if !self.disable_log_timestamp { + Local::now().format("%b %d %H:%M:%S%.3f").to_string() + } else { + String::new() }; - let mut writer = match target { - "gossipsub" => 
self.libp2p_non_blocking_writer.clone(), - "discv5" => self.discv5_non_blocking_writer.clone(), - _ => return, - }; + if !self.dep_logs { + if let Some(file) = meta.file() { + if file.contains("/.cargo/") { + return; + } + } else { + return; + } + } + + let mut writer = self.non_blocking_writer.clone(); let mut visitor = LogMessageExtractor { - message: String::default(), + message: String::new(), + fields: Vec::new(), + is_crit: false, + }; + event.record(&mut visitor); + + // Remove ascii control codes from message. + // All following formatting and logs components are predetermined or known. + if visitor.message.as_bytes().iter().any(u8::is_ascii_control) { + let filtered = visitor + .message + .as_bytes() + .iter() + .map(|c| if is_ascii_control(c) { b'_' } else { *c }) + .collect::>(); + visitor.message = String::from_utf8(filtered).unwrap_or_default(); }; - event.record(&mut visitor); - let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); + let module = meta.module_path().unwrap_or(""); + let file = meta.file().unwrap_or(""); + let line = match meta.line() { + Some(line) => line.to_string(), + None => "".to_string(), + }; - if let Err(e) = writer.write_all(message.as_bytes()) { - eprintln!("Failed to write log: {}", e); + if module.contains("discv5") { + visitor + .fields + .push(("service".to_string(), "\"discv5\"".to_string())); } + + let gray = "\x1b[90m"; + let reset = "\x1b[0m"; + let location = if self.extra_info { + if self.logfile_color { + format!("{}{}::{}:{}{}", gray, module, file, line, reset) + } else { + format!("{}::{}:{}", module, file, line) + } + } else { + String::new() + }; + + let plain_level_str = if visitor.is_crit { + "CRIT" + } else { + match *log_level { + tracing::Level::ERROR => "ERROR", + tracing::Level::WARN => "WARN", + tracing::Level::INFO => "INFO", + tracing::Level::DEBUG => "DEBUG", + tracing::Level::TRACE => "TRACE", + } + }; + + let color_level_str = if visitor.is_crit { + "\x1b[35mCRIT\x1b[0m" + } 
else { + match *log_level { + tracing::Level::ERROR => "\x1b[31mERROR\x1b[0m", + tracing::Level::WARN => "\x1b[33mWARN\x1b[0m", + tracing::Level::INFO => "\x1b[32mINFO\x1b[0m", + tracing::Level::DEBUG => "\x1b[34mDEBUG\x1b[0m", + tracing::Level::TRACE => "\x1b[35mTRACE\x1b[0m", + } + }; + + if self.dep_logs { + if self.logfile_format.as_deref() == Some("JSON") { + build_json_log_file( + &visitor, + plain_level_str, + meta, + &ctx, + &self.span_fields, + event, + &mut writer, + ); + } else { + build_log_text( + &visitor, + plain_level_str, + ×tamp, + &ctx, + &self.span_fields, + event, + &location, + color_level_str, + self.logfile_color, + &mut writer, + ); + } + } else if self.log_format.as_deref() == Some("JSON") { + build_json_log_stdout(&visitor, plain_level_str, ×tamp, &mut writer); + } else { + build_log_text( + &visitor, + plain_level_str, + ×tamp, + &ctx, + &self.span_fields, + event, + &location, + color_level_str, + self.log_color, + &mut writer, + ); + } + } +} + +struct SpanData { + name: String, + fields: Vec<(String, String)>, +} + +#[derive(Default)] +struct SpanFieldsExtractor { + fields: Vec<(String, String)>, +} + +impl tracing_core::field::Visit for SpanFieldsExtractor { + fn record_str(&mut self, field: &Field, value: &str) { + self.fields + .push((field.name().to_string(), format!("\"{}\"", value))); + } + + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + self.fields + .push((field.name().to_string(), format!("{:?}", value))); + } + + fn record_i64(&mut self, field: &Field, value: i64) { + self.fields + .push((field.name().to_string(), value.to_string())); + } + + fn record_u64(&mut self, field: &Field, value: u64) { + self.fields + .push((field.name().to_string(), value.to_string())); + } + + fn record_bool(&mut self, field: &Field, value: bool) { + self.fields + .push((field.name().to_string(), value.to_string())); } } struct LogMessageExtractor { message: String, + fields: Vec<(String, String)>, + is_crit: bool, } 
impl tracing_core::field::Visit for LogMessageExtractor { - fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { - self.message = format!("{} {:?}", self.message, value); + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() == "message" { + if self.message.is_empty() { + self.message = value.to_string(); + } else { + self.fields + .push(("msg_id".to_string(), format!("\"{}\"", value))); + } + } else if field.name() == "error_type" && value == "crit" { + self.is_crit = true; + } else { + self.fields + .push((field.name().to_string(), format!("\"{}\"", value))); + } + } + + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + if field.name() == "message" { + if self.message.is_empty() { + self.message = format!("{:?}", value); + } else { + self.fields + .push(("msg_id".to_string(), format!("{:?}", value))); + } + } else if field.name() == "error_type" && format!("{:?}", value) == "\"crit\"" { + self.is_crit = true; + } else { + self.fields + .push((field.name().to_string(), format!("{:?}", value))); + } + } + + fn record_i64(&mut self, field: &Field, value: i64) { + self.fields + .push((field.name().to_string(), value.to_string())); + } + + fn record_u64(&mut self, field: &Field, value: u64) { + self.fields + .push((field.name().to_string(), value.to_string())); + } + + fn record_bool(&mut self, field: &Field, value: bool) { + self.fields + .push((field.name().to_string(), value.to_string())); } } + +/// Function to filter out ascii control codes. +/// +/// This helps to keep log formatting consistent. +/// Whitespace and padding control codes are excluded. 
+fn is_ascii_control(character: &u8) -> bool { + matches!( + character, + b'\x00'..=b'\x08' | + b'\x0b'..=b'\x0c' | + b'\x0e'..=b'\x1f' | + b'\x7f' | + b'\x81'..=b'\x9f' + ) +} + +fn build_json_log_stdout( + visitor: &LogMessageExtractor, + plain_level_str: &str, + timestamp: &str, + writer: &mut impl Write, +) { + let mut log_map = Map::new(); + log_map.insert("msg".to_string(), Value::String(visitor.message.clone())); + log_map.insert( + "level".to_string(), + Value::String(plain_level_str.to_string()), + ); + log_map.insert("ts".to_string(), Value::String(timestamp.to_string())); + + for (key, val) in visitor.fields.clone().into_iter() { + let parsed_val = parse_field(&val); + log_map.insert(key, parsed_val); + } + + let json_obj = Value::Object(log_map); + let output = format!("{}\n", json_obj); + + if let Err(e) = writer.write_all(output.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } +} + +fn build_json_log_file<'a, S>( + visitor: &LogMessageExtractor, + plain_level_str: &str, + meta: &tracing::Metadata<'_>, + ctx: &Context<'_, S>, + span_fields: &Arc>>, + event: &tracing::Event<'_>, + writer: &mut impl Write, +) where + S: Subscriber + for<'lookup> LookupSpan<'lookup>, +{ + let utc_timestamp = Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true); + let mut log_map = Map::new(); + + log_map.insert("msg".to_string(), Value::String(visitor.message.clone())); + log_map.insert( + "level".to_string(), + Value::String(plain_level_str.to_string()), + ); + log_map.insert("ts".to_string(), Value::String(utc_timestamp)); + + let module_path = meta.module_path().unwrap_or(""); + let line_number = meta + .line() + .map_or("".to_string(), |l| l.to_string()); + let module_field = format!("{}:{}", module_path, line_number); + log_map.insert("module".to_string(), Value::String(module_field)); + + for (key, val) in visitor.fields.clone().into_iter() { + let cleaned_value = if val.starts_with('\"') && val.ends_with('\"') && val.len() >= 2 { + 
&val[1..val.len() - 1] + } else { + &val + }; + let parsed_val = + serde_json::from_str(cleaned_value).unwrap_or(Value::String(cleaned_value.to_string())); + log_map.insert(key, parsed_val); + } + + if let Some(scope) = ctx.event_scope(event) { + let guard = span_fields.lock().ok(); + if let Some(span_map) = guard { + for span in scope { + let id = span.id(); + if let Some(span_data) = span_map.get(&id) { + for (key, val) in &span_data.fields { + let parsed_span_val = parse_field(val); + log_map.insert(key.clone(), parsed_span_val); + } + } + } + } + } + + let json_obj = Value::Object(log_map); + let output = format!("{}\n", json_obj); + + if let Err(e) = writer.write_all(output.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } +} + +#[allow(clippy::too_many_arguments)] +fn build_log_text<'a, S>( + visitor: &LogMessageExtractor, + plain_level_str: &str, + timestamp: &str, + ctx: &Context<'_, S>, + span_fields: &Arc>>, + event: &tracing::Event<'_>, + location: &str, + color_level_str: &str, + use_color: bool, + writer: &mut impl Write, +) where + S: Subscriber + for<'lookup> LookupSpan<'lookup>, +{ + let bold_start = "\x1b[1m"; + let bold_end = "\x1b[0m"; + let mut collected_span_fields = Vec::new(); + + if let Some(scope) = ctx.event_scope(event) { + for span in scope { + let id = span.id(); + let span_fields_map = span_fields.lock().unwrap(); + if let Some(span_data) = span_fields_map.get(&id) { + collected_span_fields.push((span_data.name.clone(), span_data.fields.clone())); + } + } + } + + let mut formatted_spans = String::new(); + for (_, fields) in collected_span_fields.iter().rev() { + for (i, (field_name, field_value)) in fields.iter().enumerate() { + if i > 0 && !visitor.fields.is_empty() { + formatted_spans.push_str(", "); + } + if use_color { + formatted_spans.push_str(&format!( + "{}{}{}: {}", + bold_start, field_name, bold_end, field_value + )); + } else { + formatted_spans.push_str(&format!("{}: {}", field_name, field_value)); + } + } + } + 
+ let level_str = if use_color { + color_level_str + } else { + plain_level_str + }; + + let fixed_message_width = 44; + let message_len = visitor.message.len(); + + let message_content = if use_color { + format!("{}{}{}", bold_start, visitor.message, bold_end) + } else { + visitor.message.clone() + }; + + let padded_message = if message_len < fixed_message_width { + let extra_color_len = if use_color { + bold_start.len() + bold_end.len() + } else { + 0 + }; + format!( + "{: 0 { + formatted_fields.push_str(", "); + } + if use_color { + formatted_fields.push_str(&format!( + "{}{}{}: {}", + bold_start, field_name, bold_end, field_value + )); + } else { + formatted_fields.push_str(&format!("{}: {}", field_name, field_value)); + } + if i == visitor.fields.len() - 1 && !collected_span_fields.is_empty() { + formatted_fields.push(','); + } + } + + let full_message = if !formatted_fields.is_empty() { + format!("{} {}", padded_message, formatted_fields) + } else { + padded_message.to_string() + }; + + let message = if !location.is_empty() { + format!( + "{} {} {} {} {}\n", + timestamp, level_str, location, full_message, formatted_spans + ) + } else { + format!( + "{} {} {} {}\n", + timestamp, level_str, full_message, formatted_spans + ) + }; + + if let Err(e) = writer.write_all(message.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } +} + +fn parse_field(val: &str) -> Value { + let cleaned = if val.starts_with('"') && val.ends_with('"') && val.len() >= 2 { + &val[1..val.len() - 1] + } else { + val + }; + serde_json::from_str(cleaned).unwrap_or(Value::String(cleaned.to_string())) +} diff --git a/common/logging/tests/test.rs b/common/logging/tests/test.rs deleted file mode 100644 index f39f2b6d5a..0000000000 --- a/common/logging/tests/test.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::env; -use std::process::Command; -use std::process::Output; - -fn run_cmd(cmd_line: &str) -> Result { - if cfg!(target_os = "windows") { - Command::new(r#"cmd"#).args(["/C", 
cmd_line]).output() - } else { - Command::new(r#"sh"#).args(["-c", cmd_line]).output() - } -} - -#[test] -fn test_test_logger_with_feature_test_logger() { - let cur_dir = env::current_dir().unwrap(); - let test_dir = cur_dir - .join("..") - .join("..") - .join("testing") - .join("test-test_logger"); - let cmd_line = format!( - "cd {} && cargo test --features logging/test_logger", - test_dir.to_str().unwrap() - ); - - let output = run_cmd(&cmd_line); - - // Assert output data DOES contain "INFO hi, " - let data = String::from_utf8(output.unwrap().stderr).unwrap(); - println!("data={}", data); - assert!(data.contains("INFO hi, ")); -} - -#[test] -fn test_test_logger_no_features() { - // Test without features - let cur_dir = env::current_dir().unwrap(); - let test_dir = cur_dir - .join("..") - .join("..") - .join("testing") - .join("test-test_logger"); - let cmd_line = format!("cd {} && cargo test", test_dir.to_str().unwrap()); - - let output = run_cmd(&cmd_line); - - // Assert output data DOES contain "INFO hi, " - let data = String::from_utf8(output.unwrap().stderr).unwrap(); - println!("data={}", data); - assert!(!data.contains("INFO hi, ")); -} diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index cb52cff29a..9e2c36e2c7 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -15,7 +15,7 @@ reqwest = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -slog = { workspace = true } store = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 6f919971b0..966a1a3054 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -9,9 +9,9 @@ use reqwest::{IntoUrl, Response}; pub use reqwest::{StatusCode, Url}; use sensitive_url::SensitiveUrl; use 
serde::{Deserialize, Serialize}; -use slog::{debug, error, info}; use task_executor::TaskExecutor; use tokio::time::{interval_at, Instant}; +use tracing::{debug, error, info}; use types::*; pub use types::ProcessType; @@ -69,11 +69,10 @@ pub struct MonitoringHttpClient { freezer_db_path: Option, update_period: Duration, monitoring_endpoint: SensitiveUrl, - log: slog::Logger, } impl MonitoringHttpClient { - pub fn new(config: &Config, log: slog::Logger) -> Result { + pub fn new(config: &Config) -> Result { Ok(Self { client: reqwest::Client::new(), db_path: config.db_path.clone(), @@ -83,7 +82,6 @@ impl MonitoringHttpClient { ), monitoring_endpoint: SensitiveUrl::parse(&config.monitoring_endpoint) .map_err(|e| format!("Invalid monitoring endpoint: {:?}", e))?, - log, }) } @@ -111,10 +109,9 @@ impl MonitoringHttpClient { ); info!( - self.log, - "Starting monitoring API"; - "endpoint" => %self.monitoring_endpoint, - "update_period" => format!("{}s", self.update_period.as_secs()), + endpoint = %self.monitoring_endpoint, + update_period = format!("{}s", self.update_period.as_secs()), + "Starting monitoring API" ); let update_future = async move { @@ -122,10 +119,10 @@ impl MonitoringHttpClient { interval.tick().await; match self.send_metrics(&processes).await { Ok(()) => { - debug!(self.log, "Metrics sent to remote server"; "endpoint" => %self.monitoring_endpoint); + debug!(endpoint = %self.monitoring_endpoint, "Metrics sent to remote server"); } Err(e) => { - error!(self.log, "Failed to send metrics to remote endpoint"; "error" => %e) + error!(error = %e, "Failed to send metrics to remote endpoint") } } } @@ -187,18 +184,16 @@ impl MonitoringHttpClient { for process in processes { match self.get_metrics(process).await { Err(e) => error!( - self.log, - "Failed to get metrics"; - "process_type" => ?process, - "error" => %e + process_type = ?process, + error = %e, + "Failed to get metrics" ), Ok(metric) => metrics.push(metric), } } info!( - self.log, - "Sending metrics to 
remote endpoint"; - "endpoint" => %self.monitoring_endpoint + endpoint = %self.monitoring_endpoint, + "Sending metrics to remote endpoint" ); self.post(self.monitoring_endpoint.full.clone(), &metrics) .await diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index c1ac4b55a9..4224f00acc 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -4,17 +4,9 @@ version = "0.1.0" authors = ["Sigma Prime "] edition = { workspace = true } -[features] -default = ["slog"] -slog = ["dep:slog", "dep:sloggers", "dep:logging"] -tracing = ["dep:tracing"] - [dependencies] async-channel = { workspace = true } futures = { workspace = true } -logging = { workspace = true, optional = true } metrics = { workspace = true } -slog = { workspace = true, optional = true } -sloggers = { workspace = true, optional = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -tracing = { workspace = true, optional = true } +tracing = { workspace = true } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 92ddb7c0be..dbdac600f3 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,20 +1,14 @@ mod metrics; -#[cfg(not(feature = "tracing"))] pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; +use tracing::{debug, instrument}; pub use tokio::task::JoinHandle; -// Set up logging framework -#[cfg(not(feature = "tracing"))] -use slog::{debug, o}; -#[cfg(feature = "tracing")] -use tracing::debug; - /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -85,8 +79,9 @@ pub struct TaskExecutor { /// /// The task must provide a reason for shutting down. signal_tx: Sender, - #[cfg(not(feature = "tracing"))] - log: slog::Logger, + + /// The name of the service for inclusion in the logger output. 
+ service_name: String, } impl TaskExecutor { @@ -97,39 +92,29 @@ impl TaskExecutor { /// This function should only be used during testing. In production, prefer to obtain an /// instance of `Self` via a `environment::RuntimeContext` (see the `lighthouse/environment` /// crate). + #[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)] pub fn new>( handle: T, exit: async_channel::Receiver<()>, - #[cfg(not(feature = "tracing"))] log: slog::Logger, signal_tx: Sender, + service_name: String, ) -> Self { Self { handle_provider: handle.into(), exit, signal_tx, - #[cfg(not(feature = "tracing"))] - log, + service_name, } } /// Clones the task executor adding a service name. - #[cfg(not(feature = "tracing"))] + #[instrument(parent = None,level = "info", fields(service = service_name), name = "task_executor", skip_all)] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), exit: self.exit.clone(), signal_tx: self.signal_tx.clone(), - log: self.log.new(o!("service" => service_name)), - } - } - - /// Clones the task executor adding a service name. - #[cfg(feature = "tracing")] - pub fn clone(&self) -> Self { - TaskExecutor { - handle_provider: self.handle_provider.clone(), - exit: self.exit.clone(), - signal_tx: self.signal_tx.clone(), + service_name, } } @@ -139,6 +124,7 @@ impl TaskExecutor { /// The purpose of this function is to create a compile error if some function which previously /// returned `()` starts returning something else. Such a case may otherwise result in /// accidental error suppression. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_ignoring_error( &self, task: impl Future> + Send + 'static, @@ -150,6 +136,7 @@ impl TaskExecutor { /// Spawn a task to monitor the completion of another task. 
/// /// If the other task exits by panicking, then the monitor task will shut down the executor. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] fn spawn_monitor( &self, task_handle: impl Future> + Send + 'static, @@ -168,13 +155,7 @@ impl TaskExecutor { drop(timer); }); } else { - #[cfg(not(feature = "tracing"))] - debug!( - self.log, - "Couldn't spawn monitor task. Runtime shutting down" - ); - #[cfg(feature = "tracing")] - debug!("Couldn't spawn monitor task. Runtime shutting down"); + debug!("Couldn't spawn monitor task. Runtime shutting down") } } @@ -187,6 +168,7 @@ impl TaskExecutor { /// of a panic, the executor will be shut down via `self.signal_tx`. /// /// This function generates prometheus metrics on number of tasks and task duration. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn(&self, task: impl Future + Send + 'static, name: &'static str) { if let Some(task_handle) = self.spawn_handle(task, name) { self.spawn_monitor(task_handle, name) @@ -202,6 +184,7 @@ impl TaskExecutor { /// This is useful in cases where the future to be spawned needs to do additional cleanup work when /// the task is completed/canceled (e.g. writing local variables to disk) or the task is created from /// some framework which does its own cleanup (e.g. a hyper server). + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_without_exit( &self, task: impl Future + Send + 'static, @@ -218,9 +201,6 @@ impl TaskExecutor { if let Some(handle) = self.handle() { handle.spawn(future); } else { - #[cfg(not(feature = "tracing"))] - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); - #[cfg(feature = "tracing")] debug!("Couldn't spawn task. 
Runtime shutting down"); } } @@ -242,16 +222,13 @@ impl TaskExecutor { /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_handle( &self, task: impl Future + Send + 'static, name: &'static str, ) -> Option>> { let exit = self.exit(); - - #[cfg(not(feature = "tracing"))] - let log = self.log.clone(); - if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { // Task is shutdown before it completes if `exit` receives let int_gauge_1 = int_gauge.clone(); @@ -262,9 +239,6 @@ impl TaskExecutor { let result = match future::select(Box::pin(task), exit).await { future::Either::Left((value, _)) => Some(value), future::Either::Right(_) => { - #[cfg(not(feature = "tracing"))] - debug!(log, "Async task shutdown, exit received"; "task" => name); - #[cfg(feature = "tracing")] debug!(task = name, "Async task shutdown, exit received"); None } @@ -273,9 +247,6 @@ impl TaskExecutor { result })) } else { - #[cfg(not(feature = "tracing"))] - debug!(log, "Couldn't spawn task. Runtime shutting down"); - #[cfg(feature = "tracing")] debug!("Couldn't spawn task. Runtime shutting down"); None } @@ -290,6 +261,7 @@ impl TaskExecutor { /// The Future returned behaves like the standard JoinHandle which can return an error if the /// task failed. /// This function generates prometheus metrics on number of tasks and task duration. 
+ #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn spawn_blocking_handle( &self, task: F, @@ -299,18 +271,12 @@ impl TaskExecutor { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { - #[cfg(not(feature = "tracing"))] - let log = self.log.clone(); - let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]); let join_handle = if let Some(handle) = self.handle() { handle.spawn_blocking(task) } else { - #[cfg(not(feature = "tracing"))] - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); - #[cfg(feature = "tracing")] debug!("Couldn't spawn task. Runtime shutting down"); return None; }; @@ -319,9 +285,6 @@ impl TaskExecutor { let result = match join_handle.await { Ok(result) => Ok(result), Err(error) => { - #[cfg(not(feature = "tracing"))] - debug!(log, "Blocking task ended unexpectedly"; "error" => %error); - #[cfg(feature = "tracing")] debug!(%error, "Blocking task ended unexpectedly"); Err(error) } @@ -347,6 +310,7 @@ impl TaskExecutor { /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to /// @paulhauner if you plan to use this function in production. He has put metrics in here to /// track any use of it, so don't think you can pull a sneaky one on him. 
+ #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn block_on_dangerous( &self, future: F, @@ -354,44 +318,20 @@ impl TaskExecutor { ) -> Option { let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); - #[cfg(not(feature = "tracing"))] - let log = self.log.clone(); let handle = self.handle()?; let exit = self.exit(); - #[cfg(not(feature = "tracing"))] - debug!( - log, - "Starting block_on task"; - "name" => name - ); - - #[cfg(feature = "tracing")] debug!(name, "Starting block_on task"); handle.block_on(async { let output = tokio::select! { output = future => { - #[cfg(not(feature = "tracing"))] - debug!( - log, - "Completed block_on task"; - "name" => name - ); - #[cfg(feature = "tracing")] debug!( name, "Completed block_on task" ); Some(output) - }, + } _ = exit => { - #[cfg(not(feature = "tracing"))] - debug!( - log, - "Cancelled block_on task"; - "name" => name, - ); - #[cfg(feature = "tracing")] debug!( name, "Cancelled block_on task" @@ -406,6 +346,7 @@ impl TaskExecutor { } /// Returns a `Handle` to the current runtime. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn handle(&self) -> Option { self.handle_provider.handle() } @@ -420,13 +361,8 @@ impl TaskExecutor { } /// Get a channel to request shutting down. + #[instrument(parent = None,level = "info", fields(service = self.service_name), name = "task_executor", skip_all)] pub fn shutdown_sender(&self) -> Sender { self.signal_tx.clone() } - - /// Returns a reference to the logger. 
- #[cfg(not(feature = "tracing"))] - pub fn log(&self) -> &slog::Logger { - &self.log - } } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index 46fbff7eac..698152f6c1 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,6 +1,4 @@ use crate::TaskExecutor; -pub use logging::test_logger; -use slog::Logger; use std::sync::Arc; use tokio::runtime; @@ -16,7 +14,6 @@ pub struct TestRuntime { runtime: Option>, _runtime_shutdown: async_channel::Sender<()>, pub task_executor: TaskExecutor, - pub log: Logger, } impl Default for TestRuntime { @@ -26,7 +23,6 @@ impl Default for TestRuntime { fn default() -> Self { let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = test_logger(); let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() { (None, handle) @@ -41,13 +37,12 @@ impl Default for TestRuntime { (Some(runtime), handle) }; - let task_executor = TaskExecutor::new(handle, exit, log.clone(), shutdown_tx); + let task_executor = TaskExecutor::new(handle, exit, shutdown_tx, "test".to_string()); Self { runtime, _runtime_shutdown: runtime_shutdown, task_executor, - log, } } } @@ -59,10 +54,3 @@ impl Drop for TestRuntime { } } } - -impl TestRuntime { - pub fn set_logger(&mut self, log: Logger) { - self.log = log.clone(); - self.task_executor.log = log; - } -} diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 3bd18e922a..5c009a5e78 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,10 +8,11 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +logging = { workspace = true } metrics = { workspace = true } proto_array = { workspace = true } -slog = { workspace = true } state_processing = { workspace = true } +tracing = { workspace = true } types 
= { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 4c25be950b..28a3ecdd02 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,10 +1,10 @@ use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; +use logging::crit; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, @@ -13,6 +13,7 @@ use std::cmp::Ordering; use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; +use tracing::{debug, warn}; use types::{ consts::bellatrix::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, @@ -1365,17 +1366,14 @@ where persisted: &PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, spec: &ChainSpec, - log: &Logger, ) -> Result> { let mut proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) .map_err(Error::InvalidProtoArrayBytes)?; let contains_invalid_payloads = proto_array.contains_invalid_payloads(); debug!( - log, - "Restoring fork choice from persisted"; - "reset_payload_statuses" => ?reset_payload_statuses, - "contains_invalid_payloads" => contains_invalid_payloads, + ?reset_payload_statuses, + contains_invalid_payloads, "Restoring fork choice from persisted" ); // Exit early if there are no "invalid" payloads, if requested. @@ -1394,18 +1392,14 @@ where // back to a proto-array which does not have the reset applied. This indicates a // significant error in Lighthouse and warrants detailed investigation. 
crit!( - log, - "Failed to reset payload statuses"; - "error" => e, - "info" => "please report this error", + error = ?e, + info = "please report this error", + "Failed to reset payload statuses" ); ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) .map_err(Error::InvalidProtoArrayBytes) } else { - debug!( - log, - "Successfully reset all payload statuses"; - ); + debug!("Successfully reset all payload statuses"); Ok(proto_array) } } @@ -1417,10 +1411,9 @@ where reset_payload_statuses: ResetPayloadStatuses, fc_store: T, spec: &ChainSpec, - log: &Logger, ) -> Result> { let proto_array = - Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec, log)?; + Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec)?; let current_slot = fc_store.get_current_slot(); @@ -1444,10 +1437,9 @@ where // an optimistic status so that we can have a head to start from. if let Err(e) = fork_choice.get_head(current_slot, spec) { warn!( - log, - "Could not find head on persisted FC"; - "info" => "resetting all payload statuses and retrying", - "error" => ?e + info = "resetting all payload statuses and retrying", + error = ?e, + "Could not find head on persisted FC" ); // Although we may have already made this call whilst loading `proto_array`, try it // again since we may have mutated the `proto_array` during `get_head` and therefore may diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 79beb81282..b31485600d 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -28,7 +28,6 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -log = { workspace = true } maplit = { workspace = true } merkle_proof = { workspace = true } metastruct = "0.1.0" @@ -44,13 +43,13 @@ safe_arith = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_json = { workspace = true } serde_yaml = { workspace = true } -slog = { 
workspace = true } smallvec = { workspace = true } ssz_types = { workspace = true, features = ["arbitrary"] } superstruct = { workspace = true } swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } tempfile = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } +tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index 42e7a0f2ee..eee267355a 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -227,17 +227,6 @@ macro_rules! impl_display { write!(f, "{}", self.0) } } - - impl slog::Value for $type { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - slog::Value::serialize(&self.0, record, key, serializer) - } - } }; } diff --git a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs index 92534369ee..f30afda257 100644 --- a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,8 +1,8 @@ use crate::*; use eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; -use log::debug; use rayon::prelude::*; use std::path::PathBuf; +use tracing::debug; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of /// the validator. 
diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index a7a54b1416..99bef75a72 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -11,7 +11,7 @@ clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } serde = { workspace = true } -slog = { workspace = true } store = { workspace = true } strum = { workspace = true } +tracing = { workspace = true } types = { workspace = true } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index bed90df9df..f38c28d8b0 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -12,7 +12,6 @@ use clap::ValueEnum; use cli::{Compact, Inspect}; use environment::{Environment, RuntimeContext}; use serde::{Deserialize, Serialize}; -use slog::{info, warn, Logger}; use std::fs; use std::io::Write; use std::path::PathBuf; @@ -24,6 +23,7 @@ use store::{ DBColumn, HotColdDB, }; use strum::{EnumString, EnumVariantNames}; +use tracing::{info, warn}; use types::{BeaconState, EthSpec, Slot}; fn parse_client_config( @@ -49,7 +49,6 @@ fn parse_client_config( pub fn display_db_version( client_config: ClientConfig, runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), Error> { let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); @@ -67,16 +66,14 @@ pub fn display_db_version( }, client_config.store, spec, - log.clone(), )?; - info!(log, "Database version: {}", version.as_u64()); + info!(version = version.as_u64(), "Database"); if version != CURRENT_SCHEMA_VERSION { info!( - log, - "Latest schema version: {}", - CURRENT_SCHEMA_VERSION.as_u64(), + current_schema_version = CURRENT_SCHEMA_VERSION.as_u64(), + "Latest schema" ); } @@ -260,7 +257,6 @@ fn parse_compact_config(compact_config: &Compact) -> Result( compact_config: CompactConfig, client_config: ClientConfig, - log: Logger, ) -> Result<(), Error> { let hot_path = client_config.get_db_path(); let cold_path = 
client_config.get_freezer_db_path(); @@ -284,10 +280,9 @@ pub fn compact_db( ) }; info!( - log, - "Compacting database"; - "db" => db_name, - "column" => ?column + db = db_name, + column = ?column, + "Compacting database" ); sub_db.compact_column(column)?; Ok(()) @@ -308,7 +303,6 @@ pub fn migrate_db( client_config: ClientConfig, mut genesis_state: BeaconState, runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), Error> { let spec = runtime_context.eth2_config.spec.clone(); let hot_path = client_config.get_db_path(); @@ -327,14 +321,12 @@ pub fn migrate_db( }, client_config.store.clone(), spec.clone(), - log.clone(), )?; info!( - log, - "Migrating database schema"; - "from" => from.as_u64(), - "to" => to.as_u64(), + from = from.as_u64(), + to = to.as_u64(), + "Migrating database schema" ); let genesis_state_root = genesis_state.canonical_root()?; @@ -343,14 +335,12 @@ pub fn migrate_db( Some(genesis_state_root), from, to, - log, ) } pub fn prune_payloads( client_config: ClientConfig, runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), Error> { let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); @@ -364,7 +354,6 @@ pub fn prune_payloads( |_, _, _| Ok(()), client_config.store, spec.clone(), - log, )?; // If we're trigging a prune manually then ignore the check on the split's parent that bails @@ -376,7 +365,6 @@ pub fn prune_payloads( pub fn prune_blobs( client_config: ClientConfig, runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), Error> { let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); @@ -390,7 +378,6 @@ pub fn prune_blobs( |_, _, _| Ok(()), client_config.store, spec.clone(), - log, )?; // If we're triggering a prune manually then ignore the check on `epochs_per_blob_prune` that @@ -413,7 +400,6 @@ pub fn prune_states( prune_config: PruneStatesConfig, mut genesis_state: BeaconState, runtime_context: &RuntimeContext, - log: Logger, ) -> Result<(), 
String> { let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); @@ -427,7 +413,6 @@ pub fn prune_states( |_, _, _| Ok(()), client_config.store, spec.clone(), - log.clone(), ) .map_err(|e| format!("Unable to open database: {e:?}"))?; @@ -447,20 +432,14 @@ pub fn prune_states( // Check that the user has confirmed they want to proceed. if !prune_config.confirm { if db.get_anchor_info().full_state_pruning_enabled() { - info!(log, "States have already been pruned"); + info!("States have already been pruned"); return Ok(()); } - info!(log, "Ready to prune states"); - warn!( - log, - "Pruning states is irreversible"; - ); - warn!( - log, - "Re-run this command with --confirm to commit to state deletion" - ); - info!(log, "Nothing has been pruned on this run"); + info!("Ready to prune states"); + warn!("Pruning states is irreversible"); + warn!("Re-run this command with --confirm to commit to state deletion"); + info!("Nothing has been pruned on this run"); return Err("Error: confirmation flag required".into()); } @@ -471,7 +450,7 @@ pub fn prune_states( db.prune_historic_states(genesis_state_root, &genesis_state) .map_err(|e| format!("Failed to prune due to error: {e:?}"))?; - info!(log, "Historic states pruned successfully"); + info!("Historic states pruned successfully"); Ok(()) } @@ -483,7 +462,6 @@ pub fn run( ) -> Result<(), String> { let client_config = parse_client_config(cli_args, db_manager_config, &env)?; let context = env.core_context(); - let log = context.log().clone(); let format_err = |e| format!("Fatal error: {:?}", e); let get_genesis_state = || { @@ -498,7 +476,6 @@ pub fn run( network_config.genesis_state::( client_config.genesis_state_url.as_deref(), client_config.genesis_state_url_timeout, - &log, ), "get_genesis_state", ) @@ -511,30 +488,29 @@ pub fn run( cli::DatabaseManagerSubcommand::Migrate(migrate_config) => { let migrate_config = parse_migrate_config(migrate_config)?; let genesis_state = get_genesis_state()?; 
- migrate_db(migrate_config, client_config, genesis_state, &context, log) - .map_err(format_err) + migrate_db(migrate_config, client_config, genesis_state, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::Inspect(inspect_config) => { let inspect_config = parse_inspect_config(inspect_config)?; inspect_db::(inspect_config, client_config) } cli::DatabaseManagerSubcommand::Version(_) => { - display_db_version(client_config, &context, log).map_err(format_err) + display_db_version(client_config, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::PrunePayloads(_) => { - prune_payloads(client_config, &context, log).map_err(format_err) + prune_payloads(client_config, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::PruneBlobs(_) => { - prune_blobs(client_config, &context, log).map_err(format_err) + prune_blobs(client_config, &context).map_err(format_err) } cli::DatabaseManagerSubcommand::PruneStates(prune_states_config) => { let prune_config = parse_prune_states_config(prune_states_config)?; let genesis_state = get_genesis_state()?; - prune_states(client_config, prune_config, genesis_state, &context, log) + prune_states(client_config, prune_config, genesis_state, &context) } cli::DatabaseManagerSubcommand::Compact(compact_config) => { let compact_config = parse_compact_config(compact_config)?; - compact_db::(compact_config, client_config, log).map_err(format_err) + compact_db::(compact_config, client_config).map_err(format_err) } } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 74b7ddcb2a..b7c226f8cd 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -34,10 +34,11 @@ rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -sloggers = { workspace = true } snap = { workspace = true } state_processing = { workspace = true } store = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } tree_hash = { workspace = 
true } types = { workspace = true } validator_dir = { workspace = true } diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index a90a4843d8..80087fd6d4 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -32,9 +32,9 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; -use log::info; use std::path::PathBuf; use std::time::{Duration, Instant}; +use tracing::info; use types::{EthSpec, FullPayload, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(5); @@ -102,7 +102,7 @@ pub fn run( } if let Some(block_root) = block_root { - info!("Block root is {:?}", block_root); + info!(%block_root,"Block root"); } Ok(()) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index f055a23b36..5bfd2233f0 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -18,6 +18,7 @@ use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; use std::str::FromStr; +use tracing_subscriber::filter::LevelFilter; use types::{EthSpec, EthSpecId}; fn main() { @@ -643,24 +644,31 @@ fn main() { } fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { + let (env_builder, _file_logging_layer, _stdout_logging_layer, _sse_logging_layer_opt) = + env_builder + .multi_threaded_tokio_runtime() + .map_err(|e| format!("should start tokio runtime: {:?}", e))? 
+ .init_tracing( + LoggerConfig { + path: None, + debug_level: LevelFilter::TRACE, + logfile_debug_level: LevelFilter::TRACE, + log_format: None, + logfile_format: None, + log_color: true, + logfile_color: false, + disable_log_timestamp: false, + max_log_size: 0, + max_log_number: 0, + compression: false, + is_restricted: true, + sse_logging: false, // No SSE Logging in LCLI + extra_info: false, + }, + "", + ); + let env = env_builder - .multi_threaded_tokio_runtime() - .map_err(|e| format!("should start tokio runtime: {:?}", e))? - .initialize_logger(LoggerConfig { - path: None, - debug_level: String::from("trace"), - logfile_debug_level: String::from("trace"), - log_format: None, - logfile_format: None, - log_color: false, - disable_log_timestamp: false, - max_log_size: 0, - max_log_number: 0, - compression: false, - is_restricted: true, - sse_logging: false, // No SSE Logging in LCLI - }) - .map_err(|e| format!("should start logger: {:?}", e))? .build() .map_err(|e| format!("should build env: {:?}", e))?; diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index dd13f6847b..f1e5c5759a 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,7 +1,6 @@ use clap::ArgMatches; use clap_utils::parse_required; use eth2_network_config::Eth2NetworkConfig; -use log::info; use serde::Serialize; use snap::raw::Decoder; use ssz::Decode; @@ -9,6 +8,7 @@ use std::fs; use std::fs::File; use std::io::Read; use std::str::FromStr; +use tracing::info; use types::*; enum OutputFormat { @@ -59,7 +59,7 @@ pub fn run_parse_ssz( spec.config_name.as_deref().unwrap_or("unknown"), E::spec_name() ); - info!("Type: {type_str}"); + info!(%type_str, "Type"); // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, // as the fork-generic decoder will always be available (requires correct --network flag). 
diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 2ad79051ea..834123e939 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -50,7 +50,6 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; -use log::info; use ssz::Encode; use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use state_processing::AllCaches; @@ -58,6 +57,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::time::{Duration, Instant}; +use tracing::info; use types::{BeaconState, EthSpec, Hash256}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs index 17a947b2f0..b2308999d4 100644 --- a/lcli/src/state_root.rs +++ b/lcli/src/state_root.rs @@ -4,9 +4,9 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; -use log::info; use std::path::PathBuf; use std::time::{Duration, Instant}; +use tracing::info; use types::{BeaconState, EthSpec}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index ecfa04fc81..4831f86491 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -72,8 +72,6 @@ use eth2::{ BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; use eth2_network_config::Eth2NetworkConfig; -use log::{debug, info}; -use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -87,6 +85,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::{Duration, Instant}; use store::HotColdDB; +use tracing::{debug, info}; use types::{BeaconState, ChainSpec, 
EthSpec, Hash256, SignedBeaconBlock}; const HTTP_TIMEOUT: Duration = Duration::from_secs(10); @@ -196,14 +195,8 @@ pub fn run( * Create a `BeaconStore` and `ValidatorPubkeyCache` for block signature verification. */ - let store = HotColdDB::open_ephemeral( - <_>::default(), - spec.clone(), - NullLoggerBuilder - .build() - .map_err(|e| format!("Error on NullLoggerBuilder: {:?}", e))?, - ) - .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; + let store = HotColdDB::open_ephemeral(<_>::default(), spec.clone()) + .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; let store = Arc::new(store); debug!("Building pubkey cache (might take some time)"); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index fc73a2cb93..d941293e91 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -60,9 +60,10 @@ serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } slasher = { workspace = true } -slog = { workspace = true } store = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } unused_port = { workspace = true } validator_client = { workspace = true } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 02b8e0b655..6d6ffa1725 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,18 +6,19 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } +clap = { workspace = true } eth2_config = { workspace = true } eth2_network_config = { workspace = true } futures = { workspace = true } logging = { workspace = true } +logroller = { workspace = true } serde = { workspace = true } -slog = { workspace = true } -slog-async = { workspace = true } -slog-json = "2.3.0" -slog-term = { workspace = true } -sloggers = { workspace = true } task_executor = { workspace = true } tokio = { 
workspace = true } +tracing = { workspace = true } +tracing-appender = { workspace = true } +tracing-log = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 89d759d662..005d2734c7 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -11,31 +11,38 @@ use eth2_config::Eth2Config; use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; - -use logging::{test_logger, SSELoggingComponents}; +use logging::tracing_logging_layer::LoggingLayer; +use logging::SSELoggingComponents; +use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize}; use serde::{Deserialize, Serialize}; -use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; -use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; -use std::fs::create_dir_all; -use std::io::{Result as IOResult, Write}; use std::path::PathBuf; use std::sync::Arc; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; +use tracing::{error, info, warn}; +use tracing_subscriber::filter::LevelFilter; use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; #[cfg(target_family = "unix")] use { futures::Future, - std::{pin::Pin, task::Context, task::Poll}, + std::{ + fs::{read_dir, set_permissions, Permissions}, + os::unix::fs::PermissionsExt, + path::Path, + pin::Pin, + task::Context, + task::Poll, + }, tokio::signal::unix::{signal, Signal, SignalKind}, }; #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; -const LOG_CHANNEL_SIZE: usize = 16384; -const SSE_LOG_CHANNEL_SIZE: usize = 2048; +pub mod tracing_common; + +pub const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds 
the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; @@ -47,37 +54,54 @@ const MAXIMUM_SHUTDOWN_TIME: u64 = 15; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LoggerConfig { pub path: Option, - pub debug_level: String, - pub logfile_debug_level: String, + #[serde(skip_serializing, skip_deserializing, default = "default_debug_level")] + pub debug_level: LevelFilter, + #[serde( + skip_serializing, + skip_deserializing, + default = "default_logfile_debug_level" + )] + pub logfile_debug_level: LevelFilter, pub log_format: Option, pub logfile_format: Option, pub log_color: bool, + pub logfile_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, pub is_restricted: bool, pub sse_logging: bool, + pub extra_info: bool, } impl Default for LoggerConfig { fn default() -> Self { LoggerConfig { path: None, - debug_level: String::from("info"), - logfile_debug_level: String::from("debug"), + debug_level: LevelFilter::INFO, + logfile_debug_level: LevelFilter::DEBUG, log_format: None, + log_color: true, logfile_format: None, - log_color: false, + logfile_color: false, disable_log_timestamp: false, max_log_size: 200, max_log_number: 5, compression: false, is_restricted: true, sse_logging: false, + extra_info: false, } } } +fn default_debug_level() -> LevelFilter { + LevelFilter::INFO +} + +fn default_logfile_debug_level() -> LevelFilter { + LevelFilter::DEBUG +} /// An execution context that can be used by a service. /// /// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a @@ -109,17 +133,11 @@ impl RuntimeContext { pub fn eth2_config(&self) -> &Eth2Config { &self.eth2_config } - - /// Returns a reference to the logger for this service. - pub fn log(&self) -> &slog::Logger { - self.executor.log() - } } /// Builds an `Environment`. 
pub struct EnvironmentBuilder { runtime: Option>, - log: Option, sse_logging_components: Option, eth_spec_instance: E, eth2_config: Eth2Config, @@ -131,7 +149,6 @@ impl EnvironmentBuilder { pub fn minimal() -> Self { Self { runtime: None, - log: None, sse_logging_components: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), @@ -145,7 +162,6 @@ impl EnvironmentBuilder { pub fn mainnet() -> Self { Self { runtime: None, - log: None, sse_logging_components: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), @@ -159,7 +175,6 @@ impl EnvironmentBuilder { pub fn gnosis() -> Self { Self { runtime: None, - log: None, sse_logging_components: None, eth_spec_instance: GnosisEthSpec, eth2_config: Eth2Config::gnosis(), @@ -182,149 +197,123 @@ impl EnvironmentBuilder { Ok(self) } - /// Sets a logger suitable for test usage. - pub fn test_logger(mut self) -> Result { - self.log = Some(test_logger()); - Ok(self) - } + pub fn init_tracing( + mut self, + config: LoggerConfig, + logfile_prefix: &str, + ) -> ( + Self, + LoggingLayer, + LoggingLayer, + Option, + ) { + let filename_prefix = match logfile_prefix { + "beacon_node" => "beacon", + "validator_client" => "validator", + _ => logfile_prefix, + }; - fn log_nothing(_: &mut dyn Write) -> IOResult<()> { - Ok(()) - } + #[cfg(target_family = "unix")] + let file_mode = if config.is_restricted { 0o600 } else { 0o644 }; - /// Initializes the logger using the specified configuration. - /// The logger is "async" because it has a dedicated thread that accepts logs and then - /// asynchronously flushes them to stdout/files/etc. This means the thread that raised the log - /// does not have to wait for the logs to be flushed. - /// The logger can be duplicated and more detailed logs can be output to `logfile`. - /// Note that background file logging will spawn a new thread. 
- pub fn initialize_logger(mut self, config: LoggerConfig) -> Result { - // Setting up the initial logger format and build it. - let stdout_drain = if let Some(ref format) = config.log_format { - match format.to_uppercase().as_str() { - "JSON" => { - let stdout_drain = slog_json::Json::default(std::io::stdout()).fuse(); - slog_async::Async::new(stdout_drain) - .chan_size(LOG_CHANNEL_SIZE) - .build() + let file_logging_layer = { + if let Some(path) = config.path { + let mut appender = LogRollerBuilder::new( + path.clone(), + PathBuf::from(format!("{}.log", filename_prefix)), + ) + .rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size))) + .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { + eprintln!("Failed to convert max_log_number to u64: {}", e); + 10 + })); + + if config.compression { + appender = appender.compression(Compression::Gzip); } - _ => return Err("Logging format provided is not supported".to_string()), - } - } else { - let stdout_decorator_builder = slog_term::TermDecorator::new(); - let stdout_decorator = if config.log_color { - stdout_decorator_builder.force_color() - } else { - stdout_decorator_builder - } - .build(); - let stdout_decorator = - logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); - let stdout_drain = slog_term::FullFormat::new(stdout_decorator); - let stdout_drain = if config.disable_log_timestamp { - stdout_drain.use_custom_timestamp(Self::log_nothing) - } else { - stdout_drain - } - .build() - .fuse(); - slog_async::Async::new(stdout_drain) - .chan_size(LOG_CHANNEL_SIZE) - .build() - }; + match appender.build() { + Ok(file_appender) => { + #[cfg(target_family = "unix")] + set_logfile_permissions(&path, filename_prefix, file_mode); - let stdout_drain = match config.debug_level.as_str() { - "info" => stdout_drain.filter_level(Level::Info), - "debug" => stdout_drain.filter_level(Level::Debug), - "trace" => stdout_drain.filter_level(Level::Trace), - "warn" => 
stdout_drain.filter_level(Level::Warning), - "error" => stdout_drain.filter_level(Level::Error), - "crit" => stdout_drain.filter_level(Level::Critical), - unknown => return Err(format!("Unknown debug-level: {}", unknown)), - }; + let (file_non_blocking_writer, file_guard) = + tracing_appender::non_blocking(file_appender); - let stdout_logger = Logger::root(stdout_drain.fuse(), o!()); - - // Disable file logging if values set to 0. - if config.max_log_size == 0 || config.max_log_number == 0 { - self.log = Some(stdout_logger); - return Ok(self); - } - - // Disable file logging if no path is specified. - let Some(path) = config.path else { - self.log = Some(stdout_logger); - return Ok(self); - }; - - // Ensure directories are created becfore the logfile. - if !path.exists() { - let mut dir = path.clone(); - dir.pop(); - - // Create the necessary directories for the correct service and network. - if !dir.exists() { - let res = create_dir_all(dir); - - // If the directories cannot be created, warn and disable the logger. - match res { - Ok(_) => (), + LoggingLayer::new( + file_non_blocking_writer, + file_guard, + config.disable_log_timestamp, + false, + config.logfile_color, + config.log_format.clone(), + config.logfile_format.clone(), + config.extra_info, + true, + ) + } Err(e) => { - let log = stdout_logger; - warn!( - log, - "Background file logging is disabled"; - "error" => e); - self.log = Some(log); - return Ok(self); + eprintln!("Failed to initialize rolling file appender: {}", e); + let (sink_writer, sink_guard) = + tracing_appender::non_blocking(std::io::sink()); + LoggingLayer::new( + sink_writer, + sink_guard, + config.disable_log_timestamp, + false, + config.logfile_color, + config.log_format.clone(), + config.logfile_format.clone(), + config.extra_info, + true, + ) } } + } else { + eprintln!("No path provided. 
File logging is disabled."); + let (sink_writer, sink_guard) = tracing_appender::non_blocking(std::io::sink()); + LoggingLayer::new( + sink_writer, + sink_guard, + config.disable_log_timestamp, + false, + true, + config.log_format.clone(), + config.logfile_format.clone(), + config.extra_info, + true, + ) } - } - - let logfile_level = match config.logfile_debug_level.as_str() { - "info" => Severity::Info, - "debug" => Severity::Debug, - "trace" => Severity::Trace, - "warn" => Severity::Warning, - "error" => Severity::Error, - "crit" => Severity::Critical, - unknown => return Err(format!("Unknown loglevel-debug-level: {}", unknown)), }; - let file_logger = FileLoggerBuilder::new(&path) - .level(logfile_level) - .channel_size(LOG_CHANNEL_SIZE) - .format(match config.logfile_format.as_deref() { - Some("JSON") => Format::Json, - _ => Format::default(), - }) - .rotate_size(config.max_log_size) - .rotate_keep(config.max_log_number) - .rotate_compress(config.compression) - .restrict_permissions(config.is_restricted) - .build() - .map_err(|e| format!("Unable to build file logger: {}", e))?; + let (stdout_non_blocking_writer, stdout_guard) = + tracing_appender::non_blocking(std::io::stdout()); - let mut log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); - - info!( - log, - "Logging to file"; - "path" => format!("{:?}", path) + let stdout_logging_layer = LoggingLayer::new( + stdout_non_blocking_writer, + stdout_guard, + config.disable_log_timestamp, + config.log_color, + true, + config.log_format, + config.logfile_format, + config.extra_info, + false, ); - // If the http API is enabled, we may need to send logs to be consumed by subscribers. 
- if config.sse_logging { - let sse_logger = SSELoggingComponents::new(SSE_LOG_CHANNEL_SIZE); - self.sse_logging_components = Some(sse_logger.clone()); + let sse_logging_layer_opt = if config.sse_logging { + Some(SSELoggingComponents::new(SSE_LOG_CHANNEL_SIZE)) + } else { + None + }; - log = Logger::root(Duplicate::new(log, sse_logger).fuse(), o!()); - } + self.sse_logging_components = sse_logging_layer_opt.clone(); - self.log = Some(log); - - Ok(self) + ( + self, + file_logging_layer, + stdout_logging_layer, + sse_logging_layer_opt, + ) } /// Adds a network configuration to the environment. @@ -351,7 +340,6 @@ impl EnvironmentBuilder { signal_rx: Some(signal_rx), signal: Some(signal), exit, - log: self.log.ok_or("Cannot build environment without log")?, sse_logging_components: self.sse_logging_components, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, @@ -370,7 +358,6 @@ pub struct Environment { signal_tx: Sender, signal: Option>, exit: async_channel::Receiver<()>, - log: Logger, sse_logging_components: Option, eth_spec_instance: E, pub eth2_config: Eth2Config, @@ -386,14 +373,14 @@ impl Environment { &self.runtime } - /// Returns a `Context` where no "service" has been added to the logger output. + /// Returns a `Context` where a "core" service has been added to the logger output. 
pub fn core_context(&self) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), self.exit.clone(), - self.log.clone(), self.signal_tx.clone(), + "core".to_string(), ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), @@ -408,8 +395,8 @@ impl Environment { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), self.exit.clone(), - self.log.new(o!("service" => service_name)), self.signal_tx.clone(), + service_name, ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), @@ -441,7 +428,7 @@ impl Environment { let terminate = SignalFuture::new(terminate_stream, "Received SIGTERM"); handles.push(terminate); } - Err(e) => error!(self.log, "Could not register SIGTERM handler"; "error" => e), + Err(e) => error!(error = ?e, "Could not register SIGTERM handler"), }; // setup for handling SIGINT @@ -450,7 +437,7 @@ impl Environment { let interrupt = SignalFuture::new(interrupt_stream, "Received SIGINT"); handles.push(interrupt); } - Err(e) => error!(self.log, "Could not register SIGINT handler"; "error" => e), + Err(e) => error!(error = ?e, "Could not register SIGINT handler"), } // setup for handling a SIGHUP @@ -459,7 +446,7 @@ impl Environment { let hup = SignalFuture::new(hup_stream, "Received SIGHUP"); handles.push(hup); } - Err(e) => error!(self.log, "Could not register SIGHUP handler"; "error" => e), + Err(e) => error!(error = ?e, "Could not register SIGHUP handler"), } future::select(inner_shutdown, future::select_all(handles.into_iter())).await @@ -467,7 +454,7 @@ impl Environment { match self.runtime().block_on(register_handlers) { future::Either::Left((Ok(reason), _)) => { - info!(self.log, "Internal shutdown received"; "reason" => reason.message()); + info!("Internal shutdown received"); Ok(reason) } future::Either::Left((Err(e), _)) => Err(e.into()), @@ -494,14 +481,12 @@ impl Environment { // setup for handling a Ctrl-C let 
(ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); - let log = self.log.clone(); ctrlc::set_handler(move || { if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { if let Err(e) = ctrlc_send.send(()) { error!( - log, - "Error sending ctrl-c message"; - "error" => e + error = ?e, + "Error sending ctrl-c message" ); } } @@ -514,7 +499,7 @@ impl Environment { .block_on(future::select(inner_shutdown, ctrlc_oneshot)) { future::Either::Left((Ok(reason), _)) => { - info!(self.log, "Internal shutdown received"; "reason" => reason.message()); + info!(reason = reason.message(), "Internal shutdown received"); Ok(reason) } future::Either::Left((Err(e), _)) => Err(e.into()), @@ -531,9 +516,8 @@ impl Environment { runtime.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME)) } Err(e) => warn!( - self.log, - "Failed to obtain runtime access to shutdown gracefully"; - "error" => ?e + error = ?e, + "Failed to obtain runtime access to shutdown gracefully" ), } } @@ -579,3 +563,37 @@ impl Future for SignalFuture { } } } + +#[cfg(target_family = "unix")] +fn set_logfile_permissions(log_dir: &Path, filename_prefix: &str, file_mode: u32) { + let newest = read_dir(log_dir) + .ok() + .into_iter() + .flat_map(|entries| entries.filter_map(Result::ok)) + .filter_map(|entry| { + let path = entry.path(); + let fname = path.file_name()?.to_string_lossy(); + if path.is_file() && fname.starts_with(filename_prefix) && fname.ends_with(".log") { + let modified = entry.metadata().ok()?.modified().ok()?; + Some((path, modified)) + } else { + None + } + }) + .max_by_key(|(_path, mtime)| *mtime); + + match newest { + Some((file, _mtime)) => { + if let Err(e) = set_permissions(&file, Permissions::from_mode(file_mode)) { + eprintln!("Failed to set permissions on {}: {}", file.display(), e); + } + } + None => { + eprintln!( + "Couldn't find a newly created logfile in {} matching prefix \"{}\".", + log_dir.display(), + 
filename_prefix + ); + } + } +} diff --git a/lighthouse/environment/src/tracing_common.rs b/lighthouse/environment/src/tracing_common.rs new file mode 100644 index 0000000000..ad9060a8ff --- /dev/null +++ b/lighthouse/environment/src/tracing_common.rs @@ -0,0 +1,78 @@ +use crate::{EnvironmentBuilder, LoggerConfig}; +use clap::ArgMatches; +use logging::Libp2pDiscv5TracingLayer; +use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents}; +use std::process; +use tracing_subscriber::filter::{EnvFilter, FilterFn, LevelFilter}; +use types::EthSpec; + +pub fn construct_logger( + logger_config: LoggerConfig, + matches: &ArgMatches, + environment_builder: EnvironmentBuilder, +) -> ( + EnvironmentBuilder, + EnvFilter, + Libp2pDiscv5TracingLayer, + LoggingLayer, + LoggingLayer, + Option, + LoggerConfig, + FilterFn, +) { + let libp2p_discv5_layer = logging::create_libp2p_discv5_tracing_layer( + logger_config.path.clone(), + logger_config.max_log_size, + logger_config.compression, + logger_config.max_log_number, + ); + + let logfile_prefix = matches.subcommand_name().unwrap_or("lighthouse"); + + let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) = + environment_builder.init_tracing(logger_config.clone(), logfile_prefix); + + let filter_layer = EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new(logger_config.debug_level.to_string().to_lowercase())) + .unwrap(); + + let dependency_log_filter = + FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool); + + ( + builder, + filter_layer, + libp2p_discv5_layer, + file_logging_layer, + stdout_logging_layer, + sse_logging_layer_opt, + logger_config, + dependency_log_filter, + ) +} + +pub fn parse_level(level: &str) -> LevelFilter { + match level.to_lowercase().as_str() { + "error" => LevelFilter::ERROR, + "warn" => LevelFilter::WARN, + "info" => LevelFilter::INFO, + "debug" => LevelFilter::DEBUG, + "trace" => LevelFilter::TRACE, + _ => { + 
eprintln!("Unsupported log level"); + process::exit(1) + } + } +} + +fn filter_dependency_log(meta: &tracing::Metadata<'_>) -> bool { + if let Some(file) = meta.file() { + let target = meta.target(); + if file.contains("/.cargo/") { + return target.contains("discv5") || target.contains("libp2p"); + } else { + return !file.contains("gossipsub") && !target.contains("hyper"); + } + } + true +} diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index b0c847612a..a98caf8df5 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -9,8 +9,6 @@ fn builder() -> EnvironmentBuilder { EnvironmentBuilder::mainnet() .multi_threaded_tokio_runtime() .expect("should set runtime") - .test_logger() - .expect("should set logger") } fn eth2_network_config() -> Option { diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index d7a14e3809..8df4831503 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -11,18 +11,23 @@ use clap_utils::{ }; use cli::LighthouseSubcommands; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; +use environment::tracing_common; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; +use logging::crit; +use logging::MetricsLayer; use malloc_utils::configure_memory_allocator; -use slog::{crit, info}; use std::backtrace::Backtrace; use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; use task_executor::ShutdownReason; +use tracing::{info, warn}; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -120,15 +125,11 @@ 
fn main() { .display_order(0), ) .arg( - Arg::new("logfile") - .long("logfile") - .value_name("FILE") + Arg::new("logfile-dir") + .long("logfile-dir") + .value_name("DIR") .help( - "File path where the log file will be stored. Once it grows to the \ - value specified in `--logfile-max-size` a new log file is generated where \ - future logs are stored. \ - Once the number of log files exceeds the value specified in \ - `--logfile-max-number` the oldest log file will be overwritten.") + "Directory path where the log file will be stored") .action(ArgAction::Set) .global(true) .display_order(0) @@ -215,13 +216,36 @@ fn main() { .arg( Arg::new("log-color") .long("log-color") - .alias("log-colour") - .help("Force outputting colors when emitting logs to the terminal.") + .alias("log-colour") + .help("Enables/Disables colors for logs in terminal. \ + Set it to false to disable colors.") + .num_args(0..=1) + .default_missing_value("true") + .default_value("true") + .value_parser(clap::value_parser!(bool)) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) + ) + .arg( + Arg::new("logfile-color") + .long("logfile-color") + .alias("logfile-colour") + .help("Enables colors in logfile.") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .global(true) .display_order(0) ) + .arg( + Arg::new("log-extra-info") + .long("log-extra-info") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, show module,file,line in logs") + .global(true) + .display_order(0) + ) .arg( Arg::new("disable-log-timestamp") .long("disable-log-timestamp") @@ -499,10 +523,16 @@ fn run( let log_format = matches.get_one::("log-format"); - let log_color = matches.get_flag("log-color"); + let log_color = matches + .get_one::("log-color") + .copied() + .unwrap_or(true); + + let logfile_color = matches.get_flag("logfile-color"); let disable_log_timestamp = matches.get_flag("disable-log-timestamp"); + let extra_info = matches.get_flag("log-extra-info"); let 
logfile_debug_level = matches .get_one::("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; @@ -529,15 +559,13 @@ fn run( let logfile_restricted = !matches.get_flag("logfile-no-restricted-perms"); // Construct the path to the log file. - let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; + let mut log_path: Option = clap_utils::parse_optional(matches, "logfile-dir")?; if log_path.is_none() { log_path = match matches.subcommand() { Some(("beacon_node", _)) => Some( parse_path_or_default(matches, "datadir")? .join(DEFAULT_BEACON_NODE_DIR) - .join("logs") - .join("beacon") - .with_extension("log"), + .join("logs"), ), Some(("validator_client", vc_matches)) => { let base_path = if vc_matches.contains_id("validators-dir") { @@ -546,12 +574,7 @@ fn run( parse_path_or_default(matches, "datadir")?.join(DEFAULT_VALIDATOR_DIR) }; - Some( - base_path - .join("logs") - .join("validator") - .with_extension("log"), - ) + Some(base_path.join("logs")) } _ => None, }; @@ -567,57 +590,83 @@ fn run( } }; - let logger_config = LoggerConfig { - path: log_path.clone(), - debug_level: String::from(debug_level), - logfile_debug_level: String::from(logfile_debug_level), - log_format: log_format.map(String::from), - logfile_format: logfile_format.map(String::from), - log_color, - disable_log_timestamp, - max_log_size: logfile_max_size * 1_024 * 1_024, - max_log_number: logfile_max_number, - compression: logfile_compress, - is_restricted: logfile_restricted, - sse_logging, + let ( + builder, + filter_layer, + libp2p_discv5_layer, + file_logging_layer, + stdout_logging_layer, + sse_logging_layer_opt, + logger_config, + dependency_log_filter, + ) = tracing_common::construct_logger( + LoggerConfig { + path: log_path.clone(), + debug_level: tracing_common::parse_level(debug_level), + logfile_debug_level: tracing_common::parse_level(logfile_debug_level), + log_format: log_format.map(String::from), + logfile_format: logfile_format.map(String::from), + 
log_color, + logfile_color, + disable_log_timestamp, + max_log_size: logfile_max_size, + max_log_number: logfile_max_number, + compression: logfile_compress, + is_restricted: logfile_restricted, + sse_logging, + extra_info, + }, + matches, + environment_builder, + ); + + let logging = tracing_subscriber::registry() + .with(dependency_log_filter) + .with(filter_layer) + .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) + .with(stdout_logging_layer.with_filter(logger_config.debug_level)) + .with(MetricsLayer) + .with(libp2p_discv5_layer); + + let logging_result = if let Some(sse_logging_layer) = sse_logging_layer_opt { + logging.with(sse_logging_layer).try_init() + } else { + logging.try_init() }; - let builder = environment_builder.initialize_logger(logger_config.clone())?; + if let Err(e) = logging_result { + eprintln!("Failed to initialize dependency logging: {e}"); + } let mut environment = builder .multi_threaded_tokio_runtime()? .eth2_network_config(eth2_network_config)? .build()?; - let log = environment.core_context().log().clone(); - // Log panics properly. { - let log = log.clone(); std::panic::set_hook(Box::new(move |info| { crit!( - log, - "Task panic. This is a bug!"; - "location" => info.location().map(ToString::to_string), - "message" => info.payload().downcast_ref::(), - "backtrace" => %Backtrace::capture(), - "advice" => "Please check above for a backtrace and notify the developers", + location = info.location().map(ToString::to_string), + message = info.payload().downcast_ref::(), + backtrace = %Backtrace::capture(), + advice = "Please check above for a backtrace and notify the developers", + "Task panic. This is a bug!" ); })); } // Allow Prometheus to export the time at which the process was started. - metrics::expose_process_start_time(&log); + metrics::expose_process_start_time(); // Allow Prometheus access to the version and commit of the Lighthouse build. 
metrics::expose_lighthouse_version(); #[cfg(all(feature = "modern", target_arch = "x86_64"))] if !std::is_x86_feature_detected!("adx") { - slog::warn!( - log, - "CPU seems incompatible with optimized Lighthouse build"; - "advice" => "If you get a SIGILL, please try Lighthouse portable build" + tracing::warn!( + advice = "If you get a SIGILL, please try Lighthouse portable build", + "CPU seems incompatible with optimized Lighthouse build" ); } @@ -631,7 +680,7 @@ fn run( ]; for flag in deprecated_flags { if matches.get_one::(flag).is_some() { - slog::warn!(log, "The {} flag is deprecated and does nothing", flag); + warn!("The {} flag is deprecated and does nothing", flag); } } @@ -675,26 +724,21 @@ fn run( match LighthouseSubcommands::from_arg_matches(matches) { Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) => { - info!(log, "Running database manager for {} network", network_name); + info!("Running database manager for {} network", network_name); database_manager::run(matches, &db_manager_config, environment)?; return Ok(()); } Ok(LighthouseSubcommands::ValidatorClient(validator_client_config)) => { let context = environment.core_context(); - let log = context.log().clone(); let executor = context.executor.clone(); - let config = validator_client::Config::from_cli( - matches, - &validator_client_config, - context.log(), - ) - .map_err(|e| format!("Unable to initialize validator config: {}", e))?; + let config = validator_client::Config::from_cli(matches, &validator_client_config) + .map_err(|e| format!("Unable to initialize validator config: {}", e))?; // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; let shutdown_flag = matches.get_flag("immediate-shutdown"); if shutdown_flag { - info!(log, "Validator client immediate shutdown triggered."); + info!("Validator client immediate shutdown triggered."); return Ok(()); } @@ -704,7 +748,7 @@ fn run( 
.and_then(|mut vc| async move { vc.start_service().await }) .await { - crit!(log, "Failed to start validator client"; "reason" => e); + crit!(reason = e, "Failed to start validator client"); // Ignore the error since it always occurs during normal operation when // shutting down. let _ = executor @@ -718,17 +762,12 @@ fn run( Err(_) => (), }; - info!(log, "Lighthouse started"; "version" => VERSION); - info!( - log, - "Configured for network"; - "name" => &network_name - ); + info!(version = VERSION, "Lighthouse started"); + info!(network_name, "Configured network"); match matches.subcommand() { Some(("beacon_node", matches)) => { let context = environment.core_context(); - let log = context.log().clone(); let executor = context.executor.clone(); let mut config = beacon_node::get_config::(matches, &context)?; config.logger_config = logger_config; @@ -737,29 +776,14 @@ fn run( let shutdown_flag = matches.get_flag("immediate-shutdown"); if shutdown_flag { - info!(log, "Beacon node immediate shutdown triggered."); + info!("Beacon node immediate shutdown triggered."); return Ok(()); } - let mut tracing_log_path: Option = - clap_utils::parse_optional(matches, "logfile")?; - - if tracing_log_path.is_none() { - tracing_log_path = Some( - parse_path_or_default(matches, "datadir")? - .join(DEFAULT_BEACON_NODE_DIR) - .join("logs"), - ) - } - - let path = tracing_log_path.clone().unwrap(); - - logging::create_tracing_layer(path); - executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { - crit!(log, "Failed to start beacon node"; "reason" => e); + crit!(reason = ?e, "Failed to start beacon node"); // Ignore the error since it always occurs during normal operation when // shutting down. let _ = executor @@ -774,14 +798,14 @@ fn run( // Qt the moment this needs to exist so that we dont trigger a crit. Some(("validator_client", _)) => (), _ => { - crit!(log, "No subcommand supplied. 
See --help ."); + crit!("No subcommand supplied. See --help ."); return Err("No subcommand supplied.".into()); } }; // Block this thread until we get a ctrl-c or a task sends a shutdown signal. let shutdown_reason = environment.block_until_shutdown_requested()?; - info!(log, "Shutting down.."; "reason" => ?shutdown_reason); + info!(reason = ?shutdown_reason, "Shutting down.."); environment.fire_signal(); diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index 30e0120582..6b464a18be 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,8 +1,8 @@ use lighthouse_version::VERSION; pub use metrics::*; -use slog::{error, Logger}; use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; +use tracing::error; pub static PROCESS_START_TIME_SECONDS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( @@ -19,13 +19,12 @@ pub static LIGHTHOUSE_VERSION: LazyLock> = LazyLock::new(|| ) }); -pub fn expose_process_start_time(log: &Logger) { +pub fn expose_process_start_time() { match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(duration) => set_gauge(&PROCESS_START_TIME_SECONDS, duration.as_secs() as i64), Err(e) => error!( - log, - "Failed to read system time"; - "error" => %e + error = %e, + "Failed to read system time" ), } } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 8055c33bc0..96c1b5313a 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2416,20 +2416,20 @@ fn monitoring_endpoint() { // Tests for Logger flags. 
#[test] -fn default_log_color_flag() { +fn default_logfile_color_flag() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert!(!config.logger_config.log_color); + assert!(!config.logger_config.logfile_color); }); } #[test] -fn enabled_log_color_flag() { +fn enabled_logfile_color_flag() { CommandLineTest::new() - .flag("log-color", None) + .flag("logfile-color", None) .run_with_zero_port() .with_config(|config| { - assert!(config.logger_config.log_color); + assert!(config.logger_config.logfile_color); }); } #[test] diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 5be5c13dee..80070a0791 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -78,7 +78,7 @@ if [[ "$BEHAVIOR" == "failure" ]]; then --files /validator_keys:$vc_1_keys_artifact_id,/testnet:el_cl_genesis_data \ $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ vc \ - --debug-level debug \ + --debug-level info \ --testnet-dir=/testnet \ --validators-dir=/validator_keys/keys \ --secrets-dir=/validator_keys/secrets \ diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index fcecc2fc23..b2f6eca9c3 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -32,16 +32,14 @@ rand = { workspace = true } redb = { version = "2.1.4", optional = true } safe_arith = { workspace = true } serde = { workspace = true } -slog = { workspace = true } ssz_types = { workspace = true } strum = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } [dev-dependencies] -logging = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } - diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 41e3b5b90a..19398fada8 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -10,9 +10,9 @@ directory = { 
workspace = true } lighthouse_network = { workspace = true } network = { workspace = true } slasher = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } state_processing = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } types = { workspace = true } diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 091a95dc4c..2409a24c78 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -8,7 +8,6 @@ use slasher::{ metrics::{self, SLASHER_DATABASE_SIZE, SLASHER_RUN_TIME}, Slasher, }; -use slog::{debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ per_block_processing::errors::{ @@ -21,6 +20,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::sync::mpsc::UnboundedSender; use tokio::time::{interval_at, Duration, Instant}; +use tracing::{debug, error, info, info_span, trace, warn, Instrument}; use types::{AttesterSlashing, Epoch, EthSpec, ProposerSlashing}; pub struct SlasherService { @@ -47,9 +47,8 @@ impl SlasherService { .slasher .clone() .ok_or("No slasher is configured")?; - let log = slasher.log().clone(); - info!(log, "Starting slasher"; "broadcast" => slasher.config().broadcast); + info!(broadcast = slasher.config().broadcast, "Starting slasher"); // Buffer just a single message in the channel. If the receiver is still processing, we // don't need to burden them with more work (we can wait). 
@@ -65,13 +64,17 @@ impl SlasherService { update_period, slot_offset, notif_sender, - log, - ), + ) + .instrument(tracing::info_span!("slasher", service = "slasher")), "slasher_server_notifier", ); executor.spawn_blocking( - || Self::run_processor(beacon_chain, slasher, notif_receiver, network_sender), + || { + let span = info_span!("slasher", service = "slasher"); + let _enter = span.enter(); + Self::run_processor(beacon_chain, slasher, notif_receiver, network_sender); + }, "slasher_server_processor", ); @@ -84,14 +87,13 @@ impl SlasherService { update_period: u64, slot_offset: f64, notif_sender: SyncSender, - log: Logger, ) { let slot_offset = Duration::from_secs_f64(slot_offset); let start_instant = if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { Instant::now() + duration_to_next_slot + slot_offset } else { - error!(log, "Error aligning slasher to slot clock"); + error!("Error aligning slasher to slot clock"); Instant::now() }; let mut interval = interval_at(start_instant, Duration::from_secs(update_period)); @@ -104,7 +106,7 @@ impl SlasherService { break; } } else { - trace!(log, "Slasher has nothing to do: we are pre-genesis"); + trace!("Slasher has nothing to do: we are pre-genesis"); } } } @@ -116,7 +118,6 @@ impl SlasherService { notif_receiver: Receiver, network_sender: UnboundedSender>, ) { - let log = slasher.log(); while let Ok(current_epoch) = notif_receiver.recv() { let t = Instant::now(); @@ -125,10 +126,9 @@ impl SlasherService { Ok(stats) => Some(stats), Err(e) => { error!( - log, - "Error during scheduled slasher processing"; - "epoch" => current_epoch, - "error" => ?e, + epoch = %current_epoch, + error = ?e, + "Error during scheduled slasher processing" ); None } @@ -139,10 +139,9 @@ impl SlasherService { // If the database is full then pruning could help to free it up. 
if let Err(e) = slasher.prune_database(current_epoch) { error!( - log, - "Error during slasher database pruning"; - "epoch" => current_epoch, - "error" => ?e, + epoch = %current_epoch, + error = ?e, + "Error during slasher database pruning" ); continue; }; @@ -155,12 +154,11 @@ impl SlasherService { if let Some(stats) = stats { debug!( - log, - "Completed slasher update"; - "epoch" => current_epoch, - "time_taken" => format!("{}ms", t.elapsed().as_millis()), - "num_attestations" => stats.attestation_stats.num_processed, - "num_blocks" => stats.block_stats.num_processed, + epoch = %current_epoch, + time_taken = format!("{}ms", t.elapsed().as_millis()), + num_attestations = stats.attestation_stats.num_processed, + num_blocks = stats.block_stats.num_processed, + "Completed slasher update" ); } } @@ -181,7 +179,6 @@ impl SlasherService { slasher: &Slasher, network_sender: &UnboundedSender>, ) { - let log = slasher.log(); let attester_slashings = slasher.get_attester_slashings(); for slashing in attester_slashings { @@ -198,18 +195,16 @@ impl SlasherService { BlockOperationError::Invalid(AttesterSlashingInvalid::NoSlashableIndices), )) => { debug!( - log, - "Skipping attester slashing for slashed validators"; - "slashing" => ?slashing, + ?slashing, + "Skipping attester slashing for slashed validators" ); continue; } Err(e) => { warn!( - log, - "Attester slashing produced is invalid"; - "error" => ?e, - "slashing" => ?slashing, + error = ?e, + ?slashing, + "Attester slashing produced is invalid" ); continue; } @@ -224,9 +219,8 @@ impl SlasherService { Self::publish_attester_slashing(beacon_chain, network_sender, slashing) { debug!( - log, - "Unable to publish attester slashing"; - "error" => e, + error = ?e, + "Unable to publish attester slashing" ); } } @@ -238,7 +232,6 @@ impl SlasherService { slasher: &Slasher, network_sender: &UnboundedSender>, ) { - let log = slasher.log(); let proposer_slashings = slasher.get_proposer_slashings(); for slashing in proposer_slashings 
{ @@ -254,18 +247,16 @@ impl SlasherService { )), )) => { debug!( - log, - "Skipping proposer slashing for slashed validator"; - "validator_index" => index, + validator_index = index, + "Skipping proposer slashing for slashed validator" ); continue; } Err(e) => { error!( - log, - "Proposer slashing produced is invalid"; - "error" => ?e, - "slashing" => ?slashing, + error = ?e, + ?slashing, + "Proposer slashing produced is invalid" ); continue; } @@ -277,9 +268,8 @@ impl SlasherService { Self::publish_proposer_slashing(beacon_chain, network_sender, slashing) { debug!( - log, - "Unable to publish proposer slashing"; - "error" => e, + error = ?e, + "Unable to publish proposer slashing" ); } } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 9e5e827034..071109e00c 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -12,12 +12,12 @@ use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; use parking_lot::Mutex; use serde::de::DeserializeOwned; -use slog::{info, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; use std::sync::Arc; +use tracing::info; use tree_hash::TreeHash; use types::{ AggregateSignature, AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, @@ -287,8 +287,8 @@ fn ssz_decode(bytes: Cow<[u8]>) -> Result { } impl SlasherDB { - pub fn open(config: Arc, spec: Arc, log: Logger) -> Result { - info!(log, "Opening slasher database"; "backend" => %config.backend); + pub fn open(config: Arc, spec: Arc) -> Result { + info!(backend = %config.backend, "Opening slasher database"); std::fs::create_dir_all(&config.database_path)?; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 19f2cd138d..12f35e657e 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -9,9 +9,9 @@ use crate::{ IndexedAttestationId, ProposerSlashingStatus, RwTransaction, SimpleBatch, SlasherDB, }; use 
parking_lot::Mutex; -use slog::{debug, error, info, Logger}; use std::collections::HashSet; use std::sync::Arc; +use tracing::{debug, error, info}; use types::{ AttesterSlashing, ChainSpec, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, @@ -25,26 +25,21 @@ pub struct Slasher { attester_slashings: Mutex>>, proposer_slashings: Mutex>, config: Arc, - log: Logger, } impl Slasher { - pub fn open(config: Config, spec: Arc, log: Logger) -> Result { + pub fn open(config: Config, spec: Arc) -> Result { config.validate()?; let config = Arc::new(config); - let db = SlasherDB::open(config.clone(), spec, log.clone())?; - Self::from_config_and_db(config, db, log) + let db = SlasherDB::open(config.clone(), spec)?; + Self::from_config_and_db(config, db) } /// TESTING ONLY. /// /// Initialise a slasher database from an existing `db`. The caller must ensure that the /// database's config matches the one provided. - pub fn from_config_and_db( - config: Arc, - db: SlasherDB, - log: Logger, - ) -> Result { + pub fn from_config_and_db(config: Arc, db: SlasherDB) -> Result { config.validate()?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); @@ -57,7 +52,6 @@ impl Slasher { attester_slashings, proposer_slashings, config, - log, }) } @@ -80,10 +74,6 @@ impl Slasher { &self.config } - pub fn log(&self) -> &Logger { - &self.log - } - /// Accept an attestation from the network and queue it for processing. 
pub fn accept_attestation(&self, attestation: IndexedAttestation) { self.attestation_queue.queue(attestation); @@ -126,11 +116,7 @@ impl Slasher { let num_slashings = slashings.len(); if !slashings.is_empty() { - info!( - self.log, - "Found {} new proposer slashings!", - slashings.len(), - ); + info!("Found {} new proposer slashings!", slashings.len()); self.proposer_slashings.lock().extend(slashings); } @@ -156,11 +142,10 @@ impl Slasher { self.attestation_queue.requeue(deferred); debug!( - self.log, - "Pre-processing attestations for slasher"; - "num_valid" => num_valid, - "num_deferred" => num_deferred, - "num_dropped" => num_dropped, + %num_valid, + num_deferred, + num_dropped, + "Pre-processing attestations for slasher" ); metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_VALID, num_valid as i64); metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_DEFERRED, num_deferred as i64); @@ -194,12 +179,7 @@ impl Slasher { } } - debug!( - self.log, - "Stored attestations in slasher DB"; - "num_stored" => num_stored, - "num_valid" => num_valid, - ); + debug!(num_stored, ?num_valid, "Stored attestations in slasher DB"); metrics::set_gauge( &SLASHER_NUM_ATTESTATIONS_STORED_PER_BATCH, num_stored as i64, @@ -239,19 +219,14 @@ impl Slasher { ) { Ok(slashings) => { if !slashings.is_empty() { - info!( - self.log, - "Found {} new double-vote slashings!", - slashings.len() - ); + info!("Found {} new double-vote slashings!", slashings.len()); } self.attester_slashings.lock().extend(slashings); } Err(e) => { error!( - self.log, - "Error checking for double votes"; - "error" => format!("{:?}", e) + error = ?e, + "Error checking for double votes" ); return Err(e); } @@ -269,20 +244,12 @@ impl Slasher { ) { Ok(slashings) => { if !slashings.is_empty() { - info!( - self.log, - "Found {} new surround slashings!", - slashings.len() - ); + info!("Found {} new surround slashings!", slashings.len()); } self.attester_slashings.lock().extend(slashings); } Err(e) => { - error!( - self.log, - "Error 
processing array update"; - "error" => format!("{:?}", e), - ); + error!(error = ?e, "Error processing array update"); return Err(e); } } @@ -315,10 +282,9 @@ impl Slasher { if let Some(slashing) = slashing_status.into_slashing(attestation) { debug!( - self.log, - "Found double-vote slashing"; - "validator_index" => validator_index, - "epoch" => slashing.attestation_1().data().target.epoch, + validator_index, + epoch = %slashing.attestation_1().data().target.epoch, + "Found double-vote slashing" ); slashings.insert(slashing); } diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index cc6e57d95d..22c9cfc128 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,6 +1,5 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] -use logging::test_logger; use maplit::hashset; use rayon::prelude::*; use slasher::{ @@ -272,7 +271,7 @@ fn slasher_test( let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); let spec = chain_spec(); - let slasher = Slasher::open(config, spec, test_logger()).unwrap(); + let slasher = Slasher::open(config, spec).unwrap(); let current_epoch = Epoch::new(current_epoch); for (i, attestation) in attestations.iter().enumerate() { @@ -302,7 +301,7 @@ fn parallel_slasher_test( let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); let spec = chain_spec(); - let slasher = Slasher::open(config, spec, test_logger()).unwrap(); + let slasher = Slasher::open(config, spec).unwrap(); let current_epoch = Epoch::new(current_epoch); attestations diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index 6d2a1f5176..ef525c6f3f 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,6 +1,5 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] -use logging::test_logger; use slasher::{ test_utils::{block as test_block, chain_spec, 
E}, Config, Slasher, @@ -13,7 +12,7 @@ fn empty_pruning() { let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); let spec = chain_spec(); - let slasher = Slasher::::open(config, spec, test_logger()).unwrap(); + let slasher = Slasher::::open(config, spec).unwrap(); slasher.prune_database(Epoch::new(0)).unwrap(); } @@ -27,7 +26,7 @@ fn block_pruning() { config.history_length = 2; let spec = chain_spec(); - let slasher = Slasher::::open(config.clone(), spec, test_logger()).unwrap(); + let slasher = Slasher::::open(config.clone(), spec).unwrap(); let current_epoch = Epoch::from(2 * config.history_length); // Pruning the empty database should be safe. diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index ff234dff3f..3270700d88 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,6 +1,5 @@ #![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] -use logging::test_logger; use rand::prelude::*; use slasher::{ test_utils::{ @@ -36,9 +35,8 @@ impl Default for TestConfig { fn make_db() -> (TempDir, SlasherDB) { let tempdir = tempdir().unwrap(); let initial_config = Arc::new(Config::new(tempdir.path().into())); - let logger = test_logger(); let spec = chain_spec(); - let db = SlasherDB::open(initial_config.clone(), spec, logger).unwrap(); + let db = SlasherDB::open(initial_config.clone(), spec).unwrap(); (tempdir, db) } @@ -60,7 +58,7 @@ fn random_test(seed: u64, mut db: SlasherDB, test_config: TestConfig) -> Slas let config = Arc::new(config); db.update_config(config.clone()); - let slasher = Slasher::::from_config_and_db(config.clone(), db, test_logger()).unwrap(); + let slasher = Slasher::::from_config_and_db(config.clone(), db).unwrap(); let validators = (0..num_validators as u64).collect::>(); diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index 2ec56bc7d5..e34d0f2233 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,6 +1,5 @@ 
#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] -use logging::test_logger; use slasher::{ test_utils::{chain_spec, indexed_att}, Config, Slasher, @@ -17,7 +16,7 @@ fn attestation_pruning_empty_wrap_around() { config.chunk_size = 16; config.history_length = 16; - let slasher = Slasher::open(config.clone(), spec, test_logger()).unwrap(); + let slasher = Slasher::open(config.clone(), spec).unwrap(); let v = vec![0]; let history_length = config.history_length as u64; diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index a1c74389a7..2f97cdf5b9 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -371,7 +371,6 @@ impl Tester { } let harness = BeaconChainHarness::>::builder(E::default()) - .logger(logging::test_logger()) .spec(spec.clone()) .keypairs(vec![]) .chain_config(ChainConfig { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index f664509304..cf31c184fe 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -105,7 +105,6 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: impl TestRig { pub fn new(generic_engine: Engine) -> Self { - let log = logging::test_logger(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -114,7 +113,12 @@ impl TestRig { ); let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let executor = TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + shutdown_tx, + "test".to_string(), + ); let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec()); spec.terminal_total_difficulty = Uint256::ZERO; @@ -131,8 +135,7 @@ impl TestRig { 
default_datadir: execution_engine.datadir(), ..Default::default() }; - let execution_layer = - ExecutionLayer::from_config(config, executor.clone(), log.clone()).unwrap(); + let execution_layer = ExecutionLayer::from_config(config, executor.clone()).unwrap(); ExecutionPair { execution_engine, execution_layer, @@ -150,8 +153,7 @@ impl TestRig { default_datadir: execution_engine.datadir(), ..Default::default() }; - let execution_layer = - ExecutionLayer::from_config(config, executor, log.clone()).unwrap(); + let execution_layer = ExecutionLayer::from_config(config, executor).unwrap(); ExecutionPair { execution_engine, execution_layer, diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 77645dba45..12b0afcc75 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -8,14 +8,17 @@ edition = { workspace = true } [dependencies] clap = { workspace = true } env_logger = { workspace = true } +environment = { workspace = true } eth2_network_config = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } node_test_rig = { path = "../node_test_rig" } parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } serde_json = { workspace = true } tokio = { workspace = true } +tracing-subscriber = { workspace = true } types = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 82a7028582..fff5c71a87 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -13,6 +13,12 @@ use rayon::prelude::*; use std::cmp::max; use std::sync::Arc; use std::time::Duration; + +use environment::tracing_common; +use logging::MetricsLayer; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + use tokio::time::sleep; use types::{Epoch, EthSpec, 
MinimalEthSpec}; @@ -82,23 +88,47 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let mut env = EnvironmentBuilder::minimal() - .initialize_logger(LoggerConfig { + let ( + env_builder, + filter_layer, + _libp2p_discv5_layer, + file_logging_layer, + stdout_logging_layer, + _sse_logging_layer_opt, + logger_config, + _dependency_log_filter, + ) = tracing_common::construct_logger( + LoggerConfig { path: None, - debug_level: log_level.clone(), - logfile_debug_level: log_level.clone(), + debug_level: tracing_common::parse_level(&log_level.clone()), + logfile_debug_level: tracing_common::parse_level(&log_level.clone()), log_format: None, logfile_format: None, - log_color: false, + log_color: true, + logfile_color: true, disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: false, is_restricted: true, sse_logging: false, - })? - .multi_threaded_tokio_runtime()? - .build()?; + extra_info: false, + }, + matches, + EnvironmentBuilder::minimal(), + ); + + if let Err(e) = tracing_subscriber::registry() + .with(filter_layer) + .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) + .with(stdout_logging_layer.with_filter(logger_config.debug_level)) + .with(MetricsLayer) + .try_init() + { + eprintln!("Failed to initialize dependency logging: {e}"); + } + + let mut env = env_builder.multi_threaded_tokio_runtime()?.build()?; let mut spec = (*env.eth2_config.spec).clone(); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 7d4bdfa264..98a6a34ffa 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -3,7 +3,9 @@ use crate::{checks, LocalNetwork}; use clap::ArgMatches; use crate::retry::with_retry; +use environment::tracing_common; use futures::prelude::*; +use logging::MetricsLayer; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, testing_validator_config, ValidatorFiles, @@ -13,8 +15,9 @@ 
use std::cmp::max; use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use types::{Epoch, EthSpec, MinimalEthSpec}; - const END_EPOCH: u64 = 16; const GENESIS_DELAY: u64 = 32; const ALTAIR_FORK_EPOCH: u64 = 0; @@ -89,23 +92,49 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { }) .collect::>(); - let mut env = EnvironmentBuilder::minimal() - .initialize_logger(LoggerConfig { + let ( + env_builder, + filter_layer, + libp2p_discv5_layer, + file_logging_layer, + stdout_logging_layer, + _sse_logging_layer_opt, + logger_config, + dependency_log_filter, + ) = tracing_common::construct_logger( + LoggerConfig { path: None, - debug_level: log_level.clone(), - logfile_debug_level: log_level.clone(), + debug_level: tracing_common::parse_level(&log_level.clone()), + logfile_debug_level: tracing_common::parse_level(&log_level.clone()), log_format: None, logfile_format: None, - log_color: false, + log_color: true, + logfile_color: false, disable_log_timestamp: false, max_log_size: 0, max_log_number: 0, compression: false, is_restricted: true, sse_logging: false, - })? - .multi_threaded_tokio_runtime()? 
- .build()?; + extra_info: false, + }, + matches, + EnvironmentBuilder::minimal(), + ); + + if let Err(e) = tracing_subscriber::registry() + .with(dependency_log_filter) + .with(filter_layer) + .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) + .with(stdout_logging_layer.with_filter(logger_config.debug_level)) + .with(libp2p_discv5_layer) + .with(MetricsLayer) + .try_init() + { + eprintln!("Failed to initialize dependency logging: {e}"); + } + + let mut env = env_builder.multi_threaded_tokio_runtime()?.build()?; let mut spec = (*env.eth2_config.spec).clone(); diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml deleted file mode 100644 index d2d705f714..0000000000 --- a/testing/test-test_logger/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "test-test_logger" -version = "0.1.0" -edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -logging = { workspace = true } -slog = { workspace = true } diff --git a/testing/test-test_logger/src/lib.rs b/testing/test-test_logger/src/lib.rs deleted file mode 100644 index a2e2a80943..0000000000 --- a/testing/test-test_logger/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -use slog::{info, Logger}; - -pub struct Config { - log: Logger, -} - -pub fn fn_with_logging(config: &Config) { - info!(&config.log, "hi"); -} - -#[cfg(test)] -mod tests { - use super::*; - use logging::test_logger; - - #[test] - fn test_fn_with_logging() { - let config = Config { log: test_logger() }; - - fn_with_logging(&config); - } -} diff --git a/testing/validator_test_rig/Cargo.toml b/testing/validator_test_rig/Cargo.toml index 76560b8afc..bdbdac95d8 100644 --- a/testing/validator_test_rig/Cargo.toml +++ b/testing/validator_test_rig/Cargo.toml @@ -10,5 +10,5 @@ mockito = { workspace = true } regex = { workspace = true } sensitive_url = { workspace = true } serde_json = { workspace = true } -slog = { 
workspace = true } +tracing = { workspace = true } types = { workspace = true } diff --git a/testing/validator_test_rig/src/mock_beacon_node.rs b/testing/validator_test_rig/src/mock_beacon_node.rs index f875116155..7a90270913 100644 --- a/testing/validator_test_rig/src/mock_beacon_node.rs +++ b/testing/validator_test_rig/src/mock_beacon_node.rs @@ -1,20 +1,18 @@ use eth2::types::{GenericResponse, SyncingData}; use eth2::{BeaconNodeHttpClient, StatusCode, Timeouts}; -use logging::test_logger; use mockito::{Matcher, Mock, Server, ServerGuard}; use regex::Regex; use sensitive_url::SensitiveUrl; -use slog::{info, Logger}; use std::marker::PhantomData; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; +use tracing::info; use types::{ChainSpec, ConfigAndPreset, EthSpec, SignedBlindedBeaconBlock}; pub struct MockBeaconNode { server: ServerGuard, pub beacon_api_client: BeaconNodeHttpClient, - log: Logger, _phantom: PhantomData, pub received_blocks: Arc>>>, } @@ -27,11 +25,9 @@ impl MockBeaconNode { SensitiveUrl::from_str(&server.url()).unwrap(), Timeouts::set_all(Duration::from_secs(1)), ); - let log = test_logger(); Self { server, beacon_api_client, - log, _phantom: PhantomData, received_blocks: Arc::new(Mutex::new(Vec::new())), } @@ -69,7 +65,6 @@ impl MockBeaconNode { /// Mocks the `post_beacon_blinded_blocks_v2_ssz` response with an optional `delay`. 
pub fn mock_post_beacon_blinded_blocks_v2_ssz(&mut self, delay: Duration) -> Mock { let path_pattern = Regex::new(r"^/eth/v2/beacon/blinded_blocks$").unwrap(); - let log = self.log.clone(); let url = self.server.url(); let received_blocks = Arc::clone(&self.received_blocks); @@ -80,7 +75,6 @@ impl MockBeaconNode { .with_status(200) .with_body_from_request(move |request| { info!( - log, "{}", format!( "Received published block request on server {} with delay {} s", diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 659495b2b3..1eb14cf1d5 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -25,7 +25,6 @@ mod tests { use initialized_validators::{ load_pem_certificate, load_pkcs12_identity, InitializedValidators, }; - use logging::test_logger; use parking_lot::Mutex; use reqwest::Client; use serde::Serialize; @@ -316,7 +315,6 @@ mod tests { using_web3signer: bool, spec: Arc, ) -> Self { - let log = test_logger(); let validator_dir = TempDir::new().unwrap(); let config = initialized_validators::Config::default(); @@ -325,7 +323,6 @@ mod tests { validator_definitions, validator_dir.path().into(), config.clone(), - log.clone(), ) .await .unwrap(); @@ -340,8 +337,12 @@ mod tests { ); let (runtime_shutdown, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = - TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + let executor = TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + shutdown_tx, + "test".to_string(), + ); let slashing_db_path = validator_dir.path().join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); @@ -365,7 +366,6 @@ mod tests { slot_clock, &config, executor, - log.clone(), ); Self { diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index fb6007b00a..85517682bb 100644 --- 
a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -29,9 +29,9 @@ reqwest = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } slashing_protection = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } types = { workspace = true } validator_http_api = { workspace = true } validator_http_metrics = { workspace = true } diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 598020d137..4297bae15f 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -15,10 +15,10 @@ eth2 = { workspace = true } futures = { workspace = true } itertools = { workspace = true } serde = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } strum = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/beacon_node_health.rs b/validator_client/beacon_node_fallback/src/beacon_node_health.rs index 80d3fb7efd..1b5d5b98cb 100644 --- a/validator_client/beacon_node_fallback/src/beacon_node_health.rs +++ b/validator_client/beacon_node_fallback/src/beacon_node_health.rs @@ -1,9 +1,9 @@ use super::CandidateError; use eth2::BeaconNodeHttpClient; use serde::{Deserialize, Serialize}; -use slog::{warn, Logger}; use std::cmp::Ordering; use std::fmt::{Debug, Display, Formatter}; +use tracing::warn; use types::Slot; /// Sync distances between 0 and DEFAULT_SYNC_TOLERANCE are considered `synced`. 
@@ -276,15 +276,13 @@ impl BeaconNodeHealth { pub async fn check_node_health( beacon_node: &BeaconNodeHttpClient, - log: &Logger, ) -> Result<(Slot, bool, bool), CandidateError> { let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { warn!( - log, - "Unable connect to beacon node"; - "error" => %e + error = %e, + "Unable connect to beacon node" ); return Err(CandidateError::Offline); diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index abcf74a1a6..befc18c563 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -12,7 +12,6 @@ use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; -use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; use std::fmt; @@ -24,6 +23,7 @@ use std::time::{Duration, Instant}; use std::vec::Vec; use strum::EnumVariantNames; use tokio::{sync::RwLock, time::sleep}; +use tracing::{debug, error, warn}; use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; use validator_metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; @@ -222,15 +222,14 @@ impl CandidateBeaconNode { distance_tiers: &BeaconNodeSyncDistanceTiers, slot_clock: Option<&T>, spec: &ChainSpec, - log: &Logger, ) -> Result<(), CandidateError> { - if let Err(e) = self.is_compatible(spec, log).await { + if let Err(e) = self.is_compatible(spec).await { *self.health.write().await = Err(e); return Err(e); } if let Some(slot_clock) = slot_clock { - match check_node_health(&self.beacon_node, log).await { + match check_node_health(&self.beacon_node).await { Ok((head, is_optimistic, el_offline)) => { let Some(slot_clock_head) = slot_clock.now() else { let e = match slot_clock.is_prior_to_genesis() { @@ -288,17 +287,16 @@ impl CandidateBeaconNode { } /// Checks if the node has the 
correct specification. - async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> { + async fn is_compatible(&self, spec: &ChainSpec) -> Result<(), CandidateError> { let config = self .beacon_node .get_config_spec::() .await .map_err(|e| { error!( - log, - "Unable to read spec from beacon node"; - "error" => %e, - "endpoint" => %self.beacon_node, + error = %e, + endpoint = %self.beacon_node, + "Unable to read spec from beacon node" ); CandidateError::Offline })? @@ -306,71 +304,64 @@ impl CandidateBeaconNode { let beacon_node_spec = ChainSpec::from_config::(&config).ok_or_else(|| { error!( - log, + endpoint = %self.beacon_node, "The minimal/mainnet spec type of the beacon node does not match the validator \ - client. See the --network command."; - "endpoint" => %self.beacon_node, + client. See the --network command." + ); CandidateError::Incompatible })?; if beacon_node_spec.genesis_fork_version != spec.genesis_fork_version { error!( - log, - "Beacon node is configured for a different network"; - "endpoint" => %self.beacon_node, - "bn_genesis_fork" => ?beacon_node_spec.genesis_fork_version, - "our_genesis_fork" => ?spec.genesis_fork_version, + endpoint = %self.beacon_node, + bn_genesis_fork = ?beacon_node_spec.genesis_fork_version, + our_genesis_fork = ?spec.genesis_fork_version, + "Beacon node is configured for a different network" ); return Err(CandidateError::Incompatible); } else if beacon_node_spec.altair_fork_epoch != spec.altair_fork_epoch { warn!( - log, - "Beacon node has mismatched Altair fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_altair_fork_epoch" => ?beacon_node_spec.altair_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, + endpoint = %self.beacon_node, + endpoint_altair_fork_epoch = ?beacon_node_spec.altair_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Altair fork epoch" ); } else if beacon_node_spec.bellatrix_fork_epoch != spec.bellatrix_fork_epoch { warn!( - log, - 
"Beacon node has mismatched Bellatrix fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_bellatrix_fork_epoch" => ?beacon_node_spec.bellatrix_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, + endpoint = %self.beacon_node, + endpoint_bellatrix_fork_epoch = ?beacon_node_spec.bellatrix_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Bellatrix fork epoch" ); } else if beacon_node_spec.capella_fork_epoch != spec.capella_fork_epoch { warn!( - log, - "Beacon node has mismatched Capella fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_capella_fork_epoch" => ?beacon_node_spec.capella_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, + endpoint = %self.beacon_node, + endpoint_capella_fork_epoch = ?beacon_node_spec.capella_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Capella fork epoch" ); } else if beacon_node_spec.deneb_fork_epoch != spec.deneb_fork_epoch { warn!( - log, - "Beacon node has mismatched Deneb fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_deneb_fork_epoch" => ?beacon_node_spec.deneb_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, + endpoint = %self.beacon_node, + endpoint_deneb_fork_epoch = ?beacon_node_spec.deneb_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Deneb fork epoch" ); } else if beacon_node_spec.electra_fork_epoch != spec.electra_fork_epoch { warn!( - log, - "Beacon node has mismatched Electra fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_electra_fork_epoch" => ?beacon_node_spec.electra_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, + endpoint = %self.beacon_node, + endpoint_electra_fork_epoch = ?beacon_node_spec.electra_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Electra fork epoch" ); } else if beacon_node_spec.fulu_fork_epoch != spec.fulu_fork_epoch { warn!( - log, - "Beacon node has mismatched Fulu fork epoch"; - "endpoint" => %self.beacon_node, - 
"endpoint_fulu_fork_epoch" => ?beacon_node_spec.fulu_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, - ); + endpoint = %self.beacon_node, + endpoint_fulu_fork_epoch = ?beacon_node_spec.fulu_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Fulu fork epoch" + ); } Ok(()) @@ -387,7 +378,6 @@ pub struct BeaconNodeFallback { slot_clock: Option, broadcast_topics: Vec, spec: Arc, - log: Logger, } impl BeaconNodeFallback { @@ -396,7 +386,6 @@ impl BeaconNodeFallback { config: Config, broadcast_topics: Vec, spec: Arc, - log: Logger, ) -> Self { let distance_tiers = config.sync_tolerances; Self { @@ -405,7 +394,6 @@ impl BeaconNodeFallback { slot_clock: None, broadcast_topics, spec, - log, } } @@ -488,7 +476,6 @@ impl BeaconNodeFallback { &self.distance_tiers, self.slot_clock.as_ref(), &self.spec, - &self.log, )); nodes.push(candidate.beacon_node.to_string()); } @@ -501,10 +488,9 @@ impl BeaconNodeFallback { if let Err(e) = result { if *e != CandidateError::PreGenesis { warn!( - self.log, - "A connected beacon node errored during routine health check"; - "error" => ?e, - "endpoint" => node, + error = ?e, + endpoint = %node, + "A connected beacon node errored during routine health check" ); } } @@ -576,11 +562,7 @@ impl BeaconNodeFallback { // Run `func` using a `candidate`, returning the value or capturing errors. for candidate in candidates.iter() { - futures.push(Self::run_on_candidate( - candidate.beacon_node.clone(), - &func, - &self.log, - )); + futures.push(Self::run_on_candidate(candidate.beacon_node.clone(), &func)); } drop(candidates); @@ -598,11 +580,7 @@ impl BeaconNodeFallback { // Run `func` using a `candidate`, returning the value or capturing errors. 
for candidate in candidates.iter() { - futures.push(Self::run_on_candidate( - candidate.beacon_node.clone(), - &func, - &self.log, - )); + futures.push(Self::run_on_candidate(candidate.beacon_node.clone(), &func)); } drop(candidates); @@ -621,7 +599,6 @@ impl BeaconNodeFallback { async fn run_on_candidate( candidate: BeaconNodeHttpClient, func: F, - log: &Logger, ) -> Result)> where F: Fn(BeaconNodeHttpClient) -> R, @@ -636,10 +613,9 @@ impl BeaconNodeFallback { Ok(val) => Ok(val), Err(e) => { debug!( - log, - "Request to beacon node failed"; - "node" => %candidate, - "error" => ?e, + node = %candidate, + error = ?e, + "Request to beacon node failed" ); inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]); Err((candidate.to_string(), Error::RequestFailed(e))) @@ -666,11 +642,7 @@ impl BeaconNodeFallback { // Run `func` using a `candidate`, returning the value or capturing errors. for candidate in candidates.iter() { - futures.push(Self::run_on_candidate( - candidate.beacon_node.clone(), - &func, - &self.log, - )); + futures.push(Self::run_on_candidate(candidate.beacon_node.clone(), &func)); } drop(candidates); @@ -752,7 +724,6 @@ mod tests { use crate::beacon_node_health::BeaconNodeHealthTier; use eth2::SensitiveUrl; use eth2::Timeouts; - use logging::test_logger; use slot_clock::TestingSlotClock; use strum::VariantNames; use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; @@ -902,10 +873,9 @@ mod tests { candidates: Vec>, topics: Vec, spec: Arc, - log: Logger, ) -> BeaconNodeFallback { let mut beacon_node_fallback = - BeaconNodeFallback::new(candidates, Config::default(), topics, spec, log); + BeaconNodeFallback::new(candidates, Config::default(), topics, spec); beacon_node_fallback.set_slot_clock(TestingSlotClock::new( Slot::new(1), @@ -932,7 +902,6 @@ mod tests { ], vec![], spec.clone(), - test_logger(), ); // BeaconNodeHealthTier 1 @@ -979,7 +948,6 @@ mod tests { vec![beacon_node_1, beacon_node_2], vec![ApiTopic::Blocks], spec.clone(), - test_logger(), ); 
mock_beacon_node_1.mock_post_beacon_blinded_blocks_v2_ssz(Duration::from_secs(0)); @@ -1021,7 +989,6 @@ mod tests { vec![beacon_node_1, beacon_node_2, beacon_node_3], vec![], spec.clone(), - test_logger(), ); let mock1 = mock_beacon_node_1.mock_offline_node(); diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index 66b61a411b..803dd94322 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -8,11 +8,12 @@ authors = ["Sigma Prime "] beacon_node_fallback = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } +logging = { workspace = true } parking_lot = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index 4a593c2700..cb81b3ffc2 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -32,14 +32,15 @@ use beacon_node_fallback::BeaconNodeFallback; use environment::RuntimeContext; use eth2::types::LivenessResponseData; +use logging::crit; use parking_lot::RwLock; -use slog::{crit, error, info, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::future::Future; use std::sync::Arc; use task_executor::ShutdownReason; use tokio::time::sleep; +use tracing::{error, info}; use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; /// A wrapper around `PublicKeyBytes` which encodes information about the status of a validator @@ -164,7 +165,6 @@ impl DoppelgangerState { /// doppelganger progression. 
async fn beacon_node_liveness( beacon_nodes: Arc>, - log: Logger, current_epoch: Epoch, validator_indices: Vec, ) -> LivenessResponses { @@ -203,10 +203,9 @@ async fn beacon_node_liveness( .await .unwrap_or_else(|e| { crit!( - log, - "Failed previous epoch liveness query"; - "error" => %e, - "previous_epoch" => %previous_epoch, + error = %e, + previous_epoch = %previous_epoch, + "Failed previous epoch liveness query" ); // Return an empty vec. In effect, this means to keep trying to make doppelganger // progress even if some of the calls are failing. @@ -239,10 +238,9 @@ async fn beacon_node_liveness( .await .unwrap_or_else(|e| { crit!( - log, - "Failed current epoch liveness query"; - "error" => %e, - "current_epoch" => %current_epoch, + error = %e, + current_epoch = %current_epoch, + "Failed current epoch liveness query" ); // Return an empty vec. In effect, this means to keep trying to make doppelganger // progress even if some of the calls are failing. @@ -257,11 +255,10 @@ async fn beacon_node_liveness( || current_epoch_responses.len() != previous_epoch_responses.len() { error!( - log, - "Liveness query omitted validators"; - "previous_epoch_response" => previous_epoch_responses.len(), - "current_epoch_response" => current_epoch_responses.len(), - "requested" => validator_indices.len(), + previous_epoch_response = previous_epoch_responses.len(), + current_epoch_response = current_epoch_responses.len(), + requested = validator_indices.len(), + "Liveness query omitted validators" ) } @@ -271,19 +268,12 @@ async fn beacon_node_liveness( } } +#[derive(Default)] pub struct DoppelgangerService { doppelganger_states: RwLock>, - log: Logger, } impl DoppelgangerService { - pub fn new(log: Logger) -> Self { - Self { - doppelganger_states: <_>::default(), - log, - } - } - /// Starts a reoccurring future which will try to keep the doppelganger service updated each /// slot. 
pub fn start_update_service( @@ -302,35 +292,25 @@ impl DoppelgangerService { let get_index = move |pubkey| validator_store.get_validator_index(&pubkey); // Define the `get_liveness` function as one that queries the beacon node API. - let log = service.log.clone(); let get_liveness = move |current_epoch, validator_indices| { - beacon_node_liveness( - beacon_nodes.clone(), - log.clone(), - current_epoch, - validator_indices, - ) + beacon_node_liveness(beacon_nodes.clone(), current_epoch, validator_indices) }; let mut shutdown_sender = context.executor.shutdown_sender(); - let log = service.log.clone(); + let mut shutdown_func = move || { if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure("Doppelganger detected.")) { crit!( - log, - "Failed to send shutdown signal"; - "msg" => "terminate this process immediately", - "error" => ?e + msg = "terminate this process immediately", + error = ?e, + "Failed to send shutdown signal" ); } }; - info!( - service.log, - "Doppelganger detection service started"; - ); + info!("Doppelganger detection service started"); context.executor.spawn( async move { @@ -360,9 +340,8 @@ impl DoppelgangerService { .await { error!( - service.log, - "Error during doppelganger detection"; - "error" => ?e + error = ?e, + "Error during doppelganger detection" ); } } @@ -387,10 +366,9 @@ impl DoppelgangerService { }) .unwrap_or_else(|| { crit!( - self.log, - "Validator unknown to doppelganger service"; - "msg" => "preventing validator from performing duties", - "pubkey" => ?validator + msg = "preventing validator from performing duties", + pubkey = ?validator, + "Validator unknown to doppelganger service" ); DoppelgangerStatus::UnknownToDoppelganger(validator) }) @@ -552,11 +530,7 @@ impl DoppelgangerService { // Resolve the index from the server response back to a public key. 
let Some(pubkey) = indices_map.get(&response.index) else { - crit!( - self.log, - "Inconsistent indices map"; - "validator_index" => response.index, - ); + crit!(validator_index = response.index, "Inconsistent indices map"); // Skip this result if an inconsistency is detected. continue; }; @@ -566,9 +540,8 @@ impl DoppelgangerService { state.next_check_epoch } else { crit!( - self.log, - "Inconsistent doppelganger state"; - "validator_pubkey" => ?pubkey, + validator_pubkey = ?pubkey, + "Inconsistent doppelganger state" ); // Skip this result if an inconsistency is detected. continue; @@ -582,15 +555,14 @@ impl DoppelgangerService { let violators_exist = !violators.is_empty(); if violators_exist { crit!( - self.log, - "Doppelganger(s) detected"; - "msg" => "A doppelganger occurs when two different validator clients run the \ - same public key. This validator client detected another instance of a local \ - validator on the network and is shutting down to prevent potential slashable \ - offences. Ensure that you are not running a duplicate or overlapping \ - validator client", - "doppelganger_indices" => ?violators - ) + msg = "A doppelganger occurs when two different validator clients run the \ + same public key. This validator client detected another instance of a local \ + validator on the network and is shutting down to prevent potential slashable \ + offences. 
Ensure that you are not running a duplicate or overlapping \ + validator client", + doppelganger_indices = ?violators, + "Doppelganger(s) detected" + ); } // The concept of "epoch satisfaction" is that for some epoch `e` we are *satisfied* that @@ -665,19 +637,17 @@ impl DoppelgangerService { doppelganger_state.complete_detection_in_epoch(previous_epoch); info!( - self.log, - "Found no doppelganger"; - "further_checks_remaining" => doppelganger_state.remaining_epochs, - "epoch" => response.epoch, - "validator_index" => response.index + further_checks_remaining = doppelganger_state.remaining_epochs, + epoch = %response.epoch, + validator_index = response.index, + "Found no doppelganger" ); if doppelganger_state.remaining_epochs == 0 { info!( - self.log, - "Doppelganger detection complete"; - "msg" => "starting validator", - "validator_index" => response.index + msg = "starting validator", + validator_index = response.index, + "Doppelganger detection complete" ); } } @@ -696,7 +666,6 @@ impl DoppelgangerService { mod test { use super::*; use futures::executor::block_on; - use logging::test_logger; use slot_clock::TestingSlotClock; use std::future; use std::time::Duration; @@ -740,13 +709,12 @@ mod test { fn build(self) -> TestScenario { let mut rng = XorShiftRng::from_seed([42; 16]); let slot_clock = TestingSlotClock::new(Slot::new(0), GENESIS_TIME, SLOT_DURATION); - let log = test_logger(); TestScenario { validators: (0..self.validator_count) .map(|_| PublicKeyBytes::random_for_test(&mut rng)) .collect(), - doppelganger: DoppelgangerService::new(log), + doppelganger: DoppelgangerService::default(), slot_clock, } } diff --git a/validator_client/graffiti_file/Cargo.toml b/validator_client/graffiti_file/Cargo.toml index 8868f5aec8..b3bbeb1fd7 100644 --- a/validator_client/graffiti_file/Cargo.toml +++ b/validator_client/graffiti_file/Cargo.toml @@ -11,7 +11,7 @@ path = "src/lib.rs" [dependencies] bls = { workspace = true } serde = { workspace = true } -slog = { 
workspace = true } +tracing = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/validator_client/graffiti_file/src/lib.rs b/validator_client/graffiti_file/src/lib.rs index 9dab2e7827..86f582aa38 100644 --- a/validator_client/graffiti_file/src/lib.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -1,12 +1,11 @@ +use bls::PublicKeyBytes; use serde::{Deserialize, Serialize}; -use slog::warn; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader}; use std::path::PathBuf; use std::str::FromStr; - -use bls::PublicKeyBytes; +use tracing::warn; use types::{graffiti::GraffitiString, Graffiti}; #[derive(Debug)] @@ -108,7 +107,6 @@ fn read_line(line: &str) -> Result<(Option, Graffiti), Error> { // the next block produced by the validator with the given public key. pub fn determine_graffiti( validator_pubkey: &PublicKeyBytes, - log: &slog::Logger, graffiti_file: Option, validator_definition_graffiti: Option, graffiti_flag: Option, @@ -117,7 +115,7 @@ pub fn determine_graffiti( .and_then(|mut g| match g.load_graffiti(validator_pubkey) { Ok(g) => g, Err(e) => { - warn!(log, "Failed to read graffiti file"; "error" => ?e); + warn!(error = ?e, "Failed to read graffiti file"); None } }) diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 651e658a7a..482212d890 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -29,9 +29,9 @@ parking_lot = { workspace = true } rand = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } signing_method = { workspace = true } slashing_protection = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } sysinfo = { workspace = true } system_health = { workspace = true } @@ -39,6 +39,7 @@ task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { 
workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } diff --git a/validator_client/http_api/src/create_signed_voluntary_exit.rs b/validator_client/http_api/src/create_signed_voluntary_exit.rs index 32269b202b..7a9dc798d6 100644 --- a/validator_client/http_api/src/create_signed_voluntary_exit.rs +++ b/validator_client/http_api/src/create_signed_voluntary_exit.rs @@ -1,8 +1,8 @@ use bls::{PublicKey, PublicKeyBytes}; use eth2::types::GenericResponse; -use slog::{info, Logger}; use slot_clock::SlotClock; use std::sync::Arc; +use tracing::info; use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit}; use validator_store::ValidatorStore; @@ -11,7 +11,6 @@ pub async fn create_signed_voluntary_exit, validator_store: Arc>, slot_clock: T, - log: Logger, ) -> Result, warp::Rejection> { let epoch = match maybe_epoch { Some(epoch) => epoch, @@ -45,10 +44,9 @@ pub async fn create_signed_voluntary_exit pubkey_bytes.as_hex_string(), - "epoch" => epoch + validator = pubkey_bytes.as_hex_string(), + %epoch, + "Signing voluntary exit" ); let signed_voluntary_exit = validator_store diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index fd6b4fdae5..c2bcfe5ab4 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -11,12 +11,12 @@ use eth2::lighthouse_vc::{ use eth2_keystore::Keystore; use initialized_validators::{Error, InitializedValidators}; use signing_method::SigningMethod; -use slog::{info, warn, Logger}; use slot_clock::SlotClock; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; +use tracing::{info, warn}; use types::{EthSpec, PublicKeyBytes}; use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; use validator_store::ValidatorStore; @@ -64,7 +64,6 @@ pub fn import( secrets_dir: Option, validator_store: 
Arc>, task_executor: TaskExecutor, - log: Logger, ) -> Result { // Check request validity. This is the only cases in which we should return a 4xx code. if request.keystores.len() != request.passwords.len() { @@ -88,18 +87,14 @@ pub fn import( .iter() .any(|data| data.pubkey == pubkey_bytes) { - warn!( - log, - "Slashing protection data not provided"; - "public_key" => ?public_key, - ); + warn!(?public_key, "Slashing protection data not provided"); } } } validator_store.import_slashing_protection(slashing_protection) } else { - warn!(log, "No slashing protection data provided with keystores"); + warn!("No slashing protection data provided with keystores"); Ok(()) }; @@ -133,10 +128,9 @@ pub fn import( Ok(status) => Status::ok(status), Err(e) => { warn!( - log, - "Error importing keystore, skipped"; - "pubkey" => pubkey_str, - "error" => ?e, + pubkey = pubkey_str, + error = ?e, + "Error importing keystore, skipped" ); Status::error(ImportKeystoreStatus::Error, e) } @@ -157,9 +151,8 @@ pub fn import( if successful_import > 0 { info!( - log, - "Imported keystores via standard HTTP API"; - "count" => successful_import, + count = successful_import, + "Imported keystores via standard HTTP API" ); } @@ -243,9 +236,8 @@ pub fn delete( request: DeleteKeystoresRequest, validator_store: Arc>, task_executor: TaskExecutor, - log: Logger, ) -> Result { - let export_response = export(request, validator_store, task_executor, log.clone())?; + let export_response = export(request, validator_store, task_executor)?; // Check the status is Deleted to confirm deletion is successful, then only display the log let successful_deletion = export_response @@ -256,9 +248,8 @@ pub fn delete( if successful_deletion > 0 { info!( - log, - "Deleted keystore via standard HTTP API"; - "count" => successful_deletion, + count = successful_deletion, + "Deleted keystore via standard HTTP API" ); } @@ -276,7 +267,6 @@ pub fn export( request: DeleteKeystoresRequest, validator_store: Arc>, task_executor: 
TaskExecutor, - log: Logger, ) -> Result { // Remove from initialized validators. let initialized_validators_rwlock = validator_store.initialized_validators(); @@ -294,10 +284,9 @@ pub fn export( Ok(status) => status, Err(error) => { warn!( - log, - "Error deleting keystore"; - "pubkey" => ?pubkey_bytes, - "error" => ?error, + pubkey = ?pubkey_bytes, + ?error, + "Error deleting keystore" ); SingleExportKeystoresResponse { status: Status::error(DeleteKeystoreStatus::Error, error), diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index ae50b6a927..5bb4747bfe 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -34,10 +34,10 @@ use eth2::lighthouse_vc::{ }; use health_metrics::observe::Observe; use lighthouse_version::version_with_platform; +use logging::crit; use logging::SSELoggingComponents; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; -use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; use std::future::Future; @@ -49,6 +49,7 @@ use sysinfo::{System, SystemExt}; use system_health::observe_system_health_vc; use task_executor::TaskExecutor; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; +use tracing::{info, warn}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use validator_services::block_service::BlockService; @@ -87,7 +88,6 @@ pub struct Context { pub graffiti_flag: Option, pub spec: Arc, pub config: Config, - pub log: Logger, pub sse_logging_components: Option, pub slot_clock: T, pub _phantom: PhantomData, @@ -148,7 +148,6 @@ pub fn serve( let config = &ctx.config; let allow_keystore_export = config.allow_keystore_export; let store_passwords_in_secrets_dir = config.store_passwords_in_secrets_dir; - let log = ctx.log.clone(); // Configure CORS. let cors_builder = { @@ -165,7 +164,7 @@ pub fn serve( // Sanity check. 
if !config.enabled { - crit!(log, "Cannot start disabled metrics HTTP server"); + crit!("Cannot start disabled metrics HTTP server"); return Err(Error::Other( "A disabled metrics server should not be started".to_string(), )); @@ -179,9 +178,8 @@ pub fn serve( Ok(abs_path) => api_token_path = abs_path, Err(e) => { warn!( - log, - "Error canonicalizing token path"; - "error" => ?e, + error = ?e, + "Error canonicalizing token path" ); } }; @@ -239,9 +237,6 @@ pub fn serve( let inner_graffiti_flag = ctx.graffiti_flag; let graffiti_flag_filter = warp::any().map(move || inner_graffiti_flag); - let inner_ctx = ctx.clone(); - let log_filter = warp::any().map(move || inner_ctx.log.clone()); - let inner_slot_clock = ctx.slot_clock.clone(); let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone()); @@ -399,12 +394,10 @@ pub fn serve( .and(validator_store_filter.clone()) .and(graffiti_file_filter.clone()) .and(graffiti_flag_filter) - .and(log_filter.clone()) .then( |validator_store: Arc>, graffiti_file: Option, - graffiti_flag: Option, - log| { + graffiti_flag: Option| { blocking_json_task(move || { let mut result = HashMap::new(); for (key, graffiti_definition) in validator_store @@ -414,7 +407,6 @@ pub fn serve( { let graffiti = determine_graffiti( key, - &log, graffiti_file.clone(), graffiti_definition, graffiti_flag, @@ -834,11 +826,10 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) - .and(log_filter.clone()) - .then(move |request, validator_store, task_executor, log| { + .then(move |request, validator_store, task_executor| { blocking_json_task(move || { if allow_keystore_export { - keystores::export(request, validator_store, task_executor, log) + keystores::export(request, validator_store, task_executor) } else { Err(warp_utils::reject::custom_bad_request( "keystore export is disabled".to_string(), @@ -1079,14 +1070,12 @@ pub fn serve( .and(warp::path::end()) 
.and(validator_store_filter.clone()) .and(slot_clock_filter) - .and(log_filter.clone()) .and(task_executor_filter.clone()) .then( |pubkey: PublicKey, query: api_types::VoluntaryExitQuery, validator_store: Arc>, slot_clock: T, - log, task_executor: TaskExecutor| { blocking_json_task(move || { if let Some(handle) = task_executor.handle() { @@ -1096,7 +1085,6 @@ pub fn serve( query.epoch, validator_store, slot_clock, - log, ))?; Ok(signed_voluntary_exit) } else { @@ -1196,9 +1184,8 @@ pub fn serve( .and(secrets_dir_filter) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) - .and(log_filter.clone()) .then( - move |request, validator_dir, secrets_dir, validator_store, task_executor, log| { + move |request, validator_dir, secrets_dir, validator_store, task_executor| { let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); blocking_json_task(move || { keystores::import( @@ -1207,7 +1194,6 @@ pub fn serve( secrets_dir, validator_store, task_executor, - log, ) }) }, @@ -1218,11 +1204,8 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) - .and(log_filter.clone()) - .then(|request, validator_store, task_executor, log| { - blocking_json_task(move || { - keystores::delete(request, validator_store, task_executor, log) - }) + .then(|request, validator_store, task_executor| { + blocking_json_task(move || keystores::delete(request, validator_store, task_executor)) }); // GET /eth/v1/remotekeys @@ -1237,11 +1220,8 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) - .and(log_filter.clone()) - .then(|request, validator_store, task_executor, log| { - blocking_json_task(move || { - remotekeys::import(request, validator_store, task_executor, log) - }) + .then(|request, validator_store, task_executor| { + blocking_json_task(move || remotekeys::import(request, validator_store, task_executor)) }); // DELETE /eth/v1/remotekeys 
@@ -1249,11 +1229,8 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter) .and(task_executor_filter) - .and(log_filter.clone()) - .then(|request, validator_store, task_executor, log| { - blocking_json_task(move || { - remotekeys::delete(request, validator_store, task_executor, log) - }) + .then(|request, validator_store, task_executor| { + blocking_json_task(move || remotekeys::delete(request, validator_store, task_executor)) }); // Subscribe to get VC logs via Server side events @@ -1271,7 +1248,9 @@ pub fn serve( match msg { Ok(data) => { // Serialize to json - match data.to_json_string() { + match serde_json::to_string(&data) + .map_err(|e| format!("{:?}", e)) + { // Send the json as a Server Sent Event Ok(json) => Event::default().json_data(json).map_err(|e| { warp_utils::reject::server_sent_event_error(format!( @@ -1364,10 +1343,9 @@ pub fn serve( )?; info!( - log, - "HTTP API started"; - "listen_address" => listening_socket.to_string(), - "api_token_file" => ?api_token_path, + listen_address = listening_socket.to_string(), + ?api_token_path, + "HTTP API started" ); Ok((listening_socket, server)) diff --git a/validator_client/http_api/src/remotekeys.rs b/validator_client/http_api/src/remotekeys.rs index 289be57182..49d666f303 100644 --- a/validator_client/http_api/src/remotekeys.rs +++ b/validator_client/http_api/src/remotekeys.rs @@ -8,11 +8,11 @@ use eth2::lighthouse_vc::std_types::{ ListRemotekeysResponse, SingleListRemotekeysResponse, Status, }; use initialized_validators::{Error, InitializedValidators}; -use slog::{info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; +use tracing::{info, warn}; use types::{EthSpec, PublicKeyBytes}; use url::Url; use validator_store::ValidatorStore; @@ -52,12 +52,10 @@ pub fn import( request: ImportRemotekeysRequest, validator_store: Arc>, task_executor: TaskExecutor, - log: Logger, ) -> Result { info!( - log, - "Importing 
remotekeys via standard HTTP API"; - "count" => request.remote_keys.len(), + count = request.remote_keys.len(), + "Importing remotekeys via standard HTTP API" ); // Import each remotekey. Some remotekeys may fail to be imported, so we record a status for each. let mut statuses = Vec::with_capacity(request.remote_keys.len()); @@ -70,10 +68,9 @@ pub fn import( Ok(status) => Status::ok(status), Err(e) => { warn!( - log, - "Error importing keystore, skipped"; - "pubkey" => remotekey.pubkey.to_string(), - "error" => ?e, + pubkey = remotekey.pubkey.to_string(), + error = ?e, + "Error importing keystore, skipped" ); Status::error(ImportRemotekeyStatus::Error, e) } @@ -148,12 +145,10 @@ pub fn delete( request: DeleteRemotekeysRequest, validator_store: Arc>, task_executor: TaskExecutor, - log: Logger, ) -> Result { info!( - log, - "Deleting remotekeys via standard HTTP API"; - "count" => request.pubkeys.len(), + count = request.pubkeys.len(), + "Deleting remotekeys via standard HTTP API" ); // Remove from initialized validators. 
let initialized_validators_rwlock = validator_store.initialized_validators(); @@ -171,10 +166,9 @@ pub fn delete( Ok(status) => Status::ok(status), Err(error) => { warn!( - log, - "Error deleting keystore"; - "pubkey" => ?pubkey_bytes, - "error" => ?error, + pubkey = ?pubkey_bytes, + ?error, + "Error deleting keystore" ); Status::error(DeleteRemotekeyStatus::Error, error) } diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 0531626846..4a5d3b6cc7 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -14,7 +14,6 @@ use eth2::{ use eth2_keystore::KeystoreBuilder; use initialized_validators::key_cache::{KeyCache, CACHE_FILENAME}; use initialized_validators::{InitializedValidators, OnDecryptFailure}; -use logging::test_logger; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; @@ -70,8 +69,6 @@ impl ApiTester { } pub async fn new_with_http_config(http_config: HttpConfig) -> Self { - let log = test_logger(); - let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); let token_path = tempdir().unwrap().path().join(PK_FILENAME); @@ -82,7 +79,6 @@ impl ApiTester { validator_defs, validator_dir.path().into(), Default::default(), - log.clone(), ) .await .unwrap(); @@ -110,11 +106,10 @@ impl ApiTester { slashing_protection, Hash256::repeat_byte(42), spec.clone(), - Some(Arc::new(DoppelgangerService::new(log.clone()))), + Some(Arc::new(DoppelgangerService::default())), slot_clock.clone(), &config, test_runtime.task_executor.clone(), - log.clone(), )); validator_store @@ -134,7 +129,6 @@ impl ApiTester { graffiti_flag: Some(Graffiti::default()), spec, config: http_config, - log, sse_logging_components: None, slot_clock, _phantom: PhantomData, diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 4e9acc4237..5468718fb5 
100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -18,7 +18,6 @@ use eth2::{ Error as ApiError, }; use eth2_keystore::KeystoreBuilder; -use logging::test_logger; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; @@ -61,8 +60,6 @@ impl ApiTester { } pub async fn new_with_config(config: ValidatorStoreConfig) -> Self { - let log = test_logger(); - let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); let token_path = tempdir().unwrap().path().join("api-token.txt"); @@ -73,7 +70,6 @@ impl ApiTester { validator_defs, validator_dir.path().into(), InitializedValidatorsConfig::default(), - log.clone(), ) .await .unwrap(); @@ -100,11 +96,10 @@ impl ApiTester { slashing_protection, Hash256::repeat_byte(42), spec.clone(), - Some(Arc::new(DoppelgangerService::new(log.clone()))), + Some(Arc::new(DoppelgangerService::default())), slot_clock.clone(), &config, test_runtime.task_executor.clone(), - log.clone(), )); validator_store @@ -133,7 +128,6 @@ impl ApiTester { http_token_path: token_path, }, sse_logging_components: None, - log, slot_clock: slot_clock.clone(), _phantom: PhantomData, }); diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml index a3432410bc..f2684da4b1 100644 --- a/validator_client/http_metrics/Cargo.toml +++ b/validator_client/http_metrics/Cargo.toml @@ -7,12 +7,13 @@ authors = ["Sigma Prime "] [dependencies] health_metrics = { workspace = true } lighthouse_version = { workspace = true } +logging = { workspace = true } malloc_utils = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } +tracing = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } validator_services = { workspace = true } diff --git 
a/validator_client/http_metrics/src/lib.rs b/validator_client/http_metrics/src/lib.rs index f1c6d4ed8a..6bf18e7b93 100644 --- a/validator_client/http_metrics/src/lib.rs +++ b/validator_client/http_metrics/src/lib.rs @@ -3,15 +3,16 @@ //! For other endpoints, see the `http_api` crate. use lighthouse_version::version_with_platform; +use logging::crit; use malloc_utils::scrape_allocator_metrics; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; -use slog::{crit, info, Logger}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; +use tracing::info; use types::EthSpec; use validator_services::duties_service::DutiesService; use validator_store::ValidatorStore; @@ -48,7 +49,6 @@ pub struct Shared { pub struct Context { pub config: Config, pub shared: RwLock>, - pub log: Logger, } /// Configuration for the HTTP server. @@ -93,7 +93,6 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result<(SocketAddr, impl Future), Error> { let config = &ctx.config; - let log = ctx.log.clone(); // Configure CORS. let cors_builder = { @@ -110,7 +109,7 @@ pub fn serve( // Sanity check. 
if !config.enabled { - crit!(log, "Cannot start disabled metrics HTTP server"); + crit!("Cannot start disabled metrics HTTP server"); return Err(Error::Other( "A disabled metrics server should not be started".to_string(), )); @@ -151,9 +150,8 @@ pub fn serve( )?; info!( - log, - "Metrics HTTP server started"; - "listen_address" => listening_socket.to_string(), + listen_address = listening_socket.to_string(), + "Metrics HTTP server started" ); Ok((listening_socket, server)) diff --git a/validator_client/initialized_validators/Cargo.toml b/validator_client/initialized_validators/Cargo.toml index 05e85261f9..8b2ae62aea 100644 --- a/validator_client/initialized_validators/Cargo.toml +++ b/validator_client/initialized_validators/Cargo.toml @@ -18,8 +18,8 @@ reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } signing_method = { workspace = true } -slog = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index bd64091dae..cbc1287a85 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -22,13 +22,13 @@ use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use serde::{Deserialize, Serialize}; use signing_method::SigningMethod; -use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::fs::{self, File}; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use tracing::{debug, error, info, warn}; use types::graffiti::GraffitiString; use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; @@ -503,8 +503,6 @@ pub struct 
InitializedValidators { validators: HashMap, /// The clients used for communications with a remote signer. web3_signer_client_map: Option>, - /// For logging via `slog`. - log: Logger, config: Config, } @@ -514,7 +512,6 @@ impl InitializedValidators { definitions: ValidatorDefinitions, validators_dir: PathBuf, config: Config, - log: Logger, ) -> Result { let mut this = Self { validators_dir, @@ -522,7 +519,6 @@ impl InitializedValidators { validators: HashMap::default(), web3_signer_client_map: None, config, - log, }; this.update_validators().await?; Ok(this) @@ -1151,10 +1147,9 @@ impl InitializedValidators { for uuid in cache.uuids() { if !definitions_map.contains_key(uuid) { debug!( - self.log, - "Resetting the key cache"; - "keystore_uuid" => %uuid, - "reason" => "impossible to decrypt due to missing keystore", + keystore_uuid = %uuid, + reason = "impossible to decrypt due to missing keystore", + "Resetting the key cache" ); return Ok(KeyCache::new()); } @@ -1281,30 +1276,27 @@ impl InitializedValidators { self.validators .insert(init.voting_public_key().compress(), init); info!( - self.log, - "Enabled validator"; - "signing_method" => "local_keystore", - "voting_pubkey" => format!("{:?}", def.voting_public_key), + signing_method = "local_keystore", + voting_pubkey = format!("{:?}", def.voting_public_key), + "Enabled validator" ); if let Some(lockfile_path) = existing_lockfile_path { warn!( - self.log, - "Ignored stale lockfile"; - "path" => lockfile_path.display(), - "cause" => "Ungraceful shutdown (harmless) OR \ + path = %lockfile_path.display(), + cause = "Ungraceful shutdown (harmless) OR \ non-Lighthouse client using this keystore \ - (risky)" + (risky)", + "Ignored stale lockfile" ); } } Err(e) => { error!( - self.log, - "Failed to initialize validator"; - "error" => format!("{:?}", e), - "signing_method" => "local_keystore", - "validator" => format!("{:?}", def.voting_public_key) + error = format!("{:?}", e), + signing_method = "local_keystore", + 
validator = format!("{:?}", def.voting_public_key), + "Failed to initialize validator" ); // Exit on an invalid validator. @@ -1327,19 +1319,17 @@ impl InitializedValidators { .insert(init.voting_public_key().compress(), init); info!( - self.log, - "Enabled validator"; - "signing_method" => "remote_signer", - "voting_pubkey" => format!("{:?}", def.voting_public_key), + signing_method = "remote_signer", + voting_pubkey = format!("{:?}", def.voting_public_key), + "Enabled validator" ); } Err(e) => { error!( - self.log, - "Failed to initialize validator"; - "error" => format!("{:?}", e), - "signing_method" => "remote_signer", - "validator" => format!("{:?}", def.voting_public_key) + error = format!("{:?}", e), + signing_method = "remote_signer", + validator = format!("{:?}", def.voting_public_key), + "Failed to initialize validator" ); // Exit on an invalid validator. @@ -1364,9 +1354,8 @@ impl InitializedValidators { } info!( - self.log, - "Disabled validator"; - "voting_pubkey" => format!("{:?}", def.voting_public_key) + voting_pubkey = format!("{:?}", def.voting_public_key), + "Disabled validator" ); } } @@ -1378,23 +1367,18 @@ impl InitializedValidators { } let validators_dir = self.validators_dir.clone(); - let log = self.log.clone(); if has_local_definitions && key_cache.is_modified() { tokio::task::spawn_blocking(move || { match key_cache.save(validators_dir) { - Err(e) => warn!( - log, - "Error during saving of key_cache"; - "err" => format!("{:?}", e) - ), - Ok(true) => info!(log, "Modified key_cache saved successfully"), + Err(e) => warn!(err = format!("{:?}", e), "Error during saving of key_cache"), + Ok(true) => info!("Modified key_cache saved successfully"), _ => {} }; }) .await .map_err(Error::TokioJoin)?; } else { - debug!(log, "Key cache not modified"); + debug!("Key cache not modified"); } // Update the enabled and total validator counts diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml 
index 1a098742d8..88e6dd794d 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -19,6 +19,7 @@ rusqlite = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tempfile = { workspace = true } +tracing = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs new file mode 100644 index 0000000000..5f3e0fe036 --- /dev/null +++ b/validator_client/src/check_synced.rs @@ -0,0 +1,25 @@ +use crate::beacon_node_fallback::CandidateError; +use eth2::{types::Slot, BeaconNodeHttpClient}; +use tracing::warn; + +pub async fn check_node_health( + beacon_node: &BeaconNodeHttpClient, +) -> Result<(Slot, bool, bool), CandidateError> { + let resp = match beacon_node.get_node_syncing().await { + Ok(resp) => resp, + Err(e) => { + warn!( + error = %e, + "Unable to connect to beacon node" + ); + + return Err(CandidateError::Offline); + } + }; + + Ok(( + resp.data.head_slot, + resp.data.is_optimistic, + resp.data.el_offline, + )) +} diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 358bdacf5c..cfc88969c9 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -12,11 +12,11 @@ use graffiti_file::GraffitiFile; use initialized_validators::Config as InitializedValidatorsConfig; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; use std::time::Duration; +use tracing::{info, warn}; use types::GRAFFITI_BYTES_LEN; use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; @@ -141,7 +141,6 @@ impl Config { pub fn from_cli( cli_args: &ArgMatches, validator_client_config: &ValidatorClient, - log: &Logger, ) -> Result { let mut config = Config::default(); @@ -207,7 +206,10 @@ impl Config { .read_graffiti_file() .map_err(|e|
format!("Error reading graffiti file: {:?}", e))?; config.graffiti_file = Some(graffiti_file); - info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path.to_str()); + info!( + path = graffiti_file_path.to_str(), + "Successfully loaded graffiti file" + ); } if let Some(input_graffiti) = validator_client_config.graffiti.as_ref() { @@ -375,10 +377,9 @@ impl Config { config.validator_store.enable_web3signer_slashing_protection = if validator_client_config.disable_slashing_protection_web3signer { warn!( - log, - "Slashing protection for remote keys disabled"; - "info" => "ensure slashing protection on web3signer is enabled or you WILL \ - get slashed" + info = "ensure slashing protection on web3signer is enabled or you WILL \ + get slashed", + "Slashing protection for remote keys disabled" ); false } else { diff --git a/validator_client/src/latency.rs b/validator_client/src/latency.rs index 22f02c7c0b..edd8daa731 100644 --- a/validator_client/src/latency.rs +++ b/validator_client/src/latency.rs @@ -1,9 +1,9 @@ use beacon_node_fallback::BeaconNodeFallback; use environment::RuntimeContext; -use slog::debug; use slot_clock::SlotClock; use std::sync::Arc; use tokio::time::sleep; +use tracing::debug; use types::EthSpec; /// The latency service will run 11/12ths of the way through the slot. 
@@ -17,8 +17,6 @@ pub fn start_latency_service( slot_clock: T, beacon_nodes: Arc>, ) { - let log = context.log().clone(); - let future = async move { loop { let sleep_time = slot_clock @@ -39,10 +37,9 @@ pub fn start_latency_service( for (i, measurement) in beacon_nodes.measure_latency().await.iter().enumerate() { if let Some(latency) = measurement.latency { debug!( - log, - "Measured BN latency"; - "node" => &measurement.beacon_node_id, - "latency" => latency.as_millis(), + node = &measurement.beacon_node_id, + latency = latency.as_millis(), + "Measured BN latency" ); validator_metrics::observe_timer_vec( &validator_metrics::VC_BEACON_NODE_LATENCY, diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 1b91eb71c2..7171dea57b 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -23,7 +23,6 @@ use initialized_validators::Error::UnableToOpenVotingKeystore; use notifier::spawn_notifier; use parking_lot::RwLock; use reqwest::Certificate; -use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::fs::File; @@ -37,6 +36,7 @@ use tokio::{ sync::mpsc, time::{sleep, Duration}, }; +use tracing::{debug, error, info, warn}; use types::{EthSpec, Hash256}; use validator_http_api::ApiSecret; use validator_services::{ @@ -97,7 +97,7 @@ impl ProductionValidatorClient { cli_args: &ArgMatches, validator_client_config: &ValidatorClient, ) -> Result { - let config = Config::from_cli(cli_args, validator_client_config, context.log()) + let config = Config::from_cli(cli_args, validator_client_config) .map_err(|e| format!("Unable to initialize config: {}", e))?; Self::new(context, config).await } @@ -105,8 +105,6 @@ impl ProductionValidatorClient { /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. 
pub async fn new(context: RuntimeContext, config: Config) -> Result { - let log = context.log().clone(); - // Attempt to raise soft fd limit. The behavior is OS specific: // `linux` - raise soft fd limit to hard // `macos` - raise soft fd limit to `min(kernel limit, hard fd limit)` @@ -114,25 +112,20 @@ impl ProductionValidatorClient { match fdlimit::raise_fd_limit().map_err(|e| format!("Unable to raise fd limit: {}", e))? { fdlimit::Outcome::LimitRaised { from, to } => { debug!( - log, - "Raised soft open file descriptor resource limit"; - "old_limit" => from, - "new_limit" => to + old_limit = from, + new_limit = to, + "Raised soft open file descriptor resource limit" ); } fdlimit::Outcome::Unsupported => { - debug!( - log, - "Raising soft open file descriptor resource limit is not supported" - ); + debug!("Raising soft open file descriptor resource limit is not supported"); } }; info!( - log, - "Starting validator client"; - "beacon_nodes" => format!("{:?}", &config.beacon_nodes), - "validator_dir" => format!("{:?}", config.validator_dir), + beacon_nodes = ?config.beacon_nodes, + validator_dir = ?config.validator_dir, + "Starting validator client" ); // Optionally start the metrics server. @@ -147,7 +140,6 @@ impl ProductionValidatorClient { Arc::new(validator_http_metrics::Context { config: config.http_metrics.clone(), shared: RwLock::new(shared), - log: log.clone(), }); let exit = context.executor.exit(); @@ -162,15 +154,14 @@ impl ProductionValidatorClient { Some(ctx) } else { - info!(log, "HTTP metrics server is disabled"); + info!("HTTP metrics server is disabled"); None }; // Start the explorer client which periodically sends validator process // and system metrics to the configured endpoint. 
if let Some(monitoring_config) = &config.monitoring_api { - let monitoring_client = - MonitoringHttpClient::new(monitoring_config, context.log().clone())?; + let monitoring_client = MonitoringHttpClient::new(monitoring_config)?; monitoring_client.auto_update( context.executor.clone(), vec![ProcessType::Validator, ProcessType::System], @@ -182,7 +173,7 @@ impl ProductionValidatorClient { if !config.disable_auto_discover { let new_validators = validator_defs - .discover_local_keystores(&config.validator_dir, &config.secrets_dir, &log) + .discover_local_keystores(&config.validator_dir, &config.secrets_dir) .map_err(|e| format!("Unable to discover local validator keystores: {:?}", e))?; validator_defs.save(&config.validator_dir).map_err(|e| { format!( @@ -190,18 +181,13 @@ impl ProductionValidatorClient { e ) })?; - info!( - log, - "Completed validator discovery"; - "new_validators" => new_validators, - ); + info!(new_validators, "Completed validator discovery"); } let validators = InitializedValidators::from_definitions( validator_defs, config.validator_dir.clone(), config.initialized_validators.clone(), - log.clone(), ) .await .map_err(|e| { @@ -218,17 +204,17 @@ impl ProductionValidatorClient { let voting_pubkeys: Vec<_> = validators.iter_voting_pubkeys().collect(); info!( - log, - "Initialized validators"; - "disabled" => validators.num_total().saturating_sub(validators.num_enabled()), - "enabled" => validators.num_enabled(), + disabled = validators + .num_total() + .saturating_sub(validators.num_enabled()), + enabled = validators.num_enabled(), + "Initialized validators" ); if voting_pubkeys.is_empty() { warn!( - log, - "No enabled validators"; - "hint" => "create validators via the API, or the `lighthouse account` CLI command" + hint = "create validators via the API, or the `lighthouse account` CLI command", + "No enabled validators" ); } @@ -303,10 +289,7 @@ impl ProductionValidatorClient { // Use quicker timeouts if a fallback beacon node exists. 
let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts { - info!( - log, - "Fallback endpoints are available, using optimized timeouts."; - ); + info!("Fallback endpoints are available, using optimized timeouts."); Timeouts { attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, @@ -389,7 +372,6 @@ impl ProductionValidatorClient { config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), - log.clone(), ); let mut proposer_nodes: BeaconNodeFallback<_, E> = BeaconNodeFallback::new( @@ -397,12 +379,11 @@ impl ProductionValidatorClient { config.beacon_node_fallback, config.broadcast_topics.clone(), context.eth2_config.spec.clone(), - log.clone(), ); // Perform some potentially long-running initialization tasks. let (genesis_time, genesis_validators_root) = tokio::select! { - tuple = init_from_beacon_node(&beacon_nodes, &proposer_nodes, &context) => tuple?, + tuple = init_from_beacon_node(&beacon_nodes, &proposer_nodes) => tuple?, () = context.executor.exit() => return Err("Shutting down".to_string()) }; @@ -427,12 +408,7 @@ impl ProductionValidatorClient { start_fallback_updater_service(context.clone(), proposer_nodes.clone())?; let doppelganger_service = if config.enable_doppelganger_protection { - Some(Arc::new(DoppelgangerService::new( - context - .service_context(DOPPELGANGER_SERVICE_NAME.into()) - .log() - .clone(), - ))) + Some(Arc::new(DoppelgangerService::default())) } else { None }; @@ -446,16 +422,14 @@ impl ProductionValidatorClient { slot_clock.clone(), &config.validator_store, context.executor.clone(), - log.clone(), )); // Ensure all validators are registered in doppelganger protection. 
validator_store.register_all_in_doppelganger_protection_if_enabled()?; info!( - log, - "Loaded validator keypair store"; - "voting_validators" => validator_store.num_voting_validators() + voting_validators = validator_store.num_voting_validators(), + "Loaded validator keypair store" ); // Perform pruning of the slashing protection database on start-up. In case the database is @@ -551,7 +525,6 @@ impl ProductionValidatorClient { // whole epoch! let channel_capacity = E::slots_per_epoch() as usize; let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); - let log = self.context.log(); let api_secret = ApiSecret::create_or_open(&self.config.http_api.http_token_path)?; @@ -569,7 +542,6 @@ impl ProductionValidatorClient { config: self.config.http_api.clone(), sse_logging_components: self.context.sse_logging_components.clone(), slot_clock: self.slot_clock.clone(), - log: log.clone(), _phantom: PhantomData, }); @@ -585,12 +557,12 @@ impl ProductionValidatorClient { Some(listen_addr) } else { - info!(log, "HTTP API server is disabled"); + info!("HTTP API server is disabled"); None }; // Wait until genesis has occurred. - wait_for_genesis(&self.beacon_nodes, self.genesis_time, &self.context).await?; + wait_for_genesis(&self.beacon_nodes, self.genesis_time).await?; duties_service::start_update_service(self.duties_service.clone(), block_service_tx); @@ -625,7 +597,7 @@ impl ProductionValidatorClient { ) .map_err(|e| format!("Unable to start doppelganger service: {}", e))? 
} else { - info!(log, "Doppelganger protection disabled.") + info!("Doppelganger protection disabled.") } spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?; @@ -645,7 +617,6 @@ impl ProductionValidatorClient { async fn init_from_beacon_node( beacon_nodes: &BeaconNodeFallback, proposer_nodes: &BeaconNodeFallback, - context: &RuntimeContext, ) -> Result<(u64, Hash256), String> { loop { beacon_nodes.update_all_candidates().await; @@ -659,41 +630,37 @@ async fn init_from_beacon_node( if proposer_total > 0 && proposer_available == 0 { warn!( - context.log(), - "Unable to connect to a proposer node"; - "retry in" => format!("{} seconds", RETRY_DELAY.as_secs()), - "total_proposers" => proposer_total, - "available_proposers" => proposer_available, - "total_beacon_nodes" => num_total, - "available_beacon_nodes" => num_available, + retry_in = format!("{} seconds", RETRY_DELAY.as_secs()), + total_proposers = proposer_total, + available_proposers = proposer_available, + total_beacon_nodes = num_total, + available_beacon_nodes = num_available, + "Unable to connect to a proposer node" ); } if num_available > 0 && proposer_available == 0 { info!( - context.log(), - "Initialized beacon node connections"; - "total" => num_total, - "available" => num_available, + total = num_total, + available = num_available, + "Initialized beacon node connections" ); break; } else if num_available > 0 { info!( - context.log(), - "Initialized beacon node connections"; - "total" => num_total, - "available" => num_available, - "proposers_available" => proposer_available, - "proposers_total" => proposer_total, + total = num_total, + available = num_available, + proposer_available, + proposer_total, + "Initialized beacon node connections" ); break; } else { warn!( - context.log(), - "Unable to connect to a beacon node"; - "retry in" => format!("{} seconds", RETRY_DELAY.as_secs()), - "total" => num_total, - "available" => num_available, + retry_in = format!("{} seconds", 
RETRY_DELAY.as_secs()), + total = num_total, + available = num_available, + "Unable to connect to a beacon node" ); sleep(RETRY_DELAY).await; } @@ -714,15 +681,11 @@ async fn init_from_beacon_node( .filter_map(|(_, e)| e.request_failure()) .any(|e| e.status() == Some(StatusCode::NOT_FOUND)) { - info!( - context.log(), - "Waiting for genesis"; - ); + info!("Waiting for genesis"); } else { error!( - context.log(), - "Errors polling beacon node"; - "error" => %errors + %errors, + "Errors polling beacon node" ); } } @@ -737,7 +700,6 @@ async fn init_from_beacon_node( async fn wait_for_genesis( beacon_nodes: &BeaconNodeFallback, genesis_time: u64, - context: &RuntimeContext, ) -> Result<(), String> { let now = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -751,28 +713,25 @@ async fn wait_for_genesis( // the slot clock. if now < genesis_time { info!( - context.log(), - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis_time - now).as_secs() + seconds_to_wait = (genesis_time - now).as_secs(), + "Starting node prior to genesis" ); // Start polling the node for pre-genesis information, cancelling the polling as soon as the // timer runs out. tokio::select! 
{ - result = poll_whilst_waiting_for_genesis(beacon_nodes, genesis_time, context.log()) => result?, + result = poll_whilst_waiting_for_genesis(beacon_nodes, genesis_time) => result?, () = sleep(genesis_time - now) => () }; info!( - context.log(), - "Genesis has occurred"; - "ms_since_genesis" => (genesis_time - now).as_millis() + ms_since_genesis = (genesis_time - now).as_millis(), + "Genesis has occurred" ); } else { info!( - context.log(), - "Genesis has already occurred"; - "seconds_ago" => (now - genesis_time).as_secs() + seconds_ago = (now - genesis_time).as_secs(), + "Genesis has already occurred" ); } @@ -784,7 +743,6 @@ async fn wait_for_genesis( async fn poll_whilst_waiting_for_genesis( beacon_nodes: &BeaconNodeFallback, genesis_time: Duration, - log: &Logger, ) -> Result<(), String> { loop { match beacon_nodes @@ -798,19 +756,17 @@ async fn poll_whilst_waiting_for_genesis( if !is_staking { error!( - log, - "Staking is disabled for beacon node"; - "msg" => "this will caused missed duties", - "info" => "see the --staking CLI flag on the beacon node" + msg = "this will caused missed duties", + info = "see the --staking CLI flag on the beacon node", + "Staking is disabled for beacon node" ); } if now < genesis_time { info!( - log, - "Waiting for genesis"; - "bn_staking_enabled" => is_staking, - "seconds_to_wait" => (genesis_time - now).as_secs() + bn_staking_enabled = is_staking, + seconds_to_wait = (genesis_time - now).as_secs(), + "Waiting for genesis" ); } else { break Ok(()); @@ -818,9 +774,8 @@ async fn poll_whilst_waiting_for_genesis( } Err(e) => { error!( - log, - "Error polling beacon node"; - "error" => %e + error = %e, + "Error polling beacon node" ); } } diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index ff66517795..75b3d46457 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,8 +1,8 @@ use crate::{DutiesService, ProductionValidatorClient}; use metrics::set_gauge; -use 
slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; +use tracing::{debug, error, info}; use types::EthSpec; /// Spawns a notifier service which periodically logs information about the node. @@ -14,14 +14,12 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot); let interval_fut = async move { - let log = context.log(); - loop { if let Some(duration_to_next_slot) = duties_service.slot_clock.duration_to_next_slot() { sleep(duration_to_next_slot + slot_duration / 2).await; - notify(&duties_service, log).await; + notify(&duties_service).await; } else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; continue; @@ -34,10 +32,7 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu } /// Performs a single notification routine. 
-async fn notify( - duties_service: &DutiesService, - log: &Logger, -) { +async fn notify(duties_service: &DutiesService) { let (candidate_info, num_available, num_synced) = duties_service.beacon_nodes.get_notifier_info().await; let num_total = candidate_info.len(); @@ -61,20 +56,18 @@ async fn notify( .map(|candidate| candidate.endpoint.as_str()) .unwrap_or("None"); info!( - log, - "Connected to beacon node(s)"; - "primary" => primary, - "total" => num_total, - "available" => num_available, - "synced" => num_synced, + primary, + total = num_total, + available = num_available, + synced = num_synced, + "Connected to beacon node(s)" ) } else { error!( - log, - "No synced beacon nodes"; - "total" => num_total, - "available" => num_available, - "synced" => num_synced, + total = num_total, + available = num_available, + synced = num_synced, + "No synced beacon nodes" ) } if num_synced_fallback > 0 { @@ -86,23 +79,21 @@ async fn notify( for info in candidate_info { if let Ok(health) = info.health { debug!( - log, - "Beacon node info"; - "status" => "Connected", - "index" => info.index, - "endpoint" => info.endpoint, - "head_slot" => %health.head, - "is_optimistic" => ?health.optimistic_status, - "execution_engine_status" => ?health.execution_status, - "health_tier" => %health.health_tier, + status = "Connected", + index = info.index, + endpoint = info.endpoint, + head_slot = %health.head, + is_optimistic = ?health.optimistic_status, + execution_engine_status = ?health.execution_status, + health_tier = %health.health_tier, + "Beacon node info" ); } else { debug!( - log, - "Beacon node info"; - "status" => "Disconnected", - "index" => info.index, - "endpoint" => info.endpoint, + status = "Disconnected", + index = info.index, + endpoint = info.endpoint, + "Beacon node info" ); } } @@ -116,45 +107,44 @@ async fn notify( let doppelganger_detecting_validators = duties_service.doppelganger_detecting_count(); if doppelganger_detecting_validators > 0 { - info!(log, "Listening for 
doppelgangers"; "doppelganger_detecting_validators" => doppelganger_detecting_validators) + info!( + doppelganger_detecting_validators, + "Listening for doppelgangers" + ) } if total_validators == 0 { info!( - log, - "No validators present"; - "msg" => "see `lighthouse vm create --help` or the HTTP API documentation" + msg = "see `lighthouse vm create --help` or the HTTP API documentation", + "No validators present" ) } else if total_validators == attesting_validators { info!( - log, - "All validators active"; - "current_epoch_proposers" => proposing_validators, - "active_validators" => attesting_validators, - "total_validators" => total_validators, - "epoch" => format!("{}", epoch), - "slot" => format!("{}", slot), + current_epoch_proposers = proposing_validators, + active_validators = attesting_validators, + total_validators = total_validators, + %epoch, + %slot, + "All validators active" ); } else if attesting_validators > 0 { info!( - log, - "Some validators active"; - "current_epoch_proposers" => proposing_validators, - "active_validators" => attesting_validators, - "total_validators" => total_validators, - "epoch" => format!("{}", epoch), - "slot" => format!("{}", slot), + current_epoch_proposers = proposing_validators, + active_validators = attesting_validators, + total_validators = total_validators, + %epoch, + %slot, + "Some validators active" ); } else { info!( - log, - "Awaiting activation"; - "validators" => total_validators, - "epoch" => format!("{}", epoch), - "slot" => format!("{}", slot), + validators = total_validators, + %epoch, + %slot, + "Awaiting activation" ); } } else { - error!(log, "Unable to read slot clock"); + error!("Unable to read slot clock"); } } diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index b4495a7c81..4b023bb40a 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -13,11 +13,12 @@ environment = { 
workspace = true } eth2 = { workspace = true } futures = { workspace = true } graffiti_file = { workspace = true } +logging = { workspace = true } parking_lot = { workspace = true } safe_arith = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index 961741a977..8e098b81b0 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -3,12 +3,13 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use either::Either; use environment::RuntimeContext; use futures::future::join_all; -use slog::{crit, debug, error, info, trace, warn}; +use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use tokio::time::{sleep, sleep_until, Duration, Instant}; +use tracing::{debug, error, info, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -128,9 +129,8 @@ impl Deref for AttestationService { impl AttestationService { /// Starts the service which periodically produces attestations. 
pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { - let log = self.context.log().clone(); if self.disable { - info!(log, "Attestation service disabled"); + info!("Attestation service disabled"); return Ok(()); } @@ -141,9 +141,8 @@ impl AttestationService { .ok_or("Unable to determine duration to next slot")?; info!( - log, - "Attestation production service started"; - "next_update_millis" => duration_to_next_slot.as_millis() + next_update_millis = duration_to_next_slot.as_millis(), + "Attestation production service started" ); let executor = self.context.executor.clone(); @@ -152,22 +151,14 @@ impl AttestationService { loop { if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { sleep(duration_to_next_slot + slot_duration / 3).await; - let log = self.context.log(); if let Err(e) = self.spawn_attestation_tasks(slot_duration) { - crit!( - log, - "Failed to spawn attestation tasks"; - "error" => e - ) + crit!(error = e, "Failed to spawn attestation tasks") } else { - trace!( - log, - "Spawned attestation tasks"; - ) + trace!("Spawned attestation tasks"); } } else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. 
sleep(slot_duration).await; continue; @@ -249,7 +240,6 @@ impl AttestationService { validator_duties: Vec, aggregate_production_instant: Instant, ) -> Result<(), ()> { - let log = self.context.log(); let attestations_timer = validator_metrics::start_timer_vec( &validator_metrics::ATTESTATION_SERVICE_TIMES, &[validator_metrics::ATTESTATIONS], @@ -269,11 +259,10 @@ impl AttestationService { .await .map_err(move |e| { crit!( - log, - "Error during attestation routine"; - "error" => format!("{:?}", e), - "committee_index" => committee_index, - "slot" => slot.as_u64(), + error = format!("{:?}", e), + committee_index, + slot = slot.as_u64(), + "Error during attestation routine" ) })?; @@ -306,11 +295,10 @@ impl AttestationService { .await .map_err(move |e| { crit!( - log, - "Error during attestation routine"; - "error" => format!("{:?}", e), - "committee_index" => committee_index, - "slot" => slot.as_u64(), + error = format!("{:?}", e), + committee_index, + slot = slot.as_u64(), + "Error during attestation routine" ) })?; } @@ -336,8 +324,6 @@ impl AttestationService { committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], ) -> Result, String> { - let log = self.context.log(); - if validator_duties.is_empty() { return Ok(None); } @@ -373,13 +359,12 @@ impl AttestationService { // Ensure that the attestation matches the duties. 
if !duty.match_attestation_data::(attestation_data, &self.context.eth2_config.spec) { crit!( - log, - "Inconsistent validator duties during signing"; - "validator" => ?duty.pubkey, - "duty_slot" => duty.slot, - "attestation_slot" => attestation_data.slot, - "duty_index" => duty.committee_index, - "attestation_index" => attestation_data.index, + validator = ?duty.pubkey, + duty_slot = %duty.slot, + attestation_slot = %attestation_data.slot, + duty_index = duty.committee_index, + attestation_index = attestation_data.index, + "Inconsistent validator duties during signing" ); return None; } @@ -396,11 +381,10 @@ impl AttestationService { Ok(attestation) => attestation, Err(err) => { crit!( - log, - "Invalid validator duties during signing"; - "validator" => ?duty.pubkey, - "duty" => ?duty, - "err" => ?err, + validator = ?duty.pubkey, + ?duty, + ?err, + "Invalid validator duties during signing" ); return None; } @@ -421,24 +405,22 @@ impl AttestationService { // A pubkey can be missing when a validator was recently // removed via the API. 
warn!( - log, - "Missing pubkey for attestation"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "validator" => ?duty.pubkey, - "committee_index" => committee_index, - "slot" => slot.as_u64(), + info = "a validator may have recently been removed from this VC", + pubkey = ?pubkey, + validator = ?duty.pubkey, + committee_index = committee_index, + slot = slot.as_u64(), + "Missing pubkey for attestation" ); None } Err(e) => { crit!( - log, - "Failed to sign attestation"; - "error" => ?e, - "validator" => ?duty.pubkey, - "committee_index" => committee_index, - "slot" => slot.as_u64(), + error = ?e, + validator = ?duty.pubkey, + committee_index, + slot = slot.as_u64(), + "Failed to sign attestation" ); None } @@ -453,7 +435,7 @@ impl AttestationService { .unzip(); if attestations.is_empty() { - warn!(log, "No attestations were published"); + warn!("No attestations were published"); return Ok(None); } let fork_name = self @@ -481,12 +463,11 @@ impl AttestationService { // This shouldn't happen unless BN and VC are out of sync with // respect to the Electra fork. 
error!( - log, - "Unable to convert to SingleAttestation"; - "error" => ?e, - "committee_index" => attestation_data.index, - "slot" => slot.as_u64(), - "type" => "unaggregated", + error = ?e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to convert to SingleAttestation" ); None } @@ -509,22 +490,20 @@ impl AttestationService { .await { Ok(()) => info!( - log, - "Successfully published attestations"; - "count" => attestations.len(), - "validator_indices" => ?validator_indices, - "head_block" => ?attestation_data.beacon_block_root, - "committee_index" => attestation_data.index, - "slot" => attestation_data.slot.as_u64(), - "type" => "unaggregated", + count = attestations.len(), + validator_indices = ?validator_indices, + head_block = ?attestation_data.beacon_block_root, + committee_index = attestation_data.index, + slot = attestation_data.slot.as_u64(), + "type" = "unaggregated", + "Successfully published attestations" ), Err(e) => error!( - log, - "Unable to publish attestations"; - "error" => %e, - "committee_index" => attestation_data.index, - "slot" => slot.as_u64(), - "type" => "unaggregated", + error = %e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to publish attestations" ), } @@ -550,8 +529,6 @@ impl AttestationService { committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], ) -> Result<(), String> { - let log = self.context.log(); - if !validator_duties .iter() .any(|duty_and_proof| duty_and_proof.selection_proof.is_some()) @@ -609,7 +586,7 @@ impl AttestationService { let selection_proof = duty_and_proof.selection_proof.as_ref()?; if !duty.match_attestation_data::(attestation_data, &self.context.eth2_config.spec) { - crit!(log, "Inconsistent validator duties during signing"); + crit!("Inconsistent validator duties during signing"); return None; } @@ -627,19 +604,14 @@ impl AttestationService { 
Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently // removed via the API. - debug!( - log, - "Missing pubkey for aggregate"; - "pubkey" => ?pubkey, - ); + debug!(?pubkey, "Missing pubkey for aggregate"); None } Err(e) => { crit!( - log, - "Failed to sign aggregate"; - "error" => ?e, - "pubkey" => ?duty.pubkey, + error = ?e, + pubkey = ?duty.pubkey, + "Failed to sign aggregate" ); None } @@ -683,14 +655,13 @@ impl AttestationService { for signed_aggregate_and_proof in signed_aggregate_and_proofs { let attestation = signed_aggregate_and_proof.message().aggregate(); info!( - log, - "Successfully published attestation"; - "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), - "signatures" => attestation.num_set_aggregation_bits(), - "head_block" => format!("{:?}", attestation.data().beacon_block_root), - "committee_index" => attestation.committee_index(), - "slot" => attestation.data().slot.as_u64(), - "type" => "aggregated", + aggregator = signed_aggregate_and_proof.message().aggregator_index(), + signatures = attestation.num_set_aggregation_bits(), + head_block = format!("{:?}", attestation.data().beacon_block_root), + committee_index = attestation.committee_index(), + slot = attestation.data().slot.as_u64(), + "type" = "aggregated", + "Successfully published attestation" ); } } @@ -698,13 +669,12 @@ impl AttestationService { for signed_aggregate_and_proof in signed_aggregate_and_proofs { let attestation = &signed_aggregate_and_proof.message().aggregate(); crit!( - log, - "Failed to publish attestation"; - "error" => %e, - "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), - "committee_index" => attestation.committee_index(), - "slot" => attestation.data().slot.as_u64(), - "type" => "aggregated", + error = %e, + aggregator = signed_aggregate_and_proof.message().aggregator_index(), + committee_index = attestation.committee_index(), + slot = 
attestation.data().slot.as_u64(), + "type" = "aggregated", + "Failed to publish attestation" ); } } diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index 60eb0361ad..d2dbbb656e 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -4,7 +4,7 @@ use environment::RuntimeContext; use eth2::types::{FullBlockContents, PublishBlockRequest}; use eth2::{BeaconNodeHttpClient, StatusCode}; use graffiti_file::{determine_graffiti, GraffitiFile}; -use slog::{crit, debug, error, info, trace, warn, Logger}; +use logging::crit; use slot_clock::SlotClock; use std::fmt::Debug; use std::future::Future; @@ -12,6 +12,7 @@ use std::ops::Deref; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; +use tracing::{debug, error, info, trace, warn}; use types::{ BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock, Slot, @@ -219,9 +220,7 @@ impl BlockService { self, mut notification_rx: mpsc::Receiver, ) -> Result<(), String> { - let log = self.context.log().clone(); - - info!(log, "Block production service started"); + info!("Block production service started"); let executor = self.inner.context.executor.clone(); @@ -230,7 +229,7 @@ impl BlockService { while let Some(notif) = notification_rx.recv().await { self.do_update(notif).await.ok(); } - debug!(log, "Block service shutting down"); + debug!("Block service shutting down"); }, "block_service", ); @@ -240,64 +239,54 @@ impl BlockService { /// Attempt to produce a block for any block producers in the `ValidatorStore`. 
async fn do_update(&self, notification: BlockServiceNotification) -> Result<(), ()> { - let log = self.context.log(); let _timer = validator_metrics::start_timer_vec( &validator_metrics::BLOCK_SERVICE_TIMES, &[validator_metrics::FULL_UPDATE], ); let slot = self.slot_clock.now().ok_or_else(move || { - crit!(log, "Duties manager failed to read slot clock"); + crit!("Duties manager failed to read slot clock"); })?; if notification.slot != slot { warn!( - log, - "Skipping block production for expired slot"; - "current_slot" => slot.as_u64(), - "notification_slot" => notification.slot.as_u64(), - "info" => "Your machine could be overloaded" + current_slot = slot.as_u64(), + notification_slot = notification.slot.as_u64(), + info = "Your machine could be overloaded", + "Skipping block production for expired slot" ); return Ok(()); } if slot == self.context.eth2_config.spec.genesis_slot { debug!( - log, - "Not producing block at genesis slot"; - "proposers" => format!("{:?}", notification.block_proposers), + proposers = format!("{:?}", notification.block_proposers), + "Not producing block at genesis slot" ); return Ok(()); } - trace!( - log, - "Block service update started"; - "slot" => slot.as_u64() - ); + trace!(slot = slot.as_u64(), "Block service update started"); let proposers = notification.block_proposers; if proposers.is_empty() { trace!( - log, - "No local block proposers for this slot"; - "slot" => slot.as_u64() + slot = slot.as_u64(), + "No local block proposers for this slot" ) } else if proposers.len() > 1 { error!( - log, - "Multiple block proposers for this slot"; - "action" => "producing blocks for all proposers", - "num_proposers" => proposers.len(), - "slot" => slot.as_u64(), + action = "producing blocks for all proposers", + num_proposers = proposers.len(), + slot = slot.as_u64(), + "Multiple block proposers for this slot" ) } for validator_pubkey in proposers { let builder_boost_factor = self.get_builder_boost_factor(&validator_pubkey); let service = 
self.clone(); - let log = log.clone(); self.inner.context.executor.spawn( async move { let result = service @@ -308,11 +297,10 @@ impl BlockService { Ok(_) => {} Err(BlockError::Recoverable(e)) | Err(BlockError::Irrecoverable(e)) => { error!( - log, - "Error whilst producing block"; - "error" => ?e, - "block_slot" => ?slot, - "info" => "block v3 proposal failed, this error may or may not result in a missed block" + error = ?e, + block_slot = ?slot, + info = "block v3 proposal failed, this error may or may not result in a missed block", + "Error whilst producing block" ); } } @@ -332,7 +320,6 @@ impl BlockService { validator_pubkey: &PublicKeyBytes, unsigned_block: UnsignedBlock, ) -> Result<(), BlockError> { - let log = self.context.log(); let signing_timer = validator_metrics::start_timer(&validator_metrics::BLOCK_SIGNING_TIMES); let res = match unsigned_block { @@ -357,11 +344,10 @@ impl BlockService { // A pubkey can be missing when a validator was recently removed // via the API. warn!( - log, - "Missing pubkey for block"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot + info = "a validator may have recently been removed from this VC", + ?pubkey, + ?slot, + "Missing pubkey for block" ); return Ok(()); } @@ -377,10 +363,9 @@ impl BlockService { Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); info!( - log, - "Publishing signed block"; - "slot" => slot.as_u64(), - "signing_time_ms" => signing_time_ms, + slot = slot.as_u64(), + signing_time_ms = signing_time_ms, + "Publishing signed block" ); // Publish block with first available beacon node. 
@@ -396,13 +381,12 @@ impl BlockService { .await?; info!( - log, - "Successfully published block"; - "block_type" => ?signed_block.block_type(), - "deposits" => signed_block.num_deposits(), - "attestations" => signed_block.num_attestations(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), + block_type = ?signed_block.block_type(), + deposits = signed_block.num_deposits(), + attestations = signed_block.num_attestations(), + graffiti = ?graffiti.map(|g| g.as_utf8_lossy()), + slot = signed_block.slot().as_u64(), + "Successfully published block" ); Ok(()) } @@ -413,7 +397,6 @@ impl BlockService { validator_pubkey: PublicKeyBytes, builder_boost_factor: Option, ) -> Result<(), BlockError> { - let log = self.context.log(); let _timer = validator_metrics::start_timer_vec( &validator_metrics::BLOCK_SERVICE_TIMES, &[validator_metrics::BEACON_BLOCK], @@ -429,11 +412,10 @@ impl BlockService { // A pubkey can be missing when a validator was recently removed // via the API. warn!( - log, - "Missing pubkey for block randao"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot + info = "a validator may have recently been removed from this VC", + ?pubkey, + ?slot, + "Missing pubkey for block randao" ); return Ok(()); } @@ -447,7 +429,6 @@ impl BlockService { let graffiti = determine_graffiti( &validator_pubkey, - log, self.graffiti_file.clone(), self.validator_store.graffiti(&validator_pubkey), self.graffiti, @@ -461,11 +442,7 @@ impl BlockService { proposer_nodes: self.proposer_nodes.clone(), }; - info!( - log, - "Requesting unsigned block"; - "slot" => slot.as_u64(), - ); + info!(slot = slot.as_u64(), "Requesting unsigned block"); // Request block from first responsive beacon node. 
// @@ -484,7 +461,6 @@ impl BlockService { graffiti, proposer_index, builder_boost_factor, - log, ) .await .map_err(|e| { @@ -514,7 +490,6 @@ impl BlockService { signed_block: &SignedBlock, beacon_node: BeaconNodeHttpClient, ) -> Result<(), BlockError> { - let log = self.context.log(); let slot = signed_block.slot(); match signed_block { SignedBlock::Full(signed_block) => { @@ -525,7 +500,7 @@ impl BlockService { beacon_node .post_beacon_blocks_v2_ssz(signed_block, None) .await - .or_else(|e| handle_block_post_error(e, slot, log))? + .or_else(|e| handle_block_post_error(e, slot))? } SignedBlock::Blinded(signed_block) => { let _post_timer = validator_metrics::start_timer_vec( @@ -535,7 +510,7 @@ impl BlockService { beacon_node .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await - .or_else(|e| handle_block_post_error(e, slot, log))? + .or_else(|e| handle_block_post_error(e, slot))? } } Ok::<_, BlockError>(()) @@ -548,7 +523,6 @@ impl BlockService { graffiti: Option, proposer_index: Option, builder_boost_factor: Option, - log: &Logger, ) -> Result, BlockError> { let (block_response, _) = beacon_node .get_validator_blocks_v3::( @@ -570,11 +544,7 @@ impl BlockService { eth2::types::ProduceBlockV3Response::Blinded(block) => UnsignedBlock::Blinded(block), }; - info!( - log, - "Received unsigned block"; - "slot" => slot.as_u64(), - ); + info!(slot = slot.as_u64(), "Received unsigned block"); if proposer_index != Some(unsigned_block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), @@ -662,23 +632,21 @@ impl SignedBlock { } } -fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> { +fn handle_block_post_error(err: eth2::Error, slot: Slot) -> Result<(), BlockError> { // Handle non-200 success codes. 
if let Some(status) = err.status() { if status == StatusCode::ACCEPTED { info!( - log, - "Block is already known to BN or might be invalid"; - "slot" => slot, - "status_code" => status.as_u16(), + %slot, + status_code = status.as_u16(), + "Block is already known to BN or might be invalid" ); return Ok(()); } else if status.is_success() { debug!( - log, - "Block published with non-standard success code"; - "slot" => slot, - "status_code" => status.as_u16(), + %slot, + status_code = status.as_u16(), + "Block published with non-standard success code" ); return Ok(()); } diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index 7437ff8bcf..0921f95298 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -18,7 +18,6 @@ use eth2::types::{ use futures::{stream, StreamExt}; use parking_lot::RwLock; use safe_arith::{ArithError, SafeArith}; -use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; @@ -26,6 +25,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::{sync::mpsc::Sender, time::sleep}; +use tracing::{debug, error, info, warn}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; use validator_metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -378,7 +378,6 @@ pub fn start_update_service( * Spawn the task which keeps track of local block proposal duties. 
*/ let duties_service = core_duties_service.clone(); - let log = core_duties_service.context.log().clone(); core_duties_service.context.executor.spawn( async move { loop { @@ -394,9 +393,8 @@ pub fn start_update_service( if let Err(e) = poll_beacon_proposers(&duties_service, &mut block_service_tx).await { error!( - log, - "Failed to poll beacon proposers"; - "error" => ?e + error = ?e, + "Failed to poll beacon proposers" ) } } @@ -413,7 +411,6 @@ pub fn start_update_service( * Spawn the task which keeps track of local attestation duties. */ let duties_service = core_duties_service.clone(); - let log = core_duties_service.context.log().clone(); core_duties_service.context.executor.spawn( async move { loop { @@ -428,9 +425,8 @@ pub fn start_update_service( if let Err(e) = poll_beacon_attesters(&duties_service).await { error!( - log, - "Failed to poll beacon attesters"; - "error" => ?e + error = ?e, + "Failed to poll beacon attesters" ); } } @@ -440,15 +436,13 @@ pub fn start_update_service( // Spawn the task which keeps track of local sync committee duties. let duties_service = core_duties_service.clone(); - let log = core_duties_service.context.log().clone(); core_duties_service.context.executor.spawn( async move { loop { if let Err(e) = poll_sync_committee_duties(&duties_service).await { error!( - log, - "Failed to poll sync committee duties"; - "error" => ?e + error = ?e, + "Failed to poll sync committee duties" ); } @@ -480,8 +474,6 @@ async fn poll_validator_indices( &[validator_metrics::UPDATE_INDICES], ); - let log = duties_service.context.log(); - // Collect *all* pubkeys for resolving indices, even those undergoing doppelganger protection. 
// // Since doppelganger protection queries rely on validator indices it is important to ensure we @@ -547,11 +539,10 @@ async fn poll_validator_indices( match download_result { Ok(Some(response)) => { info!( - log, - "Validator exists in beacon chain"; - "pubkey" => ?pubkey, - "validator_index" => response.data.index, - "fee_recipient" => fee_recipient + ?pubkey, + validator_index = response.data.index, + fee_recipient, + "Validator exists in beacon chain" ); duties_service .validator_store @@ -575,21 +566,15 @@ async fn poll_validator_indices( .insert(pubkey, next_poll_slot); } - debug!( - log, - "Validator without index"; - "pubkey" => ?pubkey, - "fee_recipient" => fee_recipient - ) + debug!(?pubkey, fee_recipient, "Validator without index") } // Don't exit early on an error, keep attempting to resolve other indices. Err(e) => { error!( - log, - "Failed to resolve pubkey to index"; - "error" => %e, - "pubkey" => ?pubkey, - "fee_recipient" => fee_recipient + error = %e, + ?pubkey, + fee_recipient, + "Failed to resolve pubkey to index" ) } } @@ -613,8 +598,6 @@ async fn poll_beacon_attesters( &[validator_metrics::UPDATE_ATTESTERS_CURRENT_EPOCH], ); - let log = duties_service.context.log(); - let current_slot = duties_service .slot_clock .now() @@ -653,11 +636,10 @@ async fn poll_beacon_attesters( .await { error!( - log, - "Failed to download attester duties"; - "current_epoch" => current_epoch, - "request_epoch" => current_epoch, - "err" => ?e, + %current_epoch, + request_epoch = %current_epoch, + err = ?e, + "Failed to download attester duties" ) } @@ -675,11 +657,10 @@ async fn poll_beacon_attesters( .await { error!( - log, - "Failed to download attester duties"; - "current_epoch" => current_epoch, - "request_epoch" => next_epoch, - "err" => ?e, + %current_epoch, + request_epoch = %next_epoch, + err = ?e, + "Failed to download attester duties" ) } @@ -758,9 +739,8 @@ async fn poll_beacon_attesters( .await; if subscription_result.as_ref().is_ok() { debug!( - log, 
- "Broadcast attestation subscriptions"; - "count" => subscriptions.len(), + count = subscriptions.len(), + "Broadcast attestation subscriptions" ); for subscription_slots in subscription_slots_to_confirm { subscription_slots.record_successful_subscription_at(current_slot); @@ -768,9 +748,8 @@ async fn poll_beacon_attesters( } else if let Err(e) = subscription_result { if e.num_errors() < duties_service.beacon_nodes.num_total().await { warn!( - log, - "Some subscriptions failed"; - "error" => %e, + error = %e, + "Some subscriptions failed" ); // If subscriptions were sent to at least one node, regard that as a success. // There is some redundancy built into the subscription schedule to handle failures. @@ -779,9 +758,8 @@ async fn poll_beacon_attesters( } } else { error!( - log, - "All subscriptions failed"; - "error" => %e + error = %e, + "All subscriptions failed" ); } } @@ -809,14 +787,11 @@ async fn poll_beacon_attesters_for_epoch( local_indices: &[u64], local_pubkeys: &HashSet, ) -> Result<(), Error> { - let log = duties_service.context.log(); - // No need to bother the BN if we don't have any validators. if local_indices.is_empty() { debug!( - duties_service.context.log(), - "No validators, not downloading duties"; - "epoch" => epoch, + %epoch, + "No validators, not downloading duties" ); return Ok(()); } @@ -895,10 +870,9 @@ async fn poll_beacon_attesters_for_epoch( ); debug!( - log, - "Downloaded attester duties"; - "dependent_root" => %dependent_root, - "num_new_duties" => new_duties.len(), + %dependent_root, + num_new_duties = new_duties.len(), + "Downloaded attester duties" ); // Update the duties service with the new `DutyAndProof` messages. 
@@ -929,10 +903,9 @@ async fn poll_beacon_attesters_for_epoch( && prior_duty_and_proof.duty == duty_and_proof.duty { warn!( - log, - "Redundant attester duty update"; - "dependent_root" => %dependent_root, - "validator_index" => duty.validator_index, + %dependent_root, + validator_index = duty.validator_index, + "Redundant attester duty update" ); continue; } @@ -940,11 +913,10 @@ async fn poll_beacon_attesters_for_epoch( // Using `already_warned` avoids excessive logs. if dependent_root != *prior_dependent_root && already_warned.take().is_some() { warn!( - log, - "Attester duties re-org"; - "prior_dependent_root" => %prior_dependent_root, - "dependent_root" => %dependent_root, - "note" => "this may happen from time to time" + %prior_dependent_root, + %dependent_root, + note = "this may happen from time to time", + "Attester duties re-org" ) } *mut_value = (dependent_root, duty_and_proof); @@ -1056,8 +1028,6 @@ async fn fill_in_selection_proofs( duties: Vec, dependent_root: Hash256, ) { - let log = duties_service.context.log(); - // Sort duties by slot in a BTreeMap. let mut duties_by_slot: BTreeMap> = BTreeMap::new(); @@ -1125,20 +1095,18 @@ async fn fill_in_selection_proofs( // A pubkey can be missing when a validator was recently // removed via the API. warn!( - log, - "Missing pubkey for duty and proof"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, + info = "a validator may have recently been removed from this VC", + ?pubkey, + "Missing pubkey for duty and proof" ); // Do not abort the entire batch for a single failure. continue; } Err(e) => { error!( - log, - "Failed to produce duty and proof"; - "error" => ?e, - "msg" => "may impair attestation duties" + error = ?e, + msg = "may impair attestation duties", + "Failed to produce duty and proof" ); // Do not abort the entire batch for a single failure. 
continue; @@ -1163,9 +1131,8 @@ async fn fill_in_selection_proofs( // Our selection proofs are no longer relevant due to a reorg, abandon // this entire background process. debug!( - log, - "Stopping selection proof background task"; - "reason" => "re-org" + reason = "re-org", + "Stopping selection proof background task" ); return; } @@ -1188,11 +1155,10 @@ async fn fill_in_selection_proofs( let time_taken_ms = Duration::from_secs_f64(timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); debug!( - log, - "Computed attestation selection proofs"; - "batch_size" => batch_size, - "lookahead_slot" => lookahead_slot, - "time_taken_ms" => time_taken_ms + batch_size, + %lookahead_slot, + time_taken_ms, + "Computed attestation selection proofs" ); } else { // Just sleep for one slot if we are unable to read the system clock, this gives @@ -1234,8 +1200,6 @@ async fn poll_beacon_proposers( &[validator_metrics::UPDATE_PROPOSERS], ); - let log = duties_service.context.log(); - let current_slot = duties_service .slot_clock .now() @@ -1251,7 +1215,6 @@ async fn poll_beacon_proposers( &initial_block_proposers, block_service_tx, &duties_service.validator_store, - log, ) .await; @@ -1290,10 +1253,9 @@ async fn poll_beacon_proposers( .collect::>(); debug!( - log, - "Downloaded proposer duties"; - "dependent_root" => %dependent_root, - "num_relevant_duties" => relevant_duties.len(), + %dependent_root, + num_relevant_duties = relevant_duties.len(), + "Downloaded proposer duties" ); if let Some((prior_dependent_root, _)) = duties_service @@ -1303,20 +1265,18 @@ async fn poll_beacon_proposers( { if dependent_root != prior_dependent_root { warn!( - log, - "Proposer duties re-org"; - "prior_dependent_root" => %prior_dependent_root, - "dependent_root" => %dependent_root, - "msg" => "this may happen from time to time" + %prior_dependent_root, + %dependent_root, + msg = "this may happen from time to time", + "Proposer duties re-org" ) } } } // Don't return early here, we still want to try 
and produce blocks using the cached values. Err(e) => error!( - log, - "Failed to download proposer duties"; - "err" => %e, + err = %e, + "Failed to download proposer duties" ), } @@ -1341,13 +1301,11 @@ async fn poll_beacon_proposers( &additional_block_producers, block_service_tx, &duties_service.validator_store, - log, ) .await; debug!( - log, - "Detected new block proposer"; - "current_slot" => current_slot, + %current_slot, + "Detected new block proposer" ); validator_metrics::inc_counter(&validator_metrics::PROPOSAL_CHANGED); } @@ -1368,7 +1326,6 @@ async fn notify_block_production_service( block_proposers: &HashSet, block_service_tx: &mut Sender, validator_store: &ValidatorStore, - log: &Logger, ) { let non_doppelganger_proposers = block_proposers .iter() @@ -1385,10 +1342,9 @@ async fn notify_block_production_service( .await { error!( - log, - "Failed to notify block service"; - "current_slot" => current_slot, - "error" => %e + %current_slot, + error = %e, + "Failed to notify block service" ); }; } diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index 1f63f932a8..3367f2d6ca 100644 --- a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -3,7 +3,6 @@ use bls::PublicKeyBytes; use doppelganger_service::DoppelgangerStatus; use environment::RuntimeContext; use parking_lot::RwLock; -use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::hash::Hash; @@ -11,6 +10,7 @@ use std::ops::Deref; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{sleep, Duration}; +use tracing::{debug, error, info, warn}; use types::{ Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, ValidatorRegistrationData, @@ -173,13 +173,8 @@ impl PreparationService { /// Starts the service which periodically 
produces proposer preparations. pub fn start_proposer_prepare_service(self, spec: &ChainSpec) -> Result<(), String> { - let log = self.context.log().clone(); - let slot_duration = Duration::from_secs(spec.seconds_per_slot); - info!( - log, - "Proposer preparation service started"; - ); + info!("Proposer preparation service started"); let executor = self.context.executor.clone(); let spec = spec.clone(); @@ -192,9 +187,8 @@ impl PreparationService { .await .map_err(|e| { error!( - log, - "Error during proposer preparation"; - "error" => ?e, + error = ?e, + "Error during proposer preparation" ) }) .unwrap_or(()); @@ -203,7 +197,7 @@ impl PreparationService { if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { sleep(duration_to_next_slot).await; } else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; } @@ -216,12 +210,7 @@ impl PreparationService { /// Starts the service which periodically sends connected beacon nodes validator registration information. pub fn start_validator_registration_service(self, spec: &ChainSpec) -> Result<(), String> { - let log = self.context.log().clone(); - - info!( - log, - "Validator registration service started"; - ); + info!("Validator registration service started"); let spec = spec.clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); @@ -232,14 +221,14 @@ impl PreparationService { loop { // Poll the endpoint immediately to ensure fee recipients are received. if let Err(e) = self.register_validators().await { - error!(log,"Error during validator registration";"error" => ?e); + error!(error = ?e, "Error during validator registration"); } // Wait one slot if the register validator request fails or if we should not publish at the current slot. 
if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { sleep(duration_to_next_slot).await; } else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; } @@ -274,7 +263,6 @@ impl PreparationService { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec { - let log = self.context.log(); self.collect_proposal_data(|pubkey, proposal_data| { if let Some(fee_recipient) = proposal_data.fee_recipient { Some(ProposerPreparationData { @@ -285,10 +273,9 @@ impl PreparationService { } else { if spec.bellatrix_fork_epoch.is_some() { error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey + msg = "update validator_definitions.yml", + ?pubkey, + "Validator is missing fee recipient" ); } None @@ -336,8 +323,6 @@ impl PreparationService { &self, preparation_data: Vec, ) -> Result<(), String> { - let log = self.context.log(); - // Post the proposer preparations to the BN. 
let preparation_data_len = preparation_data.len(); let preparation_entries = preparation_data.as_slice(); @@ -351,14 +336,12 @@ impl PreparationService { .await { Ok(()) => debug!( - log, - "Published proposer preparation"; - "count" => preparation_data_len, + count = preparation_data_len, + "Published proposer preparation" ), Err(e) => error!( - log, - "Unable to publish proposer preparation to all beacon nodes"; - "error" => %e, + error = %e, + "Unable to publish proposer preparation to all beacon nodes" ), } Ok(()) @@ -400,8 +383,6 @@ impl PreparationService { &self, registration_keys: Vec, ) -> Result<(), String> { - let log = self.context.log(); - let registration_data_len = registration_keys.len(); let mut signed = Vec::with_capacity(registration_data_len); @@ -442,19 +423,14 @@ impl PreparationService { Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently // removed via the API. - debug!( - log, - "Missing pubkey for registration data"; - "pubkey" => ?pubkey, - ); + debug!(?pubkey, "Missing pubkey for registration data"); continue; } Err(e) => { error!( - log, - "Unable to sign validator registration data"; - "error" => ?e, - "pubkey" => ?pubkey + error = ?e, + ?pubkey, + "Unable to sign validator registration data" ); continue; } @@ -474,9 +450,8 @@ impl PreparationService { { Ok(()) => { info!( - log, - "Published validator registrations to the builder network"; - "count" => batch.len(), + count = batch.len(), + "Published validator registrations to the builder network" ); let mut guard = self.validator_registration_cache.write(); for signed_data in batch { @@ -487,9 +462,8 @@ impl PreparationService { } } Err(e) => warn!( - log, - "Unable to publish validator registrations to the builder network"; - "error" => %e, + error = %e, + "Unable to publish validator registrations to the builder network" ), } } diff --git a/validator_client/validator_services/src/sync.rs 
b/validator_client/validator_services/src/sync.rs index 6c983b5430..5151633514 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -1,12 +1,13 @@ use crate::duties_service::{DutiesService, Error}; use doppelganger_service::DoppelgangerStatus; use futures::future::join_all; +use logging::crit; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use slog::{crit, debug, info, warn}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; +use tracing::{debug, info, warn}; use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; use validator_store::Error as ValidatorStoreError; @@ -414,23 +415,20 @@ pub async fn poll_sync_committee_duties_for_period Result<(), Error> { let spec = &duties_service.spec; - let log = duties_service.context.log(); // no local validators don't need to poll for sync committee if local_indices.is_empty() { debug!( - duties_service.context.log(), - "No validators, not polling for sync committee duties"; - "sync_committee_period" => sync_committee_period, + sync_committee_period, + "No validators, not polling for sync committee duties" ); return Ok(()); } debug!( - log, - "Fetching sync committee duties"; - "sync_committee_period" => sync_committee_period, - "num_validators" => local_indices.len(), + sync_committee_period, + num_validators = local_indices.len(), + "Fetching sync committee duties" ); let period_start_epoch = spec.epochs_per_sync_committee_period * sync_committee_period; @@ -452,16 +450,15 @@ pub async fn poll_sync_committee_duties_for_period res.data, Err(e) => { warn!( - log, - "Failed to download sync committee duties"; - "sync_committee_period" => sync_committee_period, - "error" => %e, + sync_committee_period, + error = %e, + "Failed to download sync committee duties" ); return Ok(()); } }; - debug!(log, "Fetched sync 
duties from BN"; "count" => duties.len()); + debug!(count = duties.len(), "Fetched sync duties from BN"); // Add duties to map. let committee_duties = duties_service @@ -479,9 +476,8 @@ pub async fn poll_sync_committee_duties_for_period "this could be due to a really long re-org, or a bug" + message = "this could be due to a really long re-org, or a bug", + "Sync committee duties changed" ); } updated_due_to_reorg @@ -489,10 +485,8 @@ pub async fn poll_sync_committee_duties_for_period duty.validator_index, - "sync_committee_period" => sync_committee_period, + validator_index = duty.validator_index, + sync_committee_period, "Validator in sync committee" ); *validator_duties = Some(ValidatorDuties::new(duty)); @@ -509,14 +503,11 @@ pub async fn fill_in_aggregation_proofs( current_slot: Slot, pre_compute_slot: Slot, ) { - let log = duties_service.context.log(); - debug!( - log, - "Calculating sync selection proofs"; - "period" => sync_committee_period, - "current_slot" => current_slot, - "pre_compute_slot" => pre_compute_slot + period = sync_committee_period, + %current_slot, + %pre_compute_slot, + "Calculating sync selection proofs" ); // Generate selection proofs for each validator at each slot, one slot at a time. @@ -532,9 +523,8 @@ pub async fn fill_in_aggregation_proofs( Ok(subnet_ids) => subnet_ids, Err(e) => { crit!( - log, - "Arithmetic error computing subnet IDs"; - "error" => ?e, + error = ?e, + "Arithmetic error computing subnet IDs" ); continue; } @@ -556,21 +546,19 @@ pub async fn fill_in_aggregation_proofs( // A pubkey can be missing when a validator was recently // removed via the API. 
debug!( - log, - "Missing pubkey for sync selection proof"; - "pubkey" => ?pubkey, - "pubkey" => ?duty.pubkey, - "slot" => proof_slot, + ?pubkey, + pubkey = ?duty.pubkey, + slot = %proof_slot, + "Missing pubkey for sync selection proof" ); return None; } Err(e) => { warn!( - log, - "Unable to sign selection proof"; - "error" => ?e, - "pubkey" => ?duty.pubkey, - "slot" => proof_slot, + error = ?e, + pubkey = ?duty.pubkey, + slot = %proof_slot, + "Unable to sign selection proof" ); return None; } @@ -579,22 +567,20 @@ pub async fn fill_in_aggregation_proofs( match proof.is_aggregator::() { Ok(true) => { debug!( - log, - "Validator is sync aggregator"; - "validator_index" => duty.validator_index, - "slot" => proof_slot, - "subnet_id" => %subnet_id, + validator_index = duty.validator_index, + slot = %proof_slot, + %subnet_id, + "Validator is sync aggregator" ); Some(((proof_slot, *subnet_id), proof)) } Ok(false) => None, Err(e) => { warn!( - log, - "Error determining is_aggregator"; - "pubkey" => ?duty.pubkey, - "slot" => proof_slot, - "error" => ?e, + pubkey = ?duty.pubkey, + slot = %proof_slot, + error = ?e, + "Error determining is_aggregator" ); None } @@ -614,11 +600,7 @@ pub async fn fill_in_aggregation_proofs( // Add to global storage (we add regularly so the proofs can be used ASAP). 
let sync_map = duties_service.sync_duties.committees.read(); let Some(committee_duties) = sync_map.get(&sync_committee_period) else { - debug!( - log, - "Missing sync duties"; - "period" => sync_committee_period, - ); + debug!(period = sync_committee_period, "Missing sync duties"); continue; }; let validators = committee_duties.validators.read(); @@ -629,20 +611,18 @@ pub async fn fill_in_aggregation_proofs( duty.aggregation_duties.proofs.write().extend(proofs); } else { debug!( - log, - "Missing sync duty to update"; - "validator_index" => validator_index, - "period" => sync_committee_period, + validator_index, + period = sync_committee_period, + "Missing sync duty to update" ); } } if num_validators_updated > 0 { debug!( - log, - "Finished computing sync selection proofs"; - "slot" => slot, - "updated_validators" => num_validators_updated, + %slot, + updated_validators = num_validators_updated, + "Finished computing sync selection proofs" ); } } diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 5f84c517f3..d99c0d3107 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -4,13 +4,14 @@ use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; use futures::future::FutureExt; -use slog::{crit, debug, error, info, trace, warn}; +use logging::crit; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tokio::time::{sleep, sleep_until, Duration, Instant}; +use tracing::{debug, error, info, trace, warn}; use types::{ ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, @@ -86,9 +87,8 @@ impl SyncCommitteeService { } pub fn start_update_service(self, spec: 
&ChainSpec) -> Result<(), String> { - let log = self.context.log().clone(); if self.duties_service.disable_attesting { - info!(log, "Sync committee service disabled"); + info!("Sync committee service disabled"); return Ok(()); } @@ -99,9 +99,8 @@ impl SyncCommitteeService { .ok_or("Unable to determine duration to next slot")?; info!( - log, - "Sync committee service started"; - "next_update_millis" => duration_to_next_slot.as_millis() + next_update_millis = duration_to_next_slot.as_millis(), + "Sync committee service started" ); let executor = self.context.executor.clone(); @@ -110,7 +109,6 @@ impl SyncCommitteeService { loop { if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { // Wait for contribution broadcast interval 1/3 of the way through the slot. - let log = self.context.log(); sleep(duration_to_next_slot + slot_duration / 3).await; // Do nothing if the Altair fork has not yet occurred. @@ -120,21 +118,17 @@ impl SyncCommitteeService { if let Err(e) = self.spawn_contribution_tasks(slot_duration).await { crit!( - log, - "Failed to spawn sync contribution tasks"; - "error" => e + error = ?e, + "Failed to spawn sync contribution tasks" ) } else { - trace!( - log, - "Spawned sync contribution tasks"; - ) + trace!("Spawned sync contribution tasks") } // Do subscriptions for future slots/epochs. self.spawn_subscription_tasks(); } else { - error!(log, "Failed to read slot clock"); + error!("Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. 
sleep(slot_duration).await; } @@ -146,7 +140,6 @@ impl SyncCommitteeService { } async fn spawn_contribution_tasks(&self, slot_duration: Duration) -> Result<(), String> { - let log = self.context.log().clone(); let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; let duration_to_next_slot = self .slot_clock @@ -165,16 +158,12 @@ impl SyncCommitteeService { .sync_duties .get_duties_for_slot(slot, &self.duties_service.spec) else { - debug!(log, "No duties known for slot {}", slot); + debug!("No duties known for slot {}", slot); return Ok(()); }; if slot_duties.duties.is_empty() { - debug!( - log, - "No local validators in current sync committee"; - "slot" => slot, - ); + debug!(%slot, "No local validators in current sync committee"); return Ok(()); } @@ -201,11 +190,10 @@ impl SyncCommitteeService { Ok(block) => block.data.root, Err(errs) => { warn!( - log, + errors = errs.to_string(), + %slot, "Refusing to sign sync committee messages for an optimistic head block or \ - a block head with unknown optimistic status"; - "errors" => errs.to_string(), - "slot" => slot, + a block head with unknown optimistic status" ); return Ok(()); } @@ -251,8 +239,6 @@ impl SyncCommitteeService { beacon_block_root: Hash256, validator_duties: Vec, ) -> Result<(), ()> { - let log = self.context.log(); - // Create futures to produce sync committee signatures. let signature_futures = validator_duties.iter().map(|duty| async move { match self @@ -270,21 +256,19 @@ impl SyncCommitteeService { // A pubkey can be missing when a validator was recently // removed via the API. 
debug!( - log, - "Missing pubkey for sync committee signature"; - "pubkey" => ?pubkey, - "validator_index" => duty.validator_index, - "slot" => slot, + ?pubkey, + validator_index = duty.validator_index, + %slot, + "Missing pubkey for sync committee signature" ); None } Err(e) => { crit!( - log, - "Failed to sign sync committee signature"; - "validator_index" => duty.validator_index, - "slot" => slot, - "error" => ?e, + validator_index = duty.validator_index, + %slot, + error = ?e, + "Failed to sign sync committee signature" ); None } @@ -307,19 +291,17 @@ impl SyncCommitteeService { .await .map_err(|e| { error!( - log, - "Unable to publish sync committee messages"; - "slot" => slot, - "error" => %e, + %slot, + error = %e, + "Unable to publish sync committee messages" ); })?; info!( - log, - "Successfully published sync committee messages"; - "count" => committee_signatures.len(), - "head_block" => ?beacon_block_root, - "slot" => slot, + count = committee_signatures.len(), + head_block = ?beacon_block_root, + %slot, + "Successfully published sync committee messages" ); Ok(()) @@ -362,8 +344,6 @@ impl SyncCommitteeService { ) -> Result<(), ()> { sleep_until(aggregate_instant).await; - let log = self.context.log(); - let contribution = &self .beacon_nodes .first_success(|beacon_node| async move { @@ -380,20 +360,14 @@ impl SyncCommitteeService { .await .map_err(|e| { crit!( - log, - "Failed to produce sync contribution"; - "slot" => slot, - "beacon_block_root" => ?beacon_block_root, - "error" => %e, + %slot, + ?beacon_block_root, + error = %e, + "Failed to produce sync contribution" ) })? .ok_or_else(|| { - crit!( - log, - "No aggregate contribution found"; - "slot" => slot, - "beacon_block_root" => ?beacon_block_root, - ); + crit!(%slot, ?beacon_block_root, "No aggregate contribution found"); })? 
.data; @@ -414,20 +388,14 @@ impl SyncCommitteeService { Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently // removed via the API. - debug!( - log, - "Missing pubkey for sync contribution"; - "pubkey" => ?pubkey, - "slot" => slot, - ); + debug!(?pubkey, %slot, "Missing pubkey for sync contribution"); None } Err(e) => { crit!( - log, - "Unable to sign sync committee contribution"; - "slot" => slot, - "error" => ?e, + %slot, + error = ?e, + "Unable to sign sync committee contribution" ); None } @@ -452,20 +420,18 @@ impl SyncCommitteeService { .await .map_err(|e| { error!( - log, - "Unable to publish signed contributions and proofs"; - "slot" => slot, - "error" => %e, + %slot, + error = %e, + "Unable to publish signed contributions and proofs" ); })?; info!( - log, - "Successfully published sync contributions"; - "subnet" => %subnet_id, - "beacon_block_root" => %beacon_block_root, - "num_signers" => contribution.aggregation_bits.num_set_bits(), - "slot" => slot, + subnet = %subnet_id, + beacon_block_root = %beacon_block_root, + num_signers = contribution.aggregation_bits.num_set_bits(), + %slot, + "Successfully published sync contributions" ); Ok(()) @@ -473,14 +439,13 @@ impl SyncCommitteeService { fn spawn_subscription_tasks(&self) { let service = self.clone(); - let log = self.context.log().clone(); + self.inner.context.executor.spawn( async move { service.publish_subscriptions().await.unwrap_or_else(|e| { error!( - log, - "Error publishing subscriptions"; - "error" => ?e, + error = ?e, + "Error publishing subscriptions" ) }); }, @@ -489,7 +454,6 @@ impl SyncCommitteeService { } async fn publish_subscriptions(self) -> Result<(), String> { - let log = self.context.log().clone(); let spec = &self.duties_service.spec; let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; @@ -526,12 +490,7 @@ impl SyncCommitteeService { let mut subscriptions = vec![]; for (duty_slot, sync_committee_period) in 
duty_slots { - debug!( - log, - "Fetching subscription duties"; - "duty_slot" => duty_slot, - "current_slot" => slot, - ); + debug!(%duty_slot, %slot, "Fetching subscription duties"); match self .duties_service .sync_duties @@ -544,9 +503,8 @@ impl SyncCommitteeService { )), None => { debug!( - log, - "No duties for subscription"; - "slot" => duty_slot, + slot = %duty_slot, + "No duties for subscription" ); all_succeeded = false; } @@ -554,29 +512,23 @@ impl SyncCommitteeService { } if subscriptions.is_empty() { - debug!( - log, - "No sync subscriptions to send"; - "slot" => slot, - ); + debug!(%slot, "No sync subscriptions to send"); return Ok(()); } // Post subscriptions to BN. debug!( - log, - "Posting sync subscriptions to BN"; - "count" => subscriptions.len(), + count = subscriptions.len(), + "Posting sync subscriptions to BN" ); let subscriptions_slice = &subscriptions; for subscription in subscriptions_slice { debug!( - log, - "Subscription"; - "validator_index" => subscription.validator_index, - "validator_sync_committee_indices" => ?subscription.sync_committee_indices, - "until_epoch" => subscription.until_epoch, + validator_index = subscription.validator_index, + validator_sync_committee_indices = ?subscription.sync_committee_indices, + until_epoch = %subscription.until_epoch, + "Subscription" ); } @@ -590,10 +542,9 @@ impl SyncCommitteeService { .await { error!( - log, - "Unable to post sync committee subscriptions"; - "slot" => slot, - "error" => %e, + %slot, + error = %e, + "Unable to post sync committee subscriptions" ); all_succeeded = false; } diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index 99c3025a30..1338c2a07e 100644 --- a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -12,12 +12,13 @@ path = "src/lib.rs" account_utils = { workspace = true } doppelganger_service = { workspace = true } initialized_validators = { workspace = true } 
+logging = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } signing_method = { workspace = true } slashing_protection = { workspace = true } -slog = { workspace = true } slot_clock = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 5bd9ffd8b2..9b2576847d 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,18 +1,19 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; use doppelganger_service::{DoppelgangerService, DoppelgangerStatus, DoppelgangerValidatorStore}; use initialized_validators::InitializedValidators; +use logging::crit; use parking_lot::{Mutex, RwLock}; use serde::{Deserialize, Serialize}; use signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, }; -use slog::{crit, error, info, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; +use tracing::{error, info, warn}; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, @@ -82,7 +83,6 @@ pub struct ValidatorStore { slashing_protection_last_prune: Arc>, genesis_validators_root: Hash256, spec: Arc, - log: Logger, doppelganger_service: Option>, slot_clock: T, fee_recipient_process: Option
, @@ -114,7 +114,6 @@ impl ValidatorStore { slot_clock: T, config: &Config, task_executor: TaskExecutor, - log: Logger, ) -> Self { Self { validators: Arc::new(RwLock::new(validators)), @@ -122,7 +121,6 @@ impl ValidatorStore { slashing_protection_last_prune: Arc::new(Mutex::new(Epoch::new(0))), genesis_validators_root, spec, - log, doppelganger_service, slot_clock, fee_recipient_process: config.fee_recipient, @@ -581,10 +579,9 @@ impl ValidatorStore { // Make sure the block slot is not higher than the current slot to avoid potential attacks. if block.slot() > current_slot { warn!( - self.log, - "Not signing block with slot greater than current slot"; - "block_slot" => block.slot().as_u64(), - "current_slot" => current_slot.as_u64() + block_slot = block.slot().as_u64(), + current_slot = current_slot.as_u64(), + "Not signing block with slot greater than current slot" ); return Err(Error::GreaterThanCurrentSlot { slot: block.slot(), @@ -630,10 +627,7 @@ impl ValidatorStore { Ok(SignedBeaconBlock::from_block(block, signature)) } Ok(Safe::SameData) => { - warn!( - self.log, - "Skipping signing of previously signed block"; - ); + warn!("Skipping signing of previously signed block"); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_BLOCKS_TOTAL, &[validator_metrics::SAME_DATA], @@ -642,10 +636,9 @@ impl ValidatorStore { } Err(NotSafe::UnregisteredValidator(pk)) => { warn!( - self.log, - "Not signing block for unregistered validator"; - "msg" => "Carefully consider running with --init-slashing-protection (see --help)", - "public_key" => format!("{:?}", pk) + msg = "Carefully consider running with --init-slashing-protection (see --help)", + public_key = format!("{:?}", pk), + "Not signing block for unregistered validator" ); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_BLOCKS_TOTAL, @@ -654,11 +647,7 @@ impl ValidatorStore { Err(Error::Slashable(NotSafe::UnregisteredValidator(pk))) } Err(e) => { - crit!( - self.log, - "Not signing slashable 
block"; - "error" => format!("{:?}", e) - ); + crit!(error = format!("{:?}", e), "Not signing slashable block"); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_BLOCKS_TOTAL, &[validator_metrics::SLASHABLE], @@ -725,10 +714,7 @@ impl ValidatorStore { Ok(()) } Ok(Safe::SameData) => { - warn!( - self.log, - "Skipping signing of previously signed attestation" - ); + warn!("Skipping signing of previously signed attestation"); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, &[validator_metrics::SAME_DATA], @@ -737,10 +723,9 @@ impl ValidatorStore { } Err(NotSafe::UnregisteredValidator(pk)) => { warn!( - self.log, - "Not signing attestation for unregistered validator"; - "msg" => "Carefully consider running with --init-slashing-protection (see --help)", - "public_key" => format!("{:?}", pk) + msg = "Carefully consider running with --init-slashing-protection (see --help)", + public_key = format!("{:?}", pk), + "Not signing attestation for unregistered validator" ); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, @@ -750,10 +735,9 @@ impl ValidatorStore { } Err(e) => { crit!( - self.log, - "Not signing slashable attestation"; - "attestation" => format!("{:?}", attestation.data()), - "error" => format!("{:?}", e) + attestation = format!("{:?}", attestation.data()), + error = format!("{:?}", e), + "Not signing slashable attestation" ); validator_metrics::inc_counter_vec( &validator_metrics::SIGNED_ATTESTATIONS_TOTAL, @@ -1068,13 +1052,12 @@ impl ValidatorStore { if first_run { info!( - self.log, - "Pruning slashing protection DB"; - "epoch" => current_epoch, - "msg" => "pruning may take several minutes the first time it runs" + epoch = %current_epoch, + msg = "pruning may take several minutes the first time it runs", + "Pruning slashing protection DB" ); } else { - info!(self.log, "Pruning slashing protection DB"; "epoch" => current_epoch); + info!(epoch = %current_epoch, "Pruning slashing 
protection DB"); } let _timer = @@ -1090,9 +1073,8 @@ impl ValidatorStore { .prune_all_signed_attestations(all_pubkeys.iter(), new_min_target_epoch) { error!( - self.log, - "Error during pruning of signed attestations"; - "error" => ?e, + error = ?e, + "Error during pruning of signed attestations" ); return; } @@ -1102,15 +1084,14 @@ impl ValidatorStore { .prune_all_signed_blocks(all_pubkeys.iter(), new_min_slot) { error!( - self.log, - "Error during pruning of signed blocks"; - "error" => ?e, + error = ?e, + "Error during pruning of signed blocks" ); return; } *last_prune = current_epoch; - info!(self.log, "Completed pruning of slashing protection DB"); + info!("Completed pruning of slashing protection DB"); } }