mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-02 16:21:42 +00:00
Use scoped rayon pool for backfill chain segment processing (#7924)
Part of #7866 - Continuation of #7921 In the above PR, we enabled rayon for batch KZG verification in chain segment processing. However, using the global rayon thread pool for backfill is likely to create resource contention with higher-priority beacon processor work. This PR introduces a dedicated low-priority rayon thread pool `LOW_PRIORITY_RAYON_POOL` and uses it for processing backfill chain segments. This prevents backfill KZG verification from using the global rayon thread pool and competing with high-priority beacon processor tasks for CPU resources. However, this PR by itself doesn't prevent CPU oversubscription because other tasks could still fill up the global rayon thread pool, and having an extra thread pool could make things worse. To address this we need the beacon processor to coordinate total CPU allocation across all tasks, which is covered in: - #7789 Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com> Co-Authored-By: Eitan Seri-Levi <eserilev@gmail.com> Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu>
This commit is contained in:
@@ -12,6 +12,7 @@ logging = { workspace = true }
|
||||
metrics = { workspace = true }
|
||||
num_cpus = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
slot_clock = { workspace = true }
|
||||
strum = { workspace = true }
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
//! checks the queues to see if there are more parcels of work that can be spawned in a new worker
|
||||
//! task.
|
||||
|
||||
use crate::rayon_manager::RayonManager;
|
||||
use crate::work_reprocessing_queue::{
|
||||
QueuedBackfillBatch, QueuedColumnReconstruction, QueuedGossipBlock, ReprocessQueueMessage,
|
||||
};
|
||||
@@ -47,6 +48,7 @@ use lighthouse_network::{MessageId, NetworkGlobals, PeerId};
|
||||
use logging::TimeLatch;
|
||||
use logging::crit;
|
||||
use parking_lot::Mutex;
|
||||
use rayon::ThreadPool;
|
||||
pub use scheduler::work_reprocessing_queue;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use slot_clock::SlotClock;
|
||||
@@ -74,6 +76,7 @@ use work_reprocessing_queue::{
|
||||
};
|
||||
|
||||
mod metrics;
|
||||
pub mod rayon_manager;
|
||||
pub mod scheduler;
|
||||
|
||||
/// The maximum size of the channel for work events to the `BeaconProcessor`.
|
||||
@@ -603,7 +606,7 @@ pub enum Work<E: EthSpec> {
|
||||
process_fn: BlockingFn,
|
||||
},
|
||||
ChainSegment(AsyncFn),
|
||||
ChainSegmentBackfill(AsyncFn),
|
||||
ChainSegmentBackfill(BlockingFn),
|
||||
Status(BlockingFn),
|
||||
BlocksByRangeRequest(AsyncFn),
|
||||
BlocksByRootsRequest(AsyncFn),
|
||||
@@ -807,6 +810,7 @@ pub struct BeaconProcessor<E: EthSpec> {
|
||||
pub network_globals: Arc<NetworkGlobals<E>>,
|
||||
pub executor: TaskExecutor,
|
||||
pub current_workers: usize,
|
||||
pub rayon_manager: RayonManager,
|
||||
pub config: BeaconProcessorConfig,
|
||||
}
|
||||
|
||||
@@ -1603,7 +1607,17 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
||||
Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => {
|
||||
task_spawner.spawn_async(work)
|
||||
}
|
||||
Work::ChainSegmentBackfill(process_fn) => task_spawner.spawn_async(process_fn),
|
||||
Work::ChainSegmentBackfill(process_fn) => {
|
||||
if self.config.enable_backfill_rate_limiting {
|
||||
task_spawner.spawn_blocking_with_rayon(
|
||||
self.rayon_manager.low_priority_threadpool.clone(),
|
||||
process_fn,
|
||||
)
|
||||
} else {
|
||||
// use the global rayon thread pool if backfill rate limiting is disabled.
|
||||
task_spawner.spawn_blocking(process_fn)
|
||||
}
|
||||
}
|
||||
Work::ApiRequestP0(process_fn) | Work::ApiRequestP1(process_fn) => match process_fn {
|
||||
BlockingOrAsync::Blocking(process_fn) => task_spawner.spawn_blocking(process_fn),
|
||||
BlockingOrAsync::Async(process_fn) => task_spawner.spawn_async(process_fn),
|
||||
@@ -1665,6 +1679,22 @@ impl TaskSpawner {
|
||||
WORKER_TASK_NAME,
|
||||
)
|
||||
}
|
||||
|
||||
/// Spawns a blocking task on a rayon thread pool, dropping the `SendOnDrop` after task completion.
|
||||
fn spawn_blocking_with_rayon<F>(self, thread_pool: Arc<ThreadPool>, task: F)
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
self.executor.spawn_blocking(
|
||||
move || {
|
||||
thread_pool.install(|| {
|
||||
task();
|
||||
});
|
||||
drop(self.send_idle_on_drop)
|
||||
},
|
||||
WORKER_TASK_NAME,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// This struct will send a message on `self.tx` when it is dropped. An error will be logged
|
||||
|
||||
27
beacon_node/beacon_processor/src/rayon_manager.rs
Normal file
27
beacon_node/beacon_processor/src/rayon_manager.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
use rayon::{ThreadPool, ThreadPoolBuilder};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Divisor applied to the logical CPU count to size the low-priority pool
/// (a value of 4 means ~25% of CPUs).
const DEFAULT_LOW_PRIORITY_DIVISOR: usize = 4;
/// Lower bound on the low-priority pool size so it always has at least one thread.
const MINIMUM_LOW_PRIORITY_THREAD_COUNT: usize = 1;
|
||||
|
||||
/// Owns the rayon thread pools used by the beacon processor, so that
/// compute-heavy work can be routed away from the global rayon pool.
pub struct RayonManager {
    /// Smaller rayon thread pool for lower-priority, compute-intensive tasks.
    /// By default ~25% of CPUs or a minimum of 1 thread.
    pub low_priority_threadpool: Arc<ThreadPool>,
}
|
||||
|
||||
impl Default for RayonManager {
|
||||
fn default() -> Self {
|
||||
let low_prio_threads =
|
||||
(num_cpus::get() / DEFAULT_LOW_PRIORITY_DIVISOR).max(MINIMUM_LOW_PRIORITY_THREAD_COUNT);
|
||||
let low_priority_threadpool = Arc::new(
|
||||
ThreadPoolBuilder::new()
|
||||
.num_threads(low_prio_threads)
|
||||
.build()
|
||||
.expect("failed to build low-priority rayon pool"),
|
||||
);
|
||||
Self {
|
||||
low_priority_threadpool,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -173,7 +173,7 @@ pub struct IgnoredRpcBlock {
|
||||
}
|
||||
|
||||
/// A backfill batch work that has been queued for processing later.
|
||||
pub struct QueuedBackfillBatch(pub AsyncFn);
|
||||
pub struct QueuedBackfillBatch(pub BlockingFn);
|
||||
|
||||
pub struct QueuedColumnReconstruction {
|
||||
pub block_root: Hash256,
|
||||
@@ -1084,7 +1084,7 @@ mod tests {
|
||||
// Now queue a backfill sync batch.
|
||||
work_reprocessing_tx
|
||||
.try_send(ReprocessQueueMessage::BackfillSync(QueuedBackfillBatch(
|
||||
Box::pin(async {}),
|
||||
Box::new(|| {}),
|
||||
)))
|
||||
.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
Reference in New Issue
Block a user