[RFC PATCH v5 4/9] sched/fair: Dynamically update cfs_overload_cpus
From: Chen Jinghuang
Date: Fri Mar 20 2026 - 02:20:31 EST
From: Steve Sistare <steven.sistare@xxxxxxxxxx>
An overloaded CPU has more than one runnable CFS task. When a CFS task
wakes on a CPU and h_nr_runnable transitions from 1 to 2 or more, set the
CPU's bit in the cfs_overload_cpus bitmap. When a CFS task sleeps and
h_nr_runnable transitions from 2 to 1 or fewer, clear the CPU's bit in
cfs_overload_cpus.
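For illustration, the transition rule reduces to a pair of threshold
checks on the old and new h_nr_runnable values. A minimal userspace
sketch (not kernel code; the runqueue and bitmap are modeled with a toy
struct and a plain unsigned long, and all names below are invented for
the example):

#include <stdio.h>

/* Toy runqueue: just a CPU id and its runnable-task count. */
struct toy_rq {
	int cpu;
	unsigned int h_nr_runnable;
};

static unsigned long overload_mask;	/* stands in for cfs_overload_cpus */

/*
 * Apply the rule after h_nr_runnable changed from prev to its current
 * value.  The patch performs the set check on the enqueue path and the
 * clear check on the dequeue path; both are folded into one helper here
 * for brevity.
 */
static void update_overload(struct toy_rq *rq, unsigned int prev)
{
	if (prev <= 1 && rq->h_nr_runnable >= 2)
		overload_mask |= 1UL << rq->cpu;	/* like overload_set() */
	else if (prev >= 2 && rq->h_nr_runnable <= 1)
		overload_mask &= ~(1UL << rq->cpu);	/* like overload_clear() */
}

int main(void)
{
	struct toy_rq rq = { .cpu = 3, .h_nr_runnable = 1 };
	unsigned int prev;

	prev = rq.h_nr_runnable;
	rq.h_nr_runnable = 2;		/* second task wakes: 1 -> 2 */
	update_overload(&rq, prev);
	printf("after wake:  %#lx\n", overload_mask);	/* bit 3 set */

	prev = rq.h_nr_runnable;
	rq.h_nr_runnable = 1;		/* a task sleeps: 2 -> 1 */
	update_overload(&rq, prev);
	printf("after sleep: %#lx\n", overload_mask);	/* bit 3 clear */

	return 0;
}

The <=/>= comparisons (rather than ==) presumably keep the bit correct
even when the count moves by more than one across a single transition.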
Signed-off-by: Steve Sistare <steven.sistare@xxxxxxxxxx>
Signed-off-by: Chen Jinghuang <chenjinghuang2@xxxxxxxxxx>
---
v5: Rename h_nr_running to h_nr_runnable and reposition
overload_set/overload_clear to fix overload detection under delayed dequeue.
v4: Detect CPU overload via changes in h_nr_running.
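
The overload_set()/overload_clear() helpers added below dereference
rq->cfs_overload_cpus under rcu_read_lock(), presumably because the
sparsemask lives in sched-domain data that can be freed on a domain
rebuild (the pointer itself is set up by an earlier patch in this
series, not shown here). A rough userspace sketch of that
reader/updater pattern using liburcu (the struct and toy_* names are
invented for the example):

/* Build with: cc rcu_sketch.c -lurcu   (liburcu, default flavor) */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the sparsemask; one word is enough for a sketch. */
struct toy_mask {
	unsigned long bits;
};

/* Stand-in for rq->cfs_overload_cpus. */
static struct toy_mask *overload_cpus;

/* Reader side, shaped like overload_set() in the patch. */
static void toy_overload_set(int cpu)
{
	struct toy_mask *m;

	rcu_read_lock();
	m = rcu_dereference(overload_cpus);
	if (m)			/* may be NULL while domains rebuild */
		m->bits |= 1UL << cpu;
	rcu_read_unlock();
}

/* Updater side: publish a new mask, free the old after a grace period. */
static void toy_rebuild_domains(void)
{
	struct toy_mask *old = overload_cpus;

	rcu_assign_pointer(overload_cpus, calloc(1, sizeof(*old)));
	synchronize_rcu();	/* all readers of 'old' are done */
	free(old);
}

int main(void)
{
	rcu_register_thread();

	toy_rebuild_domains();	/* publish the initial mask */
	toy_overload_set(3);
	printf("bits = %#lx\n", overload_cpus->bits);	/* bits = 0x8 */

	rcu_unregister_thread();
	return 0;
}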
---
kernel/sched/fair.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eea99ec01a3f..92c3bcff5b6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -55,6 +55,7 @@
 #include <uapi/linux/sched/types.h>
 
 #include "sched.h"
+#include "sparsemask.h"
 #include "stats.h"
 #include "autogroup.h"
 
@@ -5076,6 +5077,33 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
 }
 
+#ifdef CONFIG_SMP
+static void overload_clear(struct rq *rq)
+{
+	struct sparsemask *overload_cpus;
+
+	rcu_read_lock();
+	overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+	if (overload_cpus)
+		sparsemask_clear_elem(overload_cpus, rq->cpu);
+	rcu_read_unlock();
+}
+
+static void overload_set(struct rq *rq)
+{
+	struct sparsemask *overload_cpus;
+
+	rcu_read_lock();
+	overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+	if (overload_cpus)
+		sparsemask_set_elem(overload_cpus, rq->cpu);
+	rcu_read_unlock();
+}
+#else /* CONFIG_SMP */
+static inline void overload_clear(struct rq *rq) {}
+static inline void overload_set(struct rq *rq) {}
+#endif
+
 void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
 {
 	struct sched_entity *se = &p->se;
@@ -5955,6 +5983,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	if (!dequeue)
 		return false; /* Throttle no longer required. */
+
 	/* freeze hierarchy runnable averages while throttled */
 	rcu_read_lock();
 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
@@ -6875,6 +6904,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int h_nr_idle = task_has_idle_policy(p);
 	int h_nr_runnable = 1;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	unsigned int prev_nr = rq->cfs.h_nr_runnable;
 	int rq_h_nr_queued = rq->cfs.h_nr_queued;
 	u64 slice = 0;
 
@@ -6892,6 +6922,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	if (flags & ENQUEUE_DELAYED) {
 		requeue_delayed_entity(se);
+
+		if (prev_nr <= 1 && rq->cfs.h_nr_runnable >= 2)
+			overload_set(rq);
+
 		return;
 	}
 
@@ -6961,6 +6995,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, 1);
+	if (prev_nr <= 1 && rq->cfs.h_nr_runnable >= 2)
+		overload_set(rq);
 
 	/*
 	 * Since new tasks are assigned an initial util_avg equal to
@@ -7003,6 +7039,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	int h_nr_idle = 0;
 	int h_nr_queued = 0;
 	int h_nr_runnable = 0;
+	unsigned int prev_nr = rq->cfs.h_nr_runnable;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
@@ -7018,8 +7055,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		cfs_rq = cfs_rq_of(se);
 
 		if (!dequeue_entity(cfs_rq, se, flags)) {
-			if (p && &p->se == se)
+			if (p && &p->se == se) {
+				if (prev_nr >= 2 && rq->cfs.h_nr_runnable <= 1)
+					overload_clear(rq);
+
 				return -1;
+			}
 
 			slice = cfs_rq_min_slice(cfs_rq);
 			break;
@@ -7077,6 +7118,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	}
 
 	sub_nr_running(rq, h_nr_queued);
+	if (prev_nr >= 2 && rq->cfs.h_nr_runnable <= 1)
+		overload_clear(rq);
 
 	/* balance early to pull high priority tasks */
 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
--
2.34.1