[RFC][PATCH 5/8] sched/fair: Add cgroup_mode: CONCUR

From: Peter Zijlstra

Date: Tue Mar 17 2026 - 06:53:01 EST


A variation of MAX where, instead of assuming maximal concurrency, the
shares scale with 'min(nr_tasks, nr_cpus)'. This handles the low-concurrency
cases more gracefully, with the exception of CPU affinity.
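
For illustration (numbers not from the patch): on an 8 CPU machine, a group
with shares=1024 and 2 runnable tasks hands out 2*1024 total weight under
CONCUR, where MAX would hand out 8*1024. A minimal userspace sketch of just
the scaling factor -- concur_scale() is hypothetical, and the fraction/clamp
step done by __calc_smp_shares() is left out:

#include <stdio.h>

/* Model of the CONCUR scaling factor; all names hypothetical. */
static long concur_scale(long shares, long nr_tasks, long nr_cpus)
{
	long nr = nr_tasks < nr_cpus ? nr_tasks : nr_cpus;

	if (nr < 1)	/* mirrors tg_tasks() clamping to at least 1 */
		nr = 1;
	return nr * shares;	/* then subject to fraction() and the nice -20 cap */
}

int main(void)
{
	printf("concur: %ld\n", concur_scale(1024, 2, 8));	/* 2048 */
	printf("max:    %ld\n", 8 * 1024L);			/* 8192 */
	return 0;
}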

Note: the tracking of tg->tasks is somewhat expensive :-/
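
The cost is the shared atomic; as with tg->load_avg, it is amortized by
caching each cfs_rq's last published value (tg_tasks_contrib) and only
flushing the delta. A standalone C11 sketch of that contribution-caching
pattern, with simplified types and hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long group_tasks;		/* stands in for tg->tasks, shared */

struct rq_state {
	long nr_queued;			/* current local task count */
	long tasks_contrib;		/* last value published into group_tasks */
};

static void publish_tasks(struct rq_state *rq)
{
	long dt = rq->nr_queued - rq->tasks_contrib;

	if (dt) {	/* unchanged rq: no shared cacheline traffic */
		atomic_fetch_add(&group_tasks, dt);
		rq->tasks_contrib = rq->nr_queued;
	}
}

int main(void)
{
	struct rq_state rq = { .nr_queued = 3 };

	publish_tasks(&rq);	/* +3 */
	publish_tasks(&rq);	/* no-op */
	rq.nr_queued = 1;
	publish_tasks(&rq);	/* -2 */
	printf("%ld\n", atomic_load(&group_tasks));	/* 1 */
	return 0;
}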

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/sched/debug.c |  1 +
 kernel/sched/fair.c  | 38 +++++++++++++++++++++++++++++++++++---
 kernel/sched/sched.h |  3 +++
 3 files changed, 39 insertions(+), 3 deletions(-)

--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -593,6 +593,7 @@ int cgroup_mode = 1;
 static const char *cgroup_mode_str[] = {
 	"up",
 	"smp",
+	"concur",
 	"max",
 };

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4210,6 +4210,30 @@ static long calc_max_shares(struct cfs_r
 	return __calc_smp_shares(cfs_rq, tg_shares * nr, max_shares);
 }

+static inline int tg_tasks(struct task_group *tg)
+{
+	return max(1, atomic_long_read(&tg->tasks));
+}
+
+/*
+ * Func: min(fraction(num * tg->shares), nice -20); where
+ *       num = min(nr_tasks, nr_cpus)
+ *
+ * Similar to MAX, except it scales with min(nr_tasks, nr_cpus), which gives
+ * a far more natural distribution. Edge cases can still be created using
+ * CPU affinity.
+ */
+static long calc_concur_shares(struct cfs_rq *cfs_rq)
+{
+	struct task_group *tg = cfs_rq->tg;
+	int nr_cpus = tg_cpus(tg);
+	int nr_tasks = tg_tasks(tg);
+	int nr = min(nr_tasks, nr_cpus);
+	long tg_shares = READ_ONCE(tg->shares);
+	long max_shares = scale_load(sched_prio_to_weight[0]);
+	return __calc_smp_shares(cfs_rq, nr * tg_shares, max_shares);
+}
+
 /*
  * Func: fraction(tg->shares)
  *
@@ -4236,6 +4260,8 @@ static long calc_group_shares(struct cfs
 	if (cgroup_mode == 0)
 		return calc_up_shares(cfs_rq);
 	if (cgroup_mode == 2)
+		return calc_concur_shares(cfs_rq);
+	if (cgroup_mode == 3)
 		return calc_max_shares(cfs_rq);
 
 	return calc_smp_shares(cfs_rq);
@@ -4381,7 +4407,7 @@ static inline bool cfs_rq_is_decayed(str
  */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
 {
-	long delta;
+	long delta, dt;
 	u64 now;
 
 	/*
@@ -4403,16 +4429,19 @@ static inline void update_tg_load_avg(st
 		return;
 
 	delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
-	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
+	dt = cfs_rq->h_nr_queued - cfs_rq->tg_tasks_contrib;
+	if (dt || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
+		atomic_long_add(dt, &cfs_rq->tg->tasks);
 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
+		cfs_rq->tg_tasks_contrib = cfs_rq->h_nr_queued;
 		cfs_rq->last_update_tg_load_avg = now;
 	}
 }
 
 static inline void clear_tg_load_avg(struct cfs_rq *cfs_rq)
 {
-	long delta;
+	long delta, dt;
 	u64 now;
 
 	/*
@@ -4423,8 +4452,11 @@ static inline void clear_tg_load_avg(str

 	now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
 	delta = 0 - cfs_rq->tg_load_avg_contrib;
+	dt = 0 - cfs_rq->tg_tasks_contrib;
 	atomic_long_add(delta, &cfs_rq->tg->load_avg);
+	atomic_long_add(dt, &cfs_rq->tg->tasks);
 	cfs_rq->tg_load_avg_contrib = 0;
+	cfs_rq->tg_tasks_contrib = 0;
 	cfs_rq->last_update_tg_load_avg = now;
 }

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -491,6 +491,8 @@ struct task_group {
 	 * will also be accessed at each tick.
 	 */
 	atomic_long_t		load_avg ____cacheline_aligned;
+	atomic_long_t		tasks;
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -720,6 +722,7 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	u64			last_update_tg_load_avg;
 	unsigned long		tg_load_avg_contrib;
+	unsigned long		tg_tasks_contrib;
 	long			propagate;
 	long			prop_runnable_sum;