[PATCH] sched,workqueue: Use READ_ONCE()/WRITE_ONCE() for wake_cpu accesses

From: Yu Peng

Date: Fri Mar 27 2026 - 03:36:42 EST


task_struct->wake_cpu is used as a wake placement hint by scheduler code
and workqueue's non-strict affinity repatriation path.

These accesses are intentionally lockless; stale values are tolerated and
affect only wakeup placement. Use READ_ONCE()/WRITE_ONCE() to document
that contract and constrain compiler optimizations on the shared accesses.

No functional change intended.

Signed-off-by: Yu Peng <pengyu@xxxxxxxxxx>
---
kernel/sched/core.c | 6 +++---
kernel/sched/sched.h | 2 +-
kernel/workqueue.c | 5 +++--
3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 496dff740dcaf..8a7f46cf30fda 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2567,7 +2567,7 @@ static int migration_cpu_stop(void *data)
update_rq_clock(rq);
rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
} else {
- p->wake_cpu = arg->dest_cpu;
+ WRITE_ONCE(p->wake_cpu, arg->dest_cpu);
}

/*
@@ -3318,7 +3318,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
* it before it went to sleep. This means on wakeup we make the
* previous CPU our target instead of where it really is.
*/
- p->wake_cpu = cpu;
+ WRITE_ONCE(p->wake_cpu, cpu);
}
}

@@ -4227,7 +4227,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);

- cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
+ cpu = select_task_rq(p, READ_ONCE(p->wake_cpu), &wake_flags);
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
delayacct_blkio_end(p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 43bbf0693cca4..127be762e8567 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2294,7 +2294,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
*/
smp_wmb();
WRITE_ONCE(task_thread_info(p)->cpu, cpu);
- p->wake_cpu = cpu;
+ WRITE_ONCE(p->wake_cpu, cpu);
rseq_sched_set_ids_changed(p);
#endif /* CONFIG_SMP */
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b77119d71641a..b5f542d53105d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1284,13 +1284,14 @@ static bool kick_pool(struct worker_pool *pool)
* its affinity scope. Repatriate.
*/
if (!pool->attrs->affn_strict &&
- !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
+ !cpumask_test_cpu(READ_ONCE(p->wake_cpu),
+ pool->attrs->__pod_cpumask)) {
struct work_struct *work = list_first_entry(&pool->worklist,
struct work_struct, entry);
int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
cpu_online_mask);
if (wake_cpu < nr_cpu_ids) {
- p->wake_cpu = wake_cpu;
+ WRITE_ONCE(p->wake_cpu, wake_cpu);
get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
}
}
--
2.43.0