[PATCH v2 1/4] sched: Ensure matching stack state for kcov disable/enable on switch
From: Jann Horn
Date: Wed Mar 18 2026 - 12:48:32 EST
Ensure that kcov is disabled and enabled with the same call stack.
This will be relied on by subsequent patches for recording function
entry/exit events via kcov.
This patch should not affect compilation of normal kernels without KCOV
(though it changes "inline" to "__always_inline").
To: Ingo Molnar <mingo@xxxxxxxxxx>
To: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Jann Horn <jannh@xxxxxxxxxx>
---
kernel/sched/core.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7f77c165a6e..c470f0a669ec 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5072,8 +5072,10 @@ static inline void kmap_local_sched_in(void)
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
+ *
+ * Must be inlined for kcov_prepare_switch().
*/
-static inline void
+static __always_inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
__must_hold(__rq_lockp(rq))
@@ -5149,7 +5151,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
tick_nohz_task_switch();
finish_lock_switch(rq);
finish_arch_post_lock_switch();
- kcov_finish_switch(current);
/*
* kmap_local_sched_out() is invoked with rq::lock held and
* interrupts disabled. There is no requirement for that, but the
@@ -5295,7 +5296,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
switch_to(prev, next, prev);
barrier();
- return finish_task_switch(prev);
+ rq = finish_task_switch(prev);
+ /*
+ * This has to happen outside finish_task_switch() to ensure that
+ * entry/exit records are balanced.
+ */
+ kcov_finish_switch(current);
+ return rq;
}
/*
--
2.53.0.851.ga537e3e6e9-goog