[tip: sched/core] sched/topology: Remove sched_domain_shared allocation with sd_data
From: tip-bot2 for K Prateek Nayak
Date: Wed Mar 18 2026 - 04:11:24 EST
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 10febd397591d93f42adb743c2c664041e7f1bcb
Gitweb: https://git.kernel.org/tip/10febd397591d93f42adb743c2c664041e7f1bcb
Author: K Prateek Nayak <kprateek.nayak@xxxxxxx>
AuthorDate: Thu, 12 Mar 2026 04:44:30
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Wed, 18 Mar 2026 09:06:49 +01:00
sched/topology: Remove sched_domain_shared allocation with sd_data
Now that "sd->shared" assignments are using the sched_domain_shared
objects allocated with s_data, remove the sd_data-based allocations.
Signed-off-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Valentin Schneider <vschneid@xxxxxxxxxx>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Tested-by: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Link: https://patch.msgid.link/20260312044434.1974-6-kprateek.nayak@xxxxxxx
---
include/linux/sched/topology.h | 1 -
kernel/sched/topology.c | 19 -------------------
2 files changed, 20 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index a1e1032..51c2958 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -172,7 +172,6 @@ typedef int (*sched_domain_flags_f)(void);
struct sd_data {
struct sched_domain *__percpu *sd;
- struct sched_domain_shared *__percpu *sds;
struct sched_group *__percpu *sg;
struct sched_group_capacity *__percpu *sgc;
};
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index b19d84f..4315059 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1609,9 +1609,6 @@ static void claim_allocations(int cpu, struct s_data *d)
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
- *per_cpu_ptr(sdd->sds, cpu) = NULL;
-
if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
@@ -2390,10 +2387,6 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sdd->sd)
return -ENOMEM;
- sdd->sds = alloc_percpu(struct sched_domain_shared *);
- if (!sdd->sds)
- return -ENOMEM;
-
sdd->sg = alloc_percpu(struct sched_group *);
if (!sdd->sg)
return -ENOMEM;
@@ -2404,7 +2397,6 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
- struct sched_domain_shared *sds;
struct sched_group *sg;
struct sched_group_capacity *sgc;
@@ -2415,13 +2407,6 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
*per_cpu_ptr(sdd->sd, j) = sd;
- sds = kzalloc_node(sizeof(struct sched_domain_shared),
- GFP_KERNEL, cpu_to_node(j));
- if (!sds)
- return -ENOMEM;
-
- *per_cpu_ptr(sdd->sds, j) = sds;
-
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sg)
@@ -2463,8 +2448,6 @@ static void __sdt_free(const struct cpumask *cpu_map)
kfree(*per_cpu_ptr(sdd->sd, j));
}
- if (sdd->sds)
- kfree(*per_cpu_ptr(sdd->sds, j));
if (sdd->sg)
kfree(*per_cpu_ptr(sdd->sg, j));
if (sdd->sgc)
@@ -2472,8 +2455,6 @@ static void __sdt_free(const struct cpumask *cpu_map)
}
free_percpu(sdd->sd);
sdd->sd = NULL;
- free_percpu(sdd->sds);
- sdd->sds = NULL;
free_percpu(sdd->sg);
sdd->sg = NULL;
free_percpu(sdd->sgc);