[PATCH 1/3] sched/topology: Introduce arch hooks for asym packing

From: Christian Loehle

Date: Wed Mar 25 2026 - 14:22:08 EST


Prepare for arch-specific asym packing logic.

No functional impact intended.

Signed-off-by: Christian Loehle <christian.loehle@xxxxxxx>
---
include/linux/arch_topology.h | 24 ++++++++++++++++++++++++
include/linux/sched/topology.h | 9 +++++++++
kernel/sched/fair.c | 16 ----------------
kernel/sched/topology.c | 8 ++++++++
4 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index ebd7f8935f96..3ab571b287ef 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -94,6 +94,11 @@ void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
void freq_inv_set_max_ratio(int cpu, u64 max_rate);
+void arch_topology_init_cppc_asym(void);
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+bool topology_init_cppc_asym_packing(int __percpu *priority_var);
+#endif

/*
* Architectures like ARM64 don't have reliable architectural way to get SMT
@@ -105,10 +110,29 @@ static inline bool topology_core_has_smt(int cpu)
return cpu_topology[cpu].thread_id != -1;
}

+#ifdef CONFIG_ARM64
+#undef arch_sched_asym_flags
+#define arch_sched_asym_flags arm64_arch_sched_asym_flags
+int arm64_arch_asym_cpu_priority(int cpu);
+int arm64_arch_sched_asym_flags(void);
+#endif
+
#else

static inline bool topology_core_has_smt(int cpu) { return false; }

#endif /* CONFIG_GENERIC_ARCH_TOPOLOGY */

+/*
+ * Architectures may override this to provide a custom CPU priority for
+ * asymmetric packing.
+ */
+#ifndef arch_asym_cpu_priority
+#define arch_asym_cpu_priority topology_arch_asym_cpu_priority
+static inline int topology_arch_asym_cpu_priority(int cpu)
+{
+ return -cpu;
+}
+#endif
+
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 45c0022b91ce..48cfa89df0fc 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -50,6 +50,15 @@ extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl,
extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);

extern int arch_asym_cpu_priority(int cpu);
+extern int arch_sched_asym_flags(void);
+
+/*
+ * The margin used when comparing CPU capacities.
+ * is 'cap1' noticeably greater than 'cap2'
+ *
+ * (default: ~5%)
+ */
+#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)

struct sched_domain_attr {
int relax_domain_level;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bf948db905ed..c5f8aa3ad535 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -88,14 +88,6 @@ static int __init setup_sched_thermal_decay_shift(char *str)
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);

-/*
- * For asym packing, by default the lower numbered CPU has higher priority.
- */
-int __weak arch_asym_cpu_priority(int cpu)
-{
- return -cpu;
-}
-
/*
* The margin used when comparing utilization with CPU capacity.
*
@@ -103,14 +95,6 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

-/*
- * The margin used when comparing CPU capacities.
- * is 'cap1' noticeably greater than 'cap2'
- *
- * (default: ~5%)
- */
-#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
-
#ifdef CONFIG_CFS_BANDWIDTH
/*
* Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 32dcddaead82..b0c590dfdb01 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1742,6 +1742,14 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}

+#ifndef arch_sched_asym_flags
+#define arch_sched_asym_flags topology_arch_sched_asym_flags
+static inline int topology_arch_sched_asym_flags(void)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SCHED_SMT
int cpu_smt_flags(void)
{
--
2.34.1