[PATCH RESEND v4 4/4] x86/segment: Implement loadsegment()/savesegment() macros with static inline helpers

From: Uros Bizjak

Date: Mon Mar 30 2026 - 01:59:00 EST


Convert the __loadsegment_simple() and savesegment() macro
implementations into static inline functions generated via small
helper macros.

Historically loadsegment() and savesegment() relied on macros that
embedded inline assembly. This approach obscures types, complicates
debugging, and makes the call sites harder for the compiler and static
analysis tools to reason about.

This change is purely mechanical and does not alter the generated code,
while improving readability, type safety, and compiler visibility of
the helpers.

Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Suggested-by: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx>
---
v3: New patch in series.
v4: Use u16 instead of unsigned short.
---
arch/x86/include/asm/segment.h | 57 +++++++++++++++++++++-------------
1 file changed, 35 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 0f4283dcd0c4..dbd90fede5e7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -302,19 +302,17 @@ extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_I
* failure to fully clear the cached descriptor is only observable for
* FS and GS.
*/
-#define __loadsegment_simple(seg, value) \
-do { \
- unsigned short __val = (value); \
- \
- asm volatile(" \n" \
- "1: movl %k0,%%" #seg " \n" \
+#define LOAD_SEGMENT(seg) \
+static inline void __loadsegment_##seg(u16 value) \
+{ \
+ asm volatile("1: movl %k0,%%" #seg "\n" \
_ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k0)\
- : "+r" (__val) : : "memory"); \
-} while (0)
+ : "+r" (value) : : "memory"); \
+}

-#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
-#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
-#define __loadsegment_es(value) __loadsegment_simple(es, (value))
+LOAD_SEGMENT(ss)
+LOAD_SEGMENT(ds)
+LOAD_SEGMENT(es)

#ifdef CONFIG_X86_32

@@ -322,19 +320,16 @@ do { \
* On 32-bit systems, the hidden parts of FS and GS are unobservable if
* the selector is NULL, so there's no funny business here.
*/
-#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
-#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))
+LOAD_SEGMENT(fs)
+LOAD_SEGMENT(gs)

#else

-static inline void __loadsegment_fs(unsigned short value)
+static inline void __loadsegment_fs(u16 value)
{
- asm volatile(" \n"
- "1: movw %0, %%fs \n"
- "2: \n"
-
+ asm volatile("1: movw %0, %%fs\n"
+ "2:\n"
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_CLEAR_FS)
-
: : ASM_INPUT_RM (value) : "memory");
}

@@ -342,13 +337,31 @@ static inline void __loadsegment_fs(unsigned short value)

#endif

-#define loadsegment(seg, value) __loadsegment_ ## seg (value)
+#undef LOAD_SEGMENT
+
+#define loadsegment(seg, val) __loadsegment_##seg(val)

/*
* Save a segment register away:
*/
-#define savesegment(seg, value) \
- asm volatile("movl %%" #seg ",%k0" : "=r" (value))
+#define SAVE_SEGMENT(seg) \
+static inline unsigned long __savesegment_##seg(void) \
+{ \
+ unsigned long v; \
+ asm volatile("movl %%" #seg ",%k0" : "=r" (v)); \
+ return v; \
+}
+
+SAVE_SEGMENT(cs)
+SAVE_SEGMENT(ss)
+SAVE_SEGMENT(ds)
+SAVE_SEGMENT(es)
+SAVE_SEGMENT(fs)
+SAVE_SEGMENT(gs)
+
+#undef SAVE_SEGMENT
+
+#define savesegment(seg, var) ((var) = __savesegment_##seg())

#endif /* !__ASSEMBLER__ */
#endif /* __KERNEL__ */
--
2.53.0