[patch 7/8] x86/vdso: Prepare for robust futex unlock support

From: Thomas Gleixner

Date: Mon Mar 16 2026 - 13:16:58 EST


There will be a VDSO function to unlock non-contended robust futexes in
user space. The unlock sequence is racy vs. clearing the list_op_pending
pointer in the task's robust list head. To plug this race the kernel needs
to know the instruction window so it can clear the pointer when the task is
interrupted within that race window.

Add the symbols to the vdso2c generator and use them in the VDSO VMA code
to update the critical section addresses in mm_struct::futex whenever the
VDSO is mapped or remapped.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
---
arch/x86/entry/vdso/vma.c | 20 ++++++++++++++++++++
arch/x86/include/asm/vdso.h | 3 +++
arch/x86/tools/vdso2c.c | 17 ++++++++++-------
3 files changed, 33 insertions(+), 7 deletions(-)

--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -73,6 +73,23 @@ static void vdso_fix_landing(const struc
regs->ip = new_vma->vm_start + ipoffset;
}

+#ifdef CONFIG_FUTEX_ROBUST_UNLOCK
+static void vdso_futex_robust_unlock_update_ips(void)
+{
+ const struct vdso_image *image = current->mm->context.vdso_image;
+ unsigned long vdso = (unsigned long) current->mm->context.vdso;
+
+ current->mm->futex.unlock_cs_start_ip =
+ vdso + image->sym___vdso_futex_robust_try_unlock_cs_start;
+ current->mm->futex.unlock_cs_success_ip =
+ vdso + image->sym___vdso_futex_robust_try_unlock_cs_success;
+ current->mm->futex.unlock_cs_end_ip =
+ vdso + image->sym___vdso_futex_robust_try_unlock_cs_end;
+}
+#else
+static inline void vdso_futex_robust_unlock_update_ips(void) { }
+#endif
+
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -80,6 +97,7 @@ static int vdso_mremap(const struct vm_s

vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start;
+ vdso_futex_robust_unlock_update_ips();

return 0;
}
@@ -189,6 +207,8 @@ static int map_vdso(const struct vdso_im
current->mm->context.vdso = (void __user *)text_start;
current->mm->context.vdso_image = image;

+ vdso_futex_robust_unlock_update_ips();
+
up_fail:
mmap_write_unlock(mm);
return ret;
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -25,6 +25,9 @@ struct vdso_image {
long sym_int80_landing_pad;
long sym_vdso32_sigreturn_landing_pad;
long sym_vdso32_rt_sigreturn_landing_pad;
+ long sym___vdso_futex_robust_try_unlock_cs_start;
+ long sym___vdso_futex_robust_try_unlock_cs_success;
+ long sym___vdso_futex_robust_try_unlock_cs_end;
};

extern const struct vdso_image vdso64_image;
--- a/arch/x86/tools/vdso2c.c
+++ b/arch/x86/tools/vdso2c.c
@@ -75,13 +75,16 @@ struct vdso_sym {
};

struct vdso_sym required_syms[] = {
- {"VDSO32_NOTE_MASK", true},
- {"__kernel_vsyscall", true},
- {"__kernel_sigreturn", true},
- {"__kernel_rt_sigreturn", true},
- {"int80_landing_pad", true},
- {"vdso32_rt_sigreturn_landing_pad", true},
- {"vdso32_sigreturn_landing_pad", true},
+ {"VDSO32_NOTE_MASK", true},
+ {"__kernel_vsyscall", true},
+ {"__kernel_sigreturn", true},
+ {"__kernel_rt_sigreturn", true},
+ {"int80_landing_pad", true},
+ {"vdso32_rt_sigreturn_landing_pad", true},
+ {"vdso32_sigreturn_landing_pad", true},
+ {"__vdso_futex_robust_try_unlock_cs_start", true},
+ {"__vdso_futex_robust_try_unlock_cs_success", true},
+ {"__vdso_futex_robust_try_unlock_cs_end", true},
};

__attribute__((format(printf, 1, 2))) __attribute__((noreturn))