[PATCH v2 04/11] uaccess: Provide unsafe_atomic_store_release_user()
From: Thomas Gleixner
Date: Thu Mar 19 2026 - 19:27:20 EST
The upcoming support for unlocking robust futexes in the kernel requires
store release semantics. Syscalls do not imply memory ordering on all
architectures so the unlock operation requires a barrier.
This barrier can be avoided when stores imply release like on x86.
Provide a generic version with an smp_mb() before the unsafe_put_user(),
which can be overridden by architectures.
Also provide an ARCH_STORE_IMPLIES_RELEASE Kconfig option, which can be
selected by architectures where store implies release, so that the smp_mb()
in the generic implementation can be avoided.
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
---
V2: New patch
---
arch/Kconfig | 4 ++++
include/linux/uaccess.h | 9 +++++++++
2 files changed, 13 insertions(+)
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -403,6 +403,10 @@ config ARCH_32BIT_OFF_T
config ARCH_32BIT_USTAT_F_TINODE
bool
+# Selected by architectures when plain stores have release semantics
+config ARCH_STORE_IMPLIES_RELEASE
+ bool
+
config HAVE_ASM_MODVERSIONS
bool
help
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -644,6 +644,15 @@ static inline void user_access_restore(u
#define user_read_access_end user_access_end
#endif
+#ifndef unsafe_atomic_store_release_user
+# define unsafe_atomic_store_release_user(val, uptr, elbl) \
+ do { \
+ if (!IS_ENABLED(CONFIG_ARCH_STORE_IMPLIES_RELEASE)) \
+ smp_mb(); \
+ unsafe_put_user(val, uptr, elbl); \
+ } while (0)
+#endif
+
/* Define RW variant so the below _mode macro expansion works */
#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
#define user_rw_access_begin(u, s) user_access_begin(u, s)