2024-04-17 17:58:49

by Uros Bizjak

Subject: [PATCH] locking/atomic/x86: Merge __arch{,_try}_cmpxchg64_emu_local() with __arch{,_try}_cmpxchg64_emu()

The __arch{,_try}_cmpxchg64_emu() macros are almost identical to their
local variants, __arch{,_try}_cmpxchg64_emu_local(), differing only in
their lock prefixes.

Merge each macro with its local variant by introducing additional macro
parameters that pass the lock location and the lock prefix in from the
respective static inline functions.

No functional change intended.
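
A minimal, x86-only user-space sketch of the same parameterization trick
(illustrative only, not kernel code; the __my_add()/my_add{,_local}() names
are made up). It relies on string-literal concatenation in the asm template
and on the preprocessor accepting an empty macro argument:

#include <stdio.h>

/*
 * Hypothetical macro: _lock is a string literal pasted in front of the
 * asm template, so passing an empty argument yields the non-locked form.
 */
#define __my_add(_ptr, _val, _lock)                             \
({                                                              \
        asm volatile(_lock "addl %1, %0"                        \
                     : "+m" (*(_ptr))                           \
                     : "ir" (_val)                              \
                     : "memory");                               \
})

static inline void my_add(volatile int *ptr, int val)
{
        __my_add(ptr, val, "lock; ");   /* SMP-safe, locked RMW */
}

static inline void my_add_local(volatile int *ptr, int val)
{
        __my_add(ptr, val, );           /* empty argument: no lock prefix */
}

int main(void)
{
        volatile int v = 1;

        my_add(&v, 2);
        my_add_local(&v, 3);
        printf("%d\n", v);              /* prints 6 */
        return 0;
}

The same mechanism lets the merged __arch{,_try}_cmpxchg64_emu() in the
patch below take an empty _lock_loc/_lock pair for the _local callers, so
both the LOCK_PREFIX_HERE annotation and the "lock; " prefix simply drop
out of the expansion.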

Signed-off-by: Uros Bizjak <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
---
arch/x86/include/asm/cmpxchg_32.h | 56 ++++++-------------------------
1 file changed, 10 insertions(+), 46 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 9dedc13d5a77..ed2797f132ce 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -86,14 +86,14 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
* to simulate the cmpxchg8b on the 80386 and 80486 CPU.
*/

-#define __arch_cmpxchg64_emu(_ptr, _old, _new) \
+#define __arch_cmpxchg64_emu(_ptr, _old, _new, _lock_loc, _lock) \
({ \
union __u64_halves o = { .full = (_old), }, \
n = { .full = (_new), }; \
\
- asm volatile(ALTERNATIVE(LOCK_PREFIX_HERE \
+ asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- "lock; cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
: [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high), "S" (_ptr) \
@@ -104,40 +104,25 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,

static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
- return __arch_cmpxchg64_emu(ptr, old, new);
+ return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_cmpxchg64 arch_cmpxchg64

-#define __arch_cmpxchg64_emu_local(_ptr, _old, _new) \
-({ \
- union __u64_halves o = { .full = (_old), }, \
- n = { .full = (_new), }; \
- \
- asm volatile(ALTERNATIVE("call cmpxchg8b_emu", \
- "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
- : [ptr] "+m" (*(_ptr)), \
- "+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
- : "memory"); \
- \
- o.full; \
-})
-
static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
- return __arch_cmpxchg64_emu_local(ptr, old, new);
+ return __arch_cmpxchg64_emu(ptr, old, new, ,);
}
#define arch_cmpxchg64_local arch_cmpxchg64_local

-#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new) \
+#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new, _lock_loc, _lock) \
({ \
union __u64_halves o = { .full = *(_oldp), }, \
n = { .full = (_new), }; \
bool ret; \
\
- asm volatile(ALTERNATIVE(LOCK_PREFIX_HERE \
+ asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
- "lock; cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+ _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
: CC_OUT(e) (ret), \
[ptr] "+m" (*(_ptr)), \
@@ -153,34 +138,13 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64

static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
- return __arch_try_cmpxchg64_emu(ptr, oldp, new);
+ return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_try_cmpxchg64 arch_try_cmpxchg64

-#define __arch_try_cmpxchg64_emu_local(_ptr, _oldp, _new) \
-({ \
- union __u64_halves o = { .full = *(_oldp), }, \
- n = { .full = (_new), }; \
- bool ret; \
- \
- asm volatile(ALTERNATIVE("call cmpxchg8b_emu", \
- "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
- CC_SET(e) \
- : CC_OUT(e) (ret), \
- [ptr] "+m" (*(_ptr)), \
- "+a" (o.low), "+d" (o.high) \
- : "b" (n.low), "c" (n.high), "S" (_ptr) \
- : "memory"); \
- \
- if (unlikely(!ret)) \
- *(_oldp) = o.full; \
- \
- likely(ret); \
-})
-
static __always_inline bool arch_try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
- return __arch_try_cmpxchg64_emu_local(ptr, oldp, new);
+ return __arch_try_cmpxchg64_emu(ptr, oldp, new, ,);
}
#define arch_try_cmpxchg64_local arch_try_cmpxchg64_local

--
2.42.0
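
For readers new to the try_cmpxchg*() family: on failure the macro stores
the value actually observed in memory back through the old-value pointer
and returns false; that is what the "*(_oldp) = o.full" writeback visible
in the (now removed) local variant implements. A rough user-space
approximation of that contract, using the GCC/Clang builtin instead of the
kernel's cmpxchg8b/cmpxchg8b_emu path (illustrative only, the helper name
is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline bool try_cmpxchg64_sketch(uint64_t *ptr, uint64_t *oldp, uint64_t new)
{
        /*
         * On mismatch, __atomic_compare_exchange_n() writes the current
         * value of *ptr into *oldp and returns false, mirroring the
         * "*(_oldp) = o.full" fallback in the kernel macro.
         */
        return __atomic_compare_exchange_n(ptr, oldp, new, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
        uint64_t val = 1, expected = 2;

        if (!try_cmpxchg64_sketch(&val, &expected, 5))
                printf("failed, observed %llu\n", (unsigned long long)expected);
        return 0;
}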



Subject: [tip: locking/core] locking/atomic/x86: Merge __arch{,_try}_cmpxchg64_emu_local() with __arch{,_try}_cmpxchg64_emu()

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 33eb8ab4ec83cf0975d0113966c7e71cd6be60b2
Gitweb: https://git.kernel.org/tip/33eb8ab4ec83cf0975d0113966c7e71cd6be60b2
Author: Uros Bizjak <[email protected]>
AuthorDate: Wed, 17 Apr 2024 19:58:12 +02:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Wed, 24 Apr 2024 11:45:13 +02:00

locking/atomic/x86: Merge __arch{,_try}_cmpxchg64_emu_local() with __arch{,_try}_cmpxchg64_emu()

The __arch{,_try}_cmpxchg64_emu() macros are almost identical to their
local variants, __arch{,_try}_cmpxchg64_emu_local(), differing only in
their lock prefixes.

Merge each macro with its local variant by introducing additional macro
parameters that pass the lock location and the lock prefix in from the
respective static inline functions.

No functional change intended.

Signed-off-by: Uros Bizjak <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Link: https://lore.kernel.org/r/[email protected]