While studying riscv's cmpxchg.h file, I got really interested in
understanding how RISC-V asm implements the different versions of
{cmp,}xchg.
Once I understood the pattern, it made sense to remove the duplication
and create macros that make it easier to see exactly what changes
between the versions: instruction suffixes & barriers.
I split those changes into 3 levels for each of cmpxchg and xchg,
resulting in a total of 6 patches. I did this so it becomes easier to
review, and to drop the last levels if desired, but I have no issue
squashing them if that's better.
Please provide comments.
Thanks!
Leo
Changes since v1:
- Fixed patch 4/6 suffix from 'w.aqrl' to '.w.aqrl', to avoid build error
Leonardo Bras (6):
riscv/cmpxchg: Deduplicate cmpxchg() asm functions
riscv/cmpxchg: Deduplicate cmpxchg() macros
riscv/cmpxchg: Deduplicate arch_cmpxchg() macros
riscv/cmpxchg: Deduplicate xchg() asm functions
riscv/cmpxchg: Deduplicate xchg() macros
riscv/cmpxchg: Deduplicate arch_xchg() macros
arch/riscv/include/asm/cmpxchg.h | 316 +++++++------------------------
1 file changed, 64 insertions(+), 252 deletions(-)
--
2.40.0
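For anyone skimming the series: the end result is that every ordering
variant of cmpxchg boils down to a single call site where only the sc
suffix and the prepend/append barriers differ. A reviewer-aid summary
(copied from the final state after patch 2, not an extra change):

	/* relaxed: no suffix, no barriers */
	#define __cmpxchg_relaxed(ptr, old, new, size) \
		___cmpxchg(ptr, old, new, size, "", "", "")

	/* acquire: append RISCV_ACQUIRE_BARRIER */
	#define __cmpxchg_acquire(ptr, old, new, size) \
		___cmpxchg(ptr, old, new, size, "", "", RISCV_ACQUIRE_BARRIER)

	/* release: prepend RISCV_RELEASE_BARRIER */
	#define __cmpxchg_release(ptr, old, new, size) \
		___cmpxchg(ptr, old, new, size, "", RISCV_RELEASE_BARRIER, "")

	/* fully ordered: sc.rl plus trailing full fence */
	#define __cmpxchg(ptr, old, new, size) \
		___cmpxchg(ptr, old, new, size, ".rl", "", " fence rw, rw\n")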
In this header every cmpxchg define (_relaxed, _acquire, _release,
vanilla) contains its own asm block, both for 4-byte and 8-byte
variables, for a total of 8 versions of mostly the same asm.
This is usually bad, as it means any change may have to be made in up
to 8 different places.
Unify those versions by creating a new define with enough parameters to
generate any of the previous 8 versions.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 102 ++++++++-----------------------
1 file changed, 24 insertions(+), 78 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 12debce235e52..21984d24cbfe7 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -163,6 +163,22 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
+
+#define ___cmpxchg(lr_sfx, sc_sfx, prepend, append) \
+{ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr" lr_sfx " %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+}
+
#define __cmpxchg_relaxed(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@@ -172,26 +188,10 @@
register unsigned int __rc; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".w", ".w", "", ""); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".d", ".d", "", ""); \
break; \
default: \
BUILD_BUG(); \
@@ -216,28 +216,10 @@
register unsigned int __rc; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".w", ".w", "", RISCV_ACQUIRE_BARRIER); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".d", ".d", "", RISCV_ACQUIRE_BARRIER); \
break; \
default: \
BUILD_BUG(); \
@@ -262,28 +244,10 @@
register unsigned int __rc; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".w", ".w", RISCV_RELEASE_BARRIER, ""); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".d", ".d", RISCV_RELEASE_BARRIER, ""); \
break; \
default: \
BUILD_BUG(); \
@@ -308,28 +272,10 @@
register unsigned int __rc; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- " fence rw, rw\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".w", ".w.rl", "", " fence rw, rw\n"); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- " fence rw, rw\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ ___cmpxchg(".d", ".d.rl", "", " fence rw, rw\n"); \
break; \
default: \
BUILD_BUG(); \
--
2.40.0
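Reviewer aid (hand-expanded, not part of the patch, so please double-check
against the macro above): with the new helper, the fully-ordered 4-byte
case ___cmpxchg(".w", ".w.rl", "", " fence rw, rw\n") should expand to the
same asm block that was previously open-coded:

	__asm__ __volatile__ (
		""				/* prepend: empty */
		"0: lr.w %0, %2\n"
		" bne %0, %z3, 1f\n"
		" sc.w.rl %1, %z4, %2\n"
		" bnez %1, 0b\n"
		" fence rw, rw\n"		/* append */
		"1:\n"
		: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)
		: "rJ" ((long)__old), "rJ" (__new)
		: "memory");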
Every cmpxchg define (_relaxed, _acquire, _release, vanilla) contains its
own define for creating tmp variables and selecting the correct asm code
for a given variable size.
All those defines are mostly the same code (other than the specific
barriers), so there is no need to keep the 4 copies.
Unify them under a more general define that can reproduce the previous 4
versions.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 72 ++++++--------------------------
1 file changed, 12 insertions(+), 60 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 21984d24cbfe7..c7a13eec4dbcc 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -164,8 +164,8 @@
* indicated by comparing RETURN with OLD.
*/
-#define ___cmpxchg(lr_sfx, sc_sfx, prepend, append) \
-{ \
+#define ____cmpxchg(lr_sfx, sc_sfx, prepend, append) \
+({ \
__asm__ __volatile__ ( \
prepend \
"0: lr" lr_sfx " %0, %2\n" \
@@ -177,9 +177,9 @@
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
-}
+})
-#define __cmpxchg_relaxed(ptr, old, new, size) \
+#define ___cmpxchg(ptr, old, new, size, sc_sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
@@ -188,10 +188,10 @@
register unsigned int __rc; \
switch (size) { \
case 4: \
- ___cmpxchg(".w", ".w", "", ""); \
+ ____cmpxchg(".w", ".w" sc_sfx, prepend, append); \
break; \
case 8: \
- ___cmpxchg(".d", ".d", "", ""); \
+ ____cmpxchg(".d", ".d" sc_sfx, prepend, append); \
break; \
default: \
BUILD_BUG(); \
@@ -199,6 +199,9 @@
__ret; \
})
+#define __cmpxchg_relaxed(ptr, old, new, size) \
+ ___cmpxchg(ptr, old, new, size, "", "", "")
+
#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
@@ -208,24 +211,7 @@
})
#define __cmpxchg_acquire(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- ___cmpxchg(".w", ".w", "", RISCV_ACQUIRE_BARRIER); \
- break; \
- case 8: \
- ___cmpxchg(".d", ".d", "", RISCV_ACQUIRE_BARRIER); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___cmpxchg(ptr, old, new, size, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
@@ -236,24 +222,7 @@
})
#define __cmpxchg_release(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- ___cmpxchg(".w", ".w", RISCV_RELEASE_BARRIER, ""); \
- break; \
- case 8: \
- ___cmpxchg(".d", ".d", RISCV_RELEASE_BARRIER, ""); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___cmpxchg(ptr, old, new, size, "", RISCV_RELEASE_BARRIER, "")
#define arch_cmpxchg_release(ptr, o, n) \
({ \
@@ -264,24 +233,7 @@
})
#define __cmpxchg(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- ___cmpxchg(".w", ".w.rl", "", " fence rw, rw\n"); \
- break; \
- case 8: \
- ___cmpxchg(".d", ".d.rl", "", " fence rw, rw\n"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___cmpxchg(ptr, old, new, size, ".rl", "", " fence rw, rw\n")
#define arch_cmpxchg(ptr, o, n) \
({ \
--
2.40.0
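Reviewer aid (hand-traced, not part of the patch; p/o/n stand in for the
caller's arguments): after this change the acquire variant is expected to
resolve as follows for a 4-byte access:

	__cmpxchg_acquire(p, o, n, 4)
	  -> ___cmpxchg(p, o, n, 4, "", "", RISCV_ACQUIRE_BARRIER)
	  -> ____cmpxchg(".w", ".w" "", "", RISCV_ACQUIRE_BARRIER)	/* case 4 */

i.e. the same lr.w/sc.w pair as before, with RISCV_ACQUIRE_BARRIER
appended after the sc.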
Every arch_cmpxchg define (_relaxed, _acquire, _release, vanilla) contains
its own define for creating tmp variables and calling the correct internal
macro for the desired version.
Those defines are mostly the same code, so there is no need to keep the 4
copies.
Create a helper define to avoid code duplication.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 31 ++++++++++---------------------
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index c7a13eec4dbcc..e49a2edc6f36c 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -202,46 +202,35 @@
#define __cmpxchg_relaxed(ptr, old, new, size) \
___cmpxchg(ptr, old, new, size, "", "", "")
-#define arch_cmpxchg_relaxed(ptr, o, n) \
+#define _arch_cmpxchg(order, ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
+ (__typeof__(*(ptr))) __cmpxchg ## order((ptr), \
+ _o_, _n_, \
+ sizeof(*(ptr)));\
})
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+ _arch_cmpxchg(_relaxed, ptr, o, n)
+
#define __cmpxchg_acquire(ptr, old, new, size) \
___cmpxchg(ptr, old, new, size, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg(_acquire, ptr, o, n)
#define __cmpxchg_release(ptr, old, new, size) \
___cmpxchg(ptr, old, new, size, "", RISCV_RELEASE_BARRIER, "")
#define arch_cmpxchg_release(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg(_release, ptr, o, n)
#define __cmpxchg(ptr, old, new, size) \
___cmpxchg(ptr, old, new, size, ".rl", "", " fence rw, rw\n")
#define arch_cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg(, ptr, o, n)
#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
--
2.40.0
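Reviewer aid (hand-traced, not part of the patch; p/o/n stand in for the
caller's arguments): the token pasting in _arch_cmpxchg() is expected to
work out as:

	arch_cmpxchg_acquire(p, o, n)
	  -> _arch_cmpxchg(_acquire, p, o, n)
	  -> (__typeof__(*(p))) __cmpxchg_acquire((p), _o_, _n_, sizeof(*(p)))

	arch_cmpxchg(p, o, n)
	  -> _arch_cmpxchg(, p, o, n)		/* empty 'order' argument */
	  -> (__typeof__(*(p))) __cmpxchg((p), _o_, _n_, sizeof(*(p)))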
In this header every xchg define (_relaxed, _acquire, _release, vanilla)
contains its own asm block, both for 4-byte and 8-byte variables, for a
total of 8 versions of mostly the same asm.
This is usually bad, as it means any change may have to be made in up to 8
different places.
Unify those versions by creating a new define with enough parameters to
generate any of the previous 8 versions.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 63 ++++++++++----------------------
1 file changed, 19 insertions(+), 44 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index e49a2edc6f36c..715bf61e72c82 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -11,6 +11,17 @@
#include <asm/barrier.h>
#include <asm/fence.h>
+#define ___xchg(sfx, prepend, append) \
+({ \
+ __asm__ __volatile__ ( \
+ prepend \
+ " amoswap" sfx " %0, %2, %1\n" \
+ append \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+})
+
#define __xchg_relaxed(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@@ -18,18 +29,10 @@
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".w", "", ""); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".d", "", ""); \
break; \
default: \
BUILD_BUG(); \
@@ -51,20 +54,10 @@
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".w", "", RISCV_ACQUIRE_BARRIER); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".d", "", RISCV_ACQUIRE_BARRIER); \
break; \
default: \
BUILD_BUG(); \
@@ -86,20 +79,10 @@
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".w", RISCV_RELEASE_BARRIER, ""); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".d", RISCV_RELEASE_BARRIER, ""); \
break; \
default: \
BUILD_BUG(); \
@@ -121,18 +104,10 @@
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".w.aqrl", "", ""); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ ___xchg(".d.aqrl", "", ""); \
break; \
default: \
BUILD_BUG(); \
--
2.40.0
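Reviewer aid (hand-expanded, not part of the patch, so please double-check
against the macro above): with the new helper, the fully-ordered 4-byte
case ___xchg(".w.aqrl", "", "") should produce the same asm as the
previously open-coded version:

	__asm__ __volatile__ (
		""				/* prepend: empty */
		" amoswap.w.aqrl %0, %2, %1\n"
		""				/* append: empty */
		: "=r" (__ret), "+A" (*__ptr)
		: "r" (__new)
		: "memory");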
Every xchg define (_relaxed, _acquire, _release, vanilla) contains its own
define for creating tmp variables and selecting the correct asm code for a
given variable size.
All those defines are mostly the same code (other than the specific
barriers), so there is no need to keep the 4 copies.
Unify them under a more general define that can reproduce the previous 4
versions.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 62 ++++++--------------------------
1 file changed, 10 insertions(+), 52 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 715bf61e72c82..23da4d8e6f0c8 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -11,7 +11,7 @@
#include <asm/barrier.h>
#include <asm/fence.h>
-#define ___xchg(sfx, prepend, append) \
+#define ____xchg(sfx, prepend, append) \
({ \
__asm__ __volatile__ ( \
prepend \
@@ -22,17 +22,17 @@
: "memory"); \
})
-#define __xchg_relaxed(ptr, new, size) \
+#define ___xchg(ptr, new, size, sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- ___xchg(".w", "", ""); \
+ ____xchg(".w" sfx, prepend, append); \
break; \
case 8: \
- ___xchg(".d", "", ""); \
+ ____xchg(".d" sfx, prepend, append); \
break; \
default: \
BUILD_BUG(); \
@@ -40,6 +40,9 @@
__ret; \
})
+#define __xchg_relaxed(ptr, new, size) \
+ ___xchg(ptr, new, size, "", "", "")
+
#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
@@ -48,22 +51,7 @@
})
#define __xchg_acquire(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- ___xchg(".w", "", RISCV_ACQUIRE_BARRIER); \
- break; \
- case 8: \
- ___xchg(".d", "", RISCV_ACQUIRE_BARRIER); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___xchg(ptr, new, size, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_xchg_acquire(ptr, x) \
({ \
@@ -73,22 +61,7 @@
})
#define __xchg_release(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- ___xchg(".w", RISCV_RELEASE_BARRIER, ""); \
- break; \
- case 8: \
- ___xchg(".d", RISCV_RELEASE_BARRIER, ""); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___xchg(ptr, new, size, "", RISCV_RELEASE_BARRIER, "")
#define arch_xchg_release(ptr, x) \
({ \
@@ -98,22 +71,7 @@
})
#define __xchg(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- ___xchg(".w.aqrl", "", ""); \
- break; \
- case 8: \
- ___xchg(".d.aqrl", "", ""); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ ___xchg(ptr, new, size, ".aqrl", "", "")
#define arch_xchg(ptr, x) \
({ \
--
2.40.0
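Reviewer aid (hand-traced, not part of the patch; p/n stand in for the
caller's arguments): the acquire variant is expected to resolve as follows
for an 8-byte access:

	__xchg_acquire(p, n, 8)
	  -> ___xchg(p, n, 8, "", "", RISCV_ACQUIRE_BARRIER)
	  -> ____xchg(".d" "", "", RISCV_ACQUIRE_BARRIER)	/* case 8 */

i.e. the same amoswap.d as before, with RISCV_ACQUIRE_BARRIER appended.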
Every arch_xchg define (_relaxed, _acquire, _release, vanilla) contains
its own define for creating tmp variables and calling the correct internal
macro for the desired version.
Those defines are mostly the same code, so there is no need to keep the 4
copies.
Create a helper define to avoid code duplication.
(This did not cause any change in generated asm)
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/cmpxchg.h | 26 +++++++++-----------------
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 23da4d8e6f0c8..d13da2286c82a 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -43,41 +43,33 @@
#define __xchg_relaxed(ptr, new, size) \
___xchg(ptr, new, size, "", "", "")
-#define arch_xchg_relaxed(ptr, x) \
+#define _arch_xchg(order, ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
- _x_, sizeof(*(ptr))); \
+ (__typeof__(*(ptr))) __xchg ## order((ptr), \
+ _x_, sizeof(*(ptr))); \
})
+#define arch_xchg_relaxed(ptr, x) \
+ _arch_xchg(_relaxed, ptr, x)
+
#define __xchg_acquire(ptr, new, size) \
___xchg(ptr, new, size, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_xchg_acquire(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_acquire((ptr), \
- _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(_acquire, ptr, x)
#define __xchg_release(ptr, new, size) \
___xchg(ptr, new, size, "", RISCV_RELEASE_BARRIER, "")
#define arch_xchg_release(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_release((ptr), \
- _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(_release, ptr, x)
#define __xchg(ptr, new, size) \
___xchg(ptr, new, size, ".aqrl", "", "")
#define arch_xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(, ptr, x)
#define xchg32(ptr, x) \
({ \
--
2.40.0
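Reviewer aid (hand-traced, not part of the patch; p/x stand in for the
caller's arguments): as in patch 3, the token pasting in _arch_xchg()
should resolve to the existing helpers:

	arch_xchg_release(p, x)
	  -> _arch_xchg(_release, p, x)
	  -> (__typeof__(*(p))) __xchg_release((p), _x_, sizeof(*(p)))

	arch_xchg(p, x)
	  -> _arch_xchg(, p, x)			/* empty 'order' argument */
	  -> (__typeof__(*(p))) __xchg((p), _x_, sizeof(*(p)))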
On Tue, 2023-03-21 at 03:34 -0300, Leonardo Bras wrote:
> While studying riscv's cmpxchg.h file, I got really interested in
> understanding how RISC-V asm implements the different versions of
> {cmp,}xchg.
> [...]
Re-sending, since the series does not seem to have made it into patchwork
properly.