2023-11-04 09:17:14

by wuqiang.matt

Subject: [PATCH v2 0/4] locking/atomic: arch_cmpxchg[64]_local undefined

This patch series implements arch_cmpxchg[64]_local for arc,
openrisc and hexagon.

For architectures that support native cmpxchg, we'd like to
implement arch_cmpxchg[64]_local with the native variants for the
supported data sizes. If not, generic_cmpxchg[64]_local will be
used.
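
As a condensed sketch of the dispatch pattern the series applies
(this follows the shape of the arc implementation in patch 2;
__cmpxchg_32 stands for the arch's native 32-bit helper):

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		/* native 32-bit cmpxchg, e.g. an ll/sc loop */
		return __cmpxchg_32((int32_t *)ptr, old, new);
	default:
		/* no native variant: irq-disabling generic fallback */
		return __generic_cmpxchg_local(ptr, old, new, size);
	}
}

#define arch_cmpxchg64_local(ptr, o, n) \
	__generic_cmpxchg64_local((ptr), (o), (n))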

wuqiang.matt (4):
  locking/atomic: arc: data size checking in arch_cmpxchg
  locking/atomic: arc: arch_cmpxchg[64]_local undefined
  locking/atomic: openrisc: arch_cmpxchg[64]_local undefined
  locking/atomic: hexagon: arch_cmpxchg[64]_local undefined

 arch/arc/include/asm/cmpxchg.h      | 40 ++++++++++++++++++----
 arch/hexagon/include/asm/cmpxchg.h  | 51 ++++++++++++++++++++++++++++-
 arch/openrisc/include/asm/cmpxchg.h |  6 ++++
 3 files changed, 90 insertions(+), 7 deletions(-)

--
2.40.1


2023-11-04 09:17:16

by wuqiang.matt

Subject: [PATCH v2 1/4] locking/atomic: arc: data size checking in arch_cmpxchg

The __cmpxchg() macro is renamed to __cmpxchg_32() to emphasize its
explicit support of the 32-bit data size, and a BUILD_BUG_ON() is
added to avoid any possible misuse with unsupported data types.

In case CONFIG_ARC_HAS_LLSC is undefined, arch_cmpxchg() uses a
spinlock to accomplish SMP-safety, so the BUILD_BUG_ON() check is
unnecessary.
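
As a purely hypothetical illustration (val32/val64 below are not in
the patch), the new check turns a wrong-sized operand into a build
failure instead of a silent 4-byte access:

	u32 val32;
	u64 val64;

	/* OK: 4-byte operand matches the llock/scond access width */
	__cmpxchg_32(&val32, 0, 1);

	/* build error: BUILD_BUG_ON(sizeof(*(ptr)) != 4) fires, as
	 * llock/scond would only touch the low 4 bytes of val64 */
	__cmpxchg_32(&val64, 0, 1);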

Signed-off-by: wuqiang.matt <[email protected]>
---
arch/arc/include/asm/cmpxchg.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index e138fde067de..bf46514f6f12 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -18,14 +18,16 @@
* if (*ptr == @old)
* *ptr = @new
*/
-#define __cmpxchg(ptr, old, new) \
+#define __cmpxchg_32(ptr, old, new) \
({ \
__typeof__(*(ptr)) _prev; \
\
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ \
__asm__ __volatile__( \
- "1: llock %0, [%1] \n" \
+ "1: llock %0, [%1] \n" \
" brne %0, %2, 2f \n" \
- " scond %3, [%1] \n" \
+ " scond %3, [%1] \n" \
" bnz 1b \n" \
"2: \n" \
: "=&r"(_prev) /* Early clobber prevent reg reuse */ \
@@ -47,7 +49,7 @@
\
switch(sizeof((_p_))) { \
case 4: \
- _prev_ = __cmpxchg(_p_, _o_, _n_); \
+ _prev_ = __cmpxchg_32(_p_, _o_, _n_); \
break; \
default: \
BUILD_BUG(); \
@@ -65,8 +67,6 @@
__typeof__(*(ptr)) _prev_; \
unsigned long __flags; \
\
- BUILD_BUG_ON(sizeof(_p_) != 4); \
- \
/* \
* spin lock/unlock provide the needed smp_mb() before/after \
*/ \
--
2.40.1
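
For readers unfamiliar with the llock/scond pairing in the hunk above:
it is the classic load-locked/store-conditional retry loop. In C-like
pseudocode (illustrative only; load_locked() and store_conditional()
are made-up helpers standing in for the two instructions):

	do {
		prev = load_locked(ptr);	/* llock: load, arm reservation */
		if (prev != old)
			break;			/* brne: mismatch, give up */
	} while (!store_conditional(ptr, new));	/* scond fails if the
						   reservation was lost;
						   bnz 1b retries */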

2023-11-04 09:17:18

by wuqiang.matt

Subject: [PATCH v2 3/4] locking/atomic: openrisc: arch_cmpxchg[64]_local undefined

For architectures that support native cmpxchg, we'd like to
implement arch_cmpxchg[64]_local with the native variants for the
supported data sizes. If not, generic_cmpxchg[64]_local will be
used.

Signed-off-by: wuqiang.matt <[email protected]>
---
arch/openrisc/include/asm/cmpxchg.h | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 8ee151c072e4..f1ffe8b6f5ef 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -139,6 +139,12 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
(unsigned long)(n), \
sizeof(*(ptr))); \
})
+#define arch_cmpxchg_local arch_cmpxchg
+
+/* always make arch_cmpxchg64_local available for openrisc */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

/*
* This function doesn't exist, so you'll get a linker error if
--
2.40.1
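
For context, __generic_cmpxchg64_local() from asm-generic/cmpxchg-local.h
is not a hardware atomic: it masks local interrupts around a plain
compare-and-store, which is sufficient for the cmpxchg_local contract
(data that only the owning CPU updates). Roughly, paraphrasing the
asm-generic header:

	static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
						    u64 old, u64 new)
	{
		u64 prev;
		unsigned long flags;

		raw_local_irq_save(flags);	/* local atomicity only */
		prev = *(u64 *)ptr;
		if (prev == old)
			*(u64 *)ptr = new;
		raw_local_irq_restore(flags);

		return prev;
	}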

2023-11-04 09:18:07

by wuqiang.matt

Subject: [PATCH v2 2/4] locking/atomic: arc: arch_cmpxchg[64]_local undefined

For architectures that support native cmpxchg, we'd like to
implement arch_cmpxchg[64]_local with the native variants for the
supported data sizes. If not, generic_cmpxchg[64]_local will be
used.

Signed-off-by: wuqiang.matt <[email protected]>
---
arch/arc/include/asm/cmpxchg.h | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)

diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index bf46514f6f12..91429f2350df 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -80,6 +80,34 @@

#endif

+/*
+ * always make arch_cmpxchg[64]_local available, native cmpxchg
+ * will be used if available, then generic_cmpxchg[64]_local
+ */
+#include <asm-generic/cmpxchg-local.h>
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+#ifdef CONFIG_ARC_HAS_LLSC
+ case 4:
+ return __cmpxchg_32((int32_t *)ptr, old, new);
+#endif
+ default:
+ return __generic_cmpxchg_local(ptr, old, new, size);
+ }
+
+ return old;
+}
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof__(*ptr))__cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
/*
* xchg
*/
--
2.40.1
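
As a hedged usage sketch (the per-CPU counter below is hypothetical,
not part of the series): the cmpxchg_local() family that these arch_
macros back is meant for data only the owning CPU modifies, where the
fully SMP-ordered cmpxchg() would be overkill:

	static DEFINE_PER_CPU(unsigned long, evt_count);

	/* caller is assumed to run with preemption disabled */
	static void bump_event_count(void)
	{
		unsigned long *p = this_cpu_ptr(&evt_count);
		unsigned long old;

		do {
			old = READ_ONCE(*p);
		} while (cmpxchg_local(p, old, old + 1) != old);
	}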

2023-11-04 09:18:10

by wuqiang.matt

Subject: [PATCH v2 4/4] locking/atomic: hexagon: arch_cmpxchg[64]_local undefined

For architectures that support native cmpxchg, we'd like to
implement arch_cmpxchg[64]_local with the native variants for the
supported data sizes. If not, generic_cmpxchg[64]_local will be
used.

Reported-by: kernel test robot <[email protected]>
Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
Signed-off-by: wuqiang.matt <[email protected]>
---
arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..2b5e5bbaf807 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -8,6 +8,8 @@
#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H

+#include <linux/build_bug.h>
+
/*
* __arch_xchg - atomically exchange a register and a memory location
* @x: value to swap
@@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
* variable casting.
*/

-#define arch_cmpxchg(ptr, old, new) \
+#define __cmpxchg_32(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __oldval = 0; \
\
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ \
asm volatile( \
"1: %0 = memw_locked(%1);\n" \
" { P0 = cmp.eq(%0,%2);\n" \
@@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
__oldval; \
})

+#define __cmpxchg(ptr, old, val, size) \
+({ \
+ __typeof__(*(ptr)) oldval; \
+ \
+ switch (size) { \
+ case 4: \
+ oldval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ oldval = val; \
+ break; \
+ } \
+ \
+ oldval; \
+})
+
+#define arch_cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * always make arch_cmpxchg[64]_local available, native cmpxchg
+ * will be used if available, then generic_cmpxchg[64]_local
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg_local(ptr, old, val) \
+({ \
+ __typeof__(*(ptr)) retval; \
+ int size = sizeof(*(ptr)); \
+ \
+ switch (size) { \
+ case 4: \
+ retval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ retval = __generic_cmpxchg_local(ptr, old, \
+ val, size); \
+ break; \
+ } \
+ \
+ retval; \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
#endif /* _ASM_CMPXCHG_H */
--
2.40.1