2023-11-25 01:32:11

by Guo Ren

Subject: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants

From: Guo Ren <[email protected]>

The definitions of atomic(64)_read() and atomic(64)_set() are the relaxed
versions, so using them to implement the acquire and release variants is
incorrect. Besides, mapping acquire/release/relaxed variants onto the
fully-ordered ops is terrible for performance and should be the last
choice. So, the final solution is to increase the template priority.
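
With this change, each generated order variant picks its fallback in the
following priority order: (1) the arch-provided op of that ordering, (2)
the arch-provided relaxed op plus the appropriate fence, (3) the template.
Sketched below using raw_atomic_inc_return_acquire() as the example; this
mirrors the corresponding hunk in the diff, where the
'#elif defined(arch_atomic_inc_return)' branch that used to map acquire
onto the fully-ordered op is removed:

  static __always_inline int
  raw_atomic_inc_return_acquire(atomic_t *v)
  {
  #if defined(arch_atomic_inc_return_acquire)
          /* 1. the arch provides the acquire op itself */
          return arch_atomic_inc_return_acquire(v);
  #elif defined(arch_atomic_inc_return_relaxed)
          /* 2. the arch provides the relaxed op; add an acquire fence */
          int ret = arch_atomic_inc_return_relaxed(v);
          __atomic_acquire_fence();
          return ret;
  #else
          /* 3. fall back to the add_return_acquire template */
          return raw_atomic_add_return_acquire(1, v);
  #endif
  }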

Signed-off-by: Guo Ren <[email protected]>
Signed-off-by: Guo Ren <[email protected]>
---
Changelog:
v2:
- Modify gen-atomic-fallback.sh instead of hand-editing the generated header
---
include/linux/atomic/atomic-arch-fallback.h | 118 +-------------------
scripts/atomic/gen-atomic-fallback.sh | 2 +-
2 files changed, 2 insertions(+), 118 deletions(-)

diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 18f5744dfb5d..d3c176123216 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -459,8 +459,6 @@ raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
return arch_atomic_read_acquire(v);
-#elif defined(arch_atomic_read)
- return arch_atomic_read(v);
#else
int ret;

@@ -508,8 +506,6 @@ raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
arch_atomic_set_release(v, i);
-#elif defined(arch_atomic_set)
- arch_atomic_set(v, i);
#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
@@ -1031,8 +1027,6 @@ raw_atomic_inc_return_acquire(atomic_t *v)
int ret = arch_atomic_inc_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_inc_return)
- return arch_atomic_inc_return(v);
#else
return raw_atomic_add_return_acquire(1, v);
#endif
@@ -1056,8 +1050,6 @@ raw_atomic_inc_return_release(atomic_t *v)
#elif defined(arch_atomic_inc_return_relaxed)
__atomic_release_fence();
return arch_atomic_inc_return_relaxed(v);
-#elif defined(arch_atomic_inc_return)
- return arch_atomic_inc_return(v);
#else
return raw_atomic_add_return_release(1, v);
#endif
@@ -1078,8 +1070,6 @@ raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
return arch_atomic_inc_return_relaxed(v);
-#elif defined(arch_atomic_inc_return)
- return arch_atomic_inc_return(v);
#else
return raw_atomic_add_return_relaxed(1, v);
#endif
@@ -1130,8 +1120,6 @@ raw_atomic_fetch_inc_acquire(atomic_t *v)
int ret = arch_atomic_fetch_inc_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_fetch_inc)
- return arch_atomic_fetch_inc(v);
#else
return raw_atomic_fetch_add_acquire(1, v);
#endif
@@ -1155,8 +1143,6 @@ raw_atomic_fetch_inc_release(atomic_t *v)
#elif defined(arch_atomic_fetch_inc_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_inc_relaxed(v);
-#elif defined(arch_atomic_fetch_inc)
- return arch_atomic_fetch_inc(v);
#else
return raw_atomic_fetch_add_release(1, v);
#endif
@@ -1177,8 +1163,6 @@ raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
return arch_atomic_fetch_inc_relaxed(v);
-#elif defined(arch_atomic_fetch_inc)
- return arch_atomic_fetch_inc(v);
#else
return raw_atomic_fetch_add_relaxed(1, v);
#endif
@@ -1249,8 +1233,6 @@ raw_atomic_dec_return_acquire(atomic_t *v)
int ret = arch_atomic_dec_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_dec_return)
- return arch_atomic_dec_return(v);
#else
return raw_atomic_sub_return_acquire(1, v);
#endif
@@ -1274,8 +1256,6 @@ raw_atomic_dec_return_release(atomic_t *v)
#elif defined(arch_atomic_dec_return_relaxed)
__atomic_release_fence();
return arch_atomic_dec_return_relaxed(v);
-#elif defined(arch_atomic_dec_return)
- return arch_atomic_dec_return(v);
#else
return raw_atomic_sub_return_release(1, v);
#endif
@@ -1296,8 +1276,6 @@ raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
return arch_atomic_dec_return_relaxed(v);
-#elif defined(arch_atomic_dec_return)
- return arch_atomic_dec_return(v);
#else
return raw_atomic_sub_return_relaxed(1, v);
#endif
@@ -1348,8 +1326,6 @@ raw_atomic_fetch_dec_acquire(atomic_t *v)
int ret = arch_atomic_fetch_dec_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_fetch_dec)
- return arch_atomic_fetch_dec(v);
#else
return raw_atomic_fetch_sub_acquire(1, v);
#endif
@@ -1373,8 +1349,6 @@ raw_atomic_fetch_dec_release(atomic_t *v)
#elif defined(arch_atomic_fetch_dec_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_dec_relaxed(v);
-#elif defined(arch_atomic_fetch_dec)
- return arch_atomic_fetch_dec(v);
#else
return raw_atomic_fetch_sub_release(1, v);
#endif
@@ -1395,8 +1369,6 @@ raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
return arch_atomic_fetch_dec_relaxed(v);
-#elif defined(arch_atomic_fetch_dec)
- return arch_atomic_fetch_dec(v);
#else
return raw_atomic_fetch_sub_relaxed(1, v);
#endif
@@ -1590,8 +1562,6 @@ raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
int ret = arch_atomic_fetch_andnot_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_fetch_andnot)
- return arch_atomic_fetch_andnot(i, v);
#else
return raw_atomic_fetch_and_acquire(~i, v);
#endif
@@ -1616,8 +1586,6 @@ raw_atomic_fetch_andnot_release(int i, atomic_t *v)
#elif defined(arch_atomic_fetch_andnot_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_andnot_relaxed(i, v);
-#elif defined(arch_atomic_fetch_andnot)
- return arch_atomic_fetch_andnot(i, v);
#else
return raw_atomic_fetch_and_release(~i, v);
#endif
@@ -1639,8 +1607,6 @@ raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_relaxed)
return arch_atomic_fetch_andnot_relaxed(i, v);
-#elif defined(arch_atomic_fetch_andnot)
- return arch_atomic_fetch_andnot(i, v);
#else
return raw_atomic_fetch_and_relaxed(~i, v);
#endif
@@ -1933,8 +1899,6 @@ raw_atomic_xchg_acquire(atomic_t *v, int new)
int ret = arch_atomic_xchg_relaxed(v, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_xchg)
- return arch_atomic_xchg(v, new);
#else
return raw_xchg_acquire(&v->counter, new);
#endif
@@ -1959,8 +1923,6 @@ raw_atomic_xchg_release(atomic_t *v, int new)
#elif defined(arch_atomic_xchg_relaxed)
__atomic_release_fence();
return arch_atomic_xchg_relaxed(v, new);
-#elif defined(arch_atomic_xchg)
- return arch_atomic_xchg(v, new);
#else
return raw_xchg_release(&v->counter, new);
#endif
@@ -1982,8 +1944,6 @@ raw_atomic_xchg_relaxed(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_relaxed)
return arch_atomic_xchg_relaxed(v, new);
-#elif defined(arch_atomic_xchg)
- return arch_atomic_xchg(v, new);
#else
return raw_xchg_relaxed(&v->counter, new);
#endif
@@ -2038,8 +1998,6 @@ raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_cmpxchg)
- return arch_atomic_cmpxchg(v, old, new);
#else
return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
@@ -2065,8 +2023,6 @@ raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
#elif defined(arch_atomic_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic_cmpxchg)
- return arch_atomic_cmpxchg(v, old, new);
#else
return raw_cmpxchg_release(&v->counter, old, new);
#endif
@@ -2089,8 +2045,6 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_relaxed)
return arch_atomic_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic_cmpxchg)
- return arch_atomic_cmpxchg(v, old, new);
#else
return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
@@ -2151,8 +2105,6 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_try_cmpxchg)
- return arch_atomic_try_cmpxchg(v, old, new);
#else
int r, o = *old;
r = raw_atomic_cmpxchg_acquire(v, o, new);
@@ -2183,8 +2135,6 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#elif defined(arch_atomic_try_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic_try_cmpxchg)
- return arch_atomic_try_cmpxchg(v, old, new);
#else
int r, o = *old;
r = raw_atomic_cmpxchg_release(v, o, new);
@@ -2212,8 +2162,6 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_relaxed)
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic_try_cmpxchg)
- return arch_atomic_try_cmpxchg(v, old, new);
#else
int r, o = *old;
r = raw_atomic_cmpxchg_relaxed(v, o, new);
@@ -2331,8 +2279,6 @@ raw_atomic_add_negative_acquire(int i, atomic_t *v)
bool ret = arch_atomic_add_negative_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic_add_negative)
- return arch_atomic_add_negative(i, v);
#else
return raw_atomic_add_return_acquire(i, v) < 0;
#endif
@@ -2357,8 +2303,6 @@ raw_atomic_add_negative_release(int i, atomic_t *v)
#elif defined(arch_atomic_add_negative_relaxed)
__atomic_release_fence();
return arch_atomic_add_negative_relaxed(i, v);
-#elif defined(arch_atomic_add_negative)
- return arch_atomic_add_negative(i, v);
#else
return raw_atomic_add_return_release(i, v) < 0;
#endif
@@ -2380,8 +2324,6 @@ raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_relaxed)
return arch_atomic_add_negative_relaxed(i, v);
-#elif defined(arch_atomic_add_negative)
- return arch_atomic_add_negative(i, v);
#else
return raw_atomic_add_return_relaxed(i, v) < 0;
#endif
@@ -2575,8 +2517,6 @@ raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
return arch_atomic64_read_acquire(v);
-#elif defined(arch_atomic64_read)
- return arch_atomic64_read(v);
#else
s64 ret;

@@ -2624,8 +2564,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
arch_atomic64_set_release(v, i);
-#elif defined(arch_atomic64_set)
- arch_atomic64_set(v, i);
#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
@@ -3147,8 +3085,6 @@ raw_atomic64_inc_return_acquire(atomic64_t *v)
s64 ret = arch_atomic64_inc_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_inc_return)
- return arch_atomic64_inc_return(v);
#else
return raw_atomic64_add_return_acquire(1, v);
#endif
@@ -3172,8 +3108,6 @@ raw_atomic64_inc_return_release(atomic64_t *v)
#elif defined(arch_atomic64_inc_return_relaxed)
__atomic_release_fence();
return arch_atomic64_inc_return_relaxed(v);
-#elif defined(arch_atomic64_inc_return)
- return arch_atomic64_inc_return(v);
#else
return raw_atomic64_add_return_release(1, v);
#endif
@@ -3194,8 +3128,6 @@ raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_relaxed)
return arch_atomic64_inc_return_relaxed(v);
-#elif defined(arch_atomic64_inc_return)
- return arch_atomic64_inc_return(v);
#else
return raw_atomic64_add_return_relaxed(1, v);
#endif
@@ -3246,8 +3178,6 @@ raw_atomic64_fetch_inc_acquire(atomic64_t *v)
s64 ret = arch_atomic64_fetch_inc_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_fetch_inc)
- return arch_atomic64_fetch_inc(v);
#else
return raw_atomic64_fetch_add_acquire(1, v);
#endif
@@ -3271,8 +3201,6 @@ raw_atomic64_fetch_inc_release(atomic64_t *v)
#elif defined(arch_atomic64_fetch_inc_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_inc_relaxed(v);
-#elif defined(arch_atomic64_fetch_inc)
- return arch_atomic64_fetch_inc(v);
#else
return raw_atomic64_fetch_add_release(1, v);
#endif
@@ -3293,8 +3221,6 @@ raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_relaxed)
return arch_atomic64_fetch_inc_relaxed(v);
-#elif defined(arch_atomic64_fetch_inc)
- return arch_atomic64_fetch_inc(v);
#else
return raw_atomic64_fetch_add_relaxed(1, v);
#endif
@@ -3365,8 +3291,6 @@ raw_atomic64_dec_return_acquire(atomic64_t *v)
s64 ret = arch_atomic64_dec_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_dec_return)
- return arch_atomic64_dec_return(v);
#else
return raw_atomic64_sub_return_acquire(1, v);
#endif
@@ -3390,8 +3314,6 @@ raw_atomic64_dec_return_release(atomic64_t *v)
#elif defined(arch_atomic64_dec_return_relaxed)
__atomic_release_fence();
return arch_atomic64_dec_return_relaxed(v);
-#elif defined(arch_atomic64_dec_return)
- return arch_atomic64_dec_return(v);
#else
return raw_atomic64_sub_return_release(1, v);
#endif
@@ -3412,8 +3334,6 @@ raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_relaxed)
return arch_atomic64_dec_return_relaxed(v);
-#elif defined(arch_atomic64_dec_return)
- return arch_atomic64_dec_return(v);
#else
return raw_atomic64_sub_return_relaxed(1, v);
#endif
@@ -3464,8 +3384,6 @@ raw_atomic64_fetch_dec_acquire(atomic64_t *v)
s64 ret = arch_atomic64_fetch_dec_relaxed(v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_fetch_dec)
- return arch_atomic64_fetch_dec(v);
#else
return raw_atomic64_fetch_sub_acquire(1, v);
#endif
@@ -3489,8 +3407,6 @@ raw_atomic64_fetch_dec_release(atomic64_t *v)
#elif defined(arch_atomic64_fetch_dec_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_dec_relaxed(v);
-#elif defined(arch_atomic64_fetch_dec)
- return arch_atomic64_fetch_dec(v);
#else
return raw_atomic64_fetch_sub_release(1, v);
#endif
@@ -3511,8 +3427,6 @@ raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_relaxed)
return arch_atomic64_fetch_dec_relaxed(v);
-#elif defined(arch_atomic64_fetch_dec)
- return arch_atomic64_fetch_dec(v);
#else
return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
@@ -3706,8 +3620,6 @@ raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_fetch_andnot)
- return arch_atomic64_fetch_andnot(i, v);
#else
return raw_atomic64_fetch_and_acquire(~i, v);
#endif
@@ -3732,8 +3644,6 @@ raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#elif defined(arch_atomic64_fetch_andnot_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_andnot_relaxed(i, v);
-#elif defined(arch_atomic64_fetch_andnot)
- return arch_atomic64_fetch_andnot(i, v);
#else
return raw_atomic64_fetch_and_release(~i, v);
#endif
@@ -3755,8 +3665,6 @@ raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
return arch_atomic64_fetch_andnot_relaxed(i, v);
-#elif defined(arch_atomic64_fetch_andnot)
- return arch_atomic64_fetch_andnot(i, v);
#else
return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
@@ -4049,8 +3957,6 @@ raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
s64 ret = arch_atomic64_xchg_relaxed(v, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_xchg)
- return arch_atomic64_xchg(v, new);
#else
return raw_xchg_acquire(&v->counter, new);
#endif
@@ -4075,8 +3981,6 @@ raw_atomic64_xchg_release(atomic64_t *v, s64 new)
#elif defined(arch_atomic64_xchg_relaxed)
__atomic_release_fence();
return arch_atomic64_xchg_relaxed(v, new);
-#elif defined(arch_atomic64_xchg)
- return arch_atomic64_xchg(v, new);
#else
return raw_xchg_release(&v->counter, new);
#endif
@@ -4098,8 +4002,6 @@ raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
return arch_atomic64_xchg_relaxed(v, new);
-#elif defined(arch_atomic64_xchg)
- return arch_atomic64_xchg(v, new);
#else
return raw_xchg_relaxed(&v->counter, new);
#endif
@@ -4154,8 +4056,6 @@ raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_cmpxchg)
- return arch_atomic64_cmpxchg(v, old, new);
#else
return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
@@ -4181,8 +4081,6 @@ raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
#elif defined(arch_atomic64_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic64_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic64_cmpxchg)
- return arch_atomic64_cmpxchg(v, old, new);
#else
return raw_cmpxchg_release(&v->counter, old, new);
#endif
@@ -4205,8 +4103,6 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
return arch_atomic64_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic64_cmpxchg)
- return arch_atomic64_cmpxchg(v, old, new);
#else
return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
@@ -4267,8 +4163,6 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_try_cmpxchg)
- return arch_atomic64_try_cmpxchg(v, old, new);
#else
s64 r, o = *old;
r = raw_atomic64_cmpxchg_acquire(v, o, new);
@@ -4299,8 +4193,6 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic64_try_cmpxchg)
- return arch_atomic64_try_cmpxchg(v, old, new);
#else
s64 r, o = *old;
r = raw_atomic64_cmpxchg_release(v, o, new);
@@ -4328,8 +4220,6 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-#elif defined(arch_atomic64_try_cmpxchg)
- return arch_atomic64_try_cmpxchg(v, old, new);
#else
s64 r, o = *old;
r = raw_atomic64_cmpxchg_relaxed(v, o, new);
@@ -4447,8 +4337,6 @@ raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
bool ret = arch_atomic64_add_negative_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-#elif defined(arch_atomic64_add_negative)
- return arch_atomic64_add_negative(i, v);
#else
return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
@@ -4473,8 +4361,6 @@ raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
#elif defined(arch_atomic64_add_negative_relaxed)
__atomic_release_fence();
return arch_atomic64_add_negative_relaxed(i, v);
-#elif defined(arch_atomic64_add_negative)
- return arch_atomic64_add_negative(i, v);
#else
return raw_atomic64_add_return_release(i, v) < 0;
#endif
@@ -4496,8 +4382,6 @@ raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
return arch_atomic64_add_negative_relaxed(i, v);
-#elif defined(arch_atomic64_add_negative)
- return arch_atomic64_add_negative(i, v);
#else
return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
@@ -4657,4 +4541,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}

#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
+// 9cae73fa68c7c3b6b36a4ec7ee88c81edaa1cb1f
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index c0c8a85d7c81..d313c4bf91c4 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -102,7 +102,7 @@ gen_proto_order_variant()
fi

# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
- if [ ! -z "${order}" ]; then
+ if [ ! -z "${order}" ] && [ -z "${template}" ]; then
printf "#elif defined(arch_${basename})\n"
printf "\t${retstmt}arch_${basename}(${args});\n"
fi
--
2.36.1


2023-11-25 09:14:15

by kernel test robot

Subject: Re: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants

Hi,

kernel test robot noticed the following build warnings:

[auto build test WARNING on kees/for-next/pstore]
[also build test WARNING on kees/for-next/kspp]
[cannot apply to linus/master v6.7-rc2 next-20231124]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/guoren-kernel-org/locking-atomic-scripts-Increase-template-priority-in-order-variants/20231125-093207
base: https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
patch link: https://lore.kernel.org/r/20231125013025.3620560-1-guoren%40kernel.org
patch subject: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants
config: i386-defconfig (https://download.01.org/0day-ci/archive/20231125/[email protected]/config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231125/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

In file included from arch/x86/include/asm/atomic.h:8:0,
from include/linux/atomic.h:7,
from include/linux/cpumask.h:13,
from arch/x86/include/asm/cpumask.h:5,
from arch/x86/include/asm/msr.h:11,
from arch/x86/include/asm/processor.h:23,
from arch/x86/include/asm/timex.h:5,
from include/linux/timex.h:67,
from include/linux/time32.h:13,
from include/linux/time.h:60,
from include/linux/compat.h:10,
from kernel/futex/core.c:34:
kernel/futex/core.c: In function 'raw_atomic64_cmpxchg_relaxed':
>> arch/x86/include/asm/cmpxchg.h:130:2: warning: '__ret' is used uninitialized in this function [-Wuninitialized]
__ret; \
^~~~~
arch/x86/include/asm/cmpxchg.h:87:21: note: '__ret' was declared here
__typeof__(*(ptr)) __ret; \
^
arch/x86/include/asm/cmpxchg.h:134:2: note: in expansion of macro '__raw_cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:149:2: note: in expansion of macro '__cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
#define raw_cmpxchg_relaxed arch_cmpxchg
^~~~~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:4107:9: note: in expansion of macro 'raw_cmpxchg_relaxed'
return raw_cmpxchg_relaxed(&v->counter, old, new);
^~~~~~~~~~~~~~~~~~~
In function 'raw_atomic64_cmpxchg_relaxed',
inlined from 'get_inode_sequence_number' at include/linux/atomic/atomic-instrumented.h:2817:9,
inlined from 'get_futex_key' at kernel/futex/core.c:387:23:
arch/x86/include/asm/cmpxchg.h:128:3: error: call to '__cmpxchg_wrong_size' declared with attribute error: Bad argument size for cmpxchg
__cmpxchg_wrong_size(); \
^~~~~~~~~~~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:134:2: note: in expansion of macro '__raw_cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:149:2: note: in expansion of macro '__cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
#define raw_cmpxchg_relaxed arch_cmpxchg
^~~~~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:4107:9: note: in expansion of macro 'raw_cmpxchg_relaxed'
return raw_cmpxchg_relaxed(&v->counter, old, new);
^~~~~~~~~~~~~~~~~~~


vim +/__ret +130 arch/x86/include/asm/cmpxchg.h

e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 79
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 80 /*
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 81 * Atomic compare and exchange. Compare OLD with MEM, if identical,
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 82 * store NEW in MEM. Return the initial value in MEM. Success is
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 83 * indicated by comparing RETURN with OLD.
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 84 */
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 85 #define __raw_cmpxchg(ptr, old, new, size, lock) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 86 ({ \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 87 __typeof__(*(ptr)) __ret; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 88 __typeof__(*(ptr)) __old = (old); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 89 __typeof__(*(ptr)) __new = (new); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 90 switch (size) { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 91 case __X86_CASE_B: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 92 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 93 volatile u8 *__ptr = (volatile u8 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 94 asm volatile(lock "cmpxchgb %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 95 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 96 : "q" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 97 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 98 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 99 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 100 case __X86_CASE_W: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 101 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 102 volatile u16 *__ptr = (volatile u16 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 103 asm volatile(lock "cmpxchgw %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 104 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 105 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 106 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 107 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 108 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 109 case __X86_CASE_L: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 110 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 111 volatile u32 *__ptr = (volatile u32 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 112 asm volatile(lock "cmpxchgl %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 113 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 114 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 115 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 116 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 117 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 118 case __X86_CASE_Q: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 119 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 120 volatile u64 *__ptr = (volatile u64 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 121 asm volatile(lock "cmpxchgq %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 122 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 123 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 124 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 125 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 126 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 127 default: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 128 __cmpxchg_wrong_size(); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 129 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 @130 __ret; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 131 })
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 132

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-11-25 09:51:27

by kernel test robot

Subject: Re: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants

Hi,

kernel test robot noticed the following build warnings:

[auto build test WARNING on kees/for-next/pstore]
[also build test WARNING on kees/for-next/kspp]
[cannot apply to linus/master v6.7-rc2 next-20231124]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/guoren-kernel-org/locking-atomic-scripts-Increase-template-priority-in-order-variants/20231125-093207
base: https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
patch link: https://lore.kernel.org/r/20231125013025.3620560-1-guoren%40kernel.org
patch subject: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants
config: i386-buildonly-randconfig-002-20231125 (https://download.01.org/0day-ci/archive/20231125/[email protected]/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231125/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

In file included from arch/x86/include/asm/atomic.h:8,
from include/linux/atomic.h:7,
from include/linux/jump_label.h:255,
from include/linux/static_key.h:1,
from arch/x86/include/asm/nospec-branch.h:6,
from arch/x86/include/asm/paravirt_types.h:27,
from arch/x86/include/asm/ptrace.h:97,
from arch/x86/include/asm/math_emu.h:5,
from arch/x86/include/asm/processor.h:13,
from arch/x86/include/asm/timex.h:5,
from include/linux/timex.h:67,
from include/linux/time32.h:13,
from include/linux/time.h:60,
from include/linux/compat.h:10,
from kernel/futex/core.c:34:
arch/x86/include/asm/cmpxchg.h: In function 'raw_atomic64_cmpxchg_relaxed':
>> arch/x86/include/asm/cmpxchg.h:130:9: warning: '__ret' is used uninitialized [-Wuninitialized]
130 | __ret; \
| ^~~~~
arch/x86/include/asm/cmpxchg.h:87:28: note: '__ret' was declared here
87 | __typeof__(*(ptr)) __ret; \
| ^~~~~
arch/x86/include/asm/cmpxchg.h:134:9: note: in expansion of macro '__raw_cmpxchg'
134 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
| ^~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:149:9: note: in expansion of macro '__cmpxchg'
149 | __cmpxchg(ptr, old, new, sizeof(*(ptr)))
| ^~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
91 | #define raw_cmpxchg_relaxed arch_cmpxchg
| ^~~~~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:4107:16: note: in expansion of macro 'raw_cmpxchg_relaxed'
4107 | return raw_cmpxchg_relaxed(&v->counter, old, new);
| ^~~~~~~~~~~~~~~~~~~
In function 'raw_atomic64_cmpxchg_relaxed',
inlined from 'atomic64_cmpxchg_relaxed' at include/linux/atomic/atomic-instrumented.h:2817:9,
inlined from 'get_inode_sequence_number' at kernel/futex/core.c:186:9,
inlined from 'get_futex_key' at kernel/futex/core.c:387:23:
arch/x86/include/asm/cmpxchg.h:128:17: error: call to '__cmpxchg_wrong_size' declared with attribute error: Bad argument size for cmpxchg
128 | __cmpxchg_wrong_size(); \
| ^~~~~~~~~~~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:134:9: note: in expansion of macro '__raw_cmpxchg'
134 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
| ^~~~~~~~~~~~~
arch/x86/include/asm/cmpxchg.h:149:9: note: in expansion of macro '__cmpxchg'
149 | __cmpxchg(ptr, old, new, sizeof(*(ptr)))
| ^~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
91 | #define raw_cmpxchg_relaxed arch_cmpxchg
| ^~~~~~~~~~~~
include/linux/atomic/atomic-arch-fallback.h:4107:16: note: in expansion of macro 'raw_cmpxchg_relaxed'
4107 | return raw_cmpxchg_relaxed(&v->counter, old, new);
| ^~~~~~~~~~~~~~~~~~~


vim +/__ret +130 arch/x86/include/asm/cmpxchg.h

e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 79
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 80 /*
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 81 * Atomic compare and exchange. Compare OLD with MEM, if identical,
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 82 * store NEW in MEM. Return the initial value in MEM. Success is
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 83 * indicated by comparing RETURN with OLD.
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 84 */
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 85 #define __raw_cmpxchg(ptr, old, new, size, lock) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 86 ({ \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 87 __typeof__(*(ptr)) __ret; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 88 __typeof__(*(ptr)) __old = (old); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 89 __typeof__(*(ptr)) __new = (new); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 90 switch (size) { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 91 case __X86_CASE_B: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 92 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 93 volatile u8 *__ptr = (volatile u8 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 94 asm volatile(lock "cmpxchgb %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 95 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 96 : "q" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 97 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 98 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 99 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 100 case __X86_CASE_W: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 101 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 102 volatile u16 *__ptr = (volatile u16 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 103 asm volatile(lock "cmpxchgw %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 104 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 105 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 106 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 107 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 108 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 109 case __X86_CASE_L: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 110 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 111 volatile u32 *__ptr = (volatile u32 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 112 asm volatile(lock "cmpxchgl %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 113 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 114 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 115 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 116 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 117 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 118 case __X86_CASE_Q: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 119 { \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 120 volatile u64 *__ptr = (volatile u64 *)(ptr); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 121 asm volatile(lock "cmpxchgq %2,%1" \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 122 : "=a" (__ret), "+m" (*__ptr) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 123 : "r" (__new), "0" (__old) \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 124 : "memory"); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 125 break; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 126 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 127 default: \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 128 __cmpxchg_wrong_size(); \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 129 } \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 @130 __ret; \
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 131 })
e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 132

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-11-25 09:51:44

by kernel test robot

Subject: Re: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants

Hi,

kernel test robot noticed the following build errors:

[auto build test ERROR on kees/for-next/pstore]
[also build test ERROR on kees/for-next/kspp]
[cannot apply to linus/master v6.7-rc2 next-20231124]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/guoren-kernel-org/locking-atomic-scripts-Increase-template-priority-in-order-variants/20231125-093207
base: https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
patch link: https://lore.kernel.org/r/20231125013025.3620560-1-guoren%40kernel.org
patch subject: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants
config: x86_64-rhel-8.3-rust (https://download.01.org/0day-ci/archive/20231125/[email protected]/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231125/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All errors (new ones prefixed by >>):

In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
>> include/linux/atomic/atomic-arch-fallback.h:3961:9: error: invalid output size for constraint '+q'
return raw_xchg_acquire(&v->counter, new);
^
include/linux/atomic/atomic-arch-fallback.h:27:26: note: expanded from macro 'raw_xchg_acquire'
#define raw_xchg_acquire arch_xchg
^
arch/x86/include/asm/cmpxchg.h:78:27: note: expanded from macro 'arch_xchg'
#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
^
arch/x86/include/asm/cmpxchg.h:48:19: note: expanded from macro '__xchg_op'
: "+q" (__ret), "+m" (*(ptr)) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:3985:9: error: invalid output size for constraint '+q'
return raw_xchg_release(&v->counter, new);
^
include/linux/atomic/atomic-arch-fallback.h:39:26: note: expanded from macro 'raw_xchg_release'
#define raw_xchg_release arch_xchg
^
arch/x86/include/asm/cmpxchg.h:78:27: note: expanded from macro 'arch_xchg'
#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
^
arch/x86/include/asm/cmpxchg.h:48:19: note: expanded from macro '__xchg_op'
: "+q" (__ret), "+m" (*(ptr)) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:4006:9: error: invalid output size for constraint '+q'
return raw_xchg_relaxed(&v->counter, new);
^
include/linux/atomic/atomic-arch-fallback.h:48:26: note: expanded from macro 'raw_xchg_relaxed'
#define raw_xchg_relaxed arch_xchg
^
arch/x86/include/asm/cmpxchg.h:78:27: note: expanded from macro 'arch_xchg'
#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
^
arch/x86/include/asm/cmpxchg.h:48:19: note: expanded from macro '__xchg_op'
: "+q" (__ret), "+m" (*(ptr)) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
>> include/linux/atomic/atomic-arch-fallback.h:4060:9: error: invalid output size for constraint '=a'
return raw_cmpxchg_acquire(&v->counter, old, new);
^
include/linux/atomic/atomic-arch-fallback.h:70:29: note: expanded from macro 'raw_cmpxchg_acquire'
#define raw_cmpxchg_acquire arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:95:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
>> include/linux/atomic/atomic-arch-fallback.h:4060:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:70:29: note: expanded from macro 'raw_cmpxchg_acquire'
#define raw_cmpxchg_acquire arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:104:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
>> include/linux/atomic/atomic-arch-fallback.h:4060:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:70:29: note: expanded from macro 'raw_cmpxchg_acquire'
#define raw_cmpxchg_acquire arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:113:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
>> include/linux/atomic/atomic-arch-fallback.h:4060:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:70:29: note: expanded from macro 'raw_cmpxchg_acquire'
#define raw_cmpxchg_acquire arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:122:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:4085:9: error: invalid output size for constraint '=a'
return raw_cmpxchg_release(&v->counter, old, new);
^
include/linux/atomic/atomic-arch-fallback.h:82:29: note: expanded from macro 'raw_cmpxchg_release'
#define raw_cmpxchg_release arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:95:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:4085:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:82:29: note: expanded from macro 'raw_cmpxchg_release'
#define raw_cmpxchg_release arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:104:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:4085:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:82:29: note: expanded from macro 'raw_cmpxchg_release'
#define raw_cmpxchg_release arch_cmpxchg
^
arch/x86/include/asm/cmpxchg.h:149:2: note: expanded from macro 'arch_cmpxchg'
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
^
arch/x86/include/asm/cmpxchg.h:134:2: note: expanded from macro '__cmpxchg'
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
^
arch/x86/include/asm/cmpxchg.h:113:17: note: expanded from macro '__raw_cmpxchg'
: "=a" (__ret), "+m" (*__ptr) \
^
In file included from arch/x86/entry/vdso/vdso32/vclock_gettime.c:4:
In file included from arch/x86/entry/vdso/vdso32/../vclock_gettime.c:11:
In file included from include/linux/time.h:60:
In file included from include/linux/time32.h:13:
In file included from include/linux/timex.h:67:
In file included from arch/x86/include/asm/timex.h:5:
In file included from arch/x86/include/asm/processor.h:23:
In file included from arch/x86/include/asm/msr.h:11:
In file included from arch/x86/include/asm/cpumask.h:5:
In file included from include/linux/cpumask.h:13:
In file included from include/linux/atomic.h:80:
include/linux/atomic/atomic-arch-fallback.h:4085:9: error: invalid output size for constraint '=a'
include/linux/atomic/atomic-arch-fallback.h:82:29: note: expanded from macro 'raw_cmpxchg_release'
#define raw_cmpxchg_release arch_cmpxchg


vim +3961 include/linux/atomic/atomic-arch-fallback.h

d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3939
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3940 /**
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3941 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3942 * @v: pointer to atomic64_t
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3943 * @new: s64 value to assign
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3944 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3945 * Atomically updates @v to @new with acquire ordering.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3946 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3947 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3948 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3949 * Return: The original value of @v.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3950 */
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3951 static __always_inline s64
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3952 raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3953 {
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3954 #if defined(arch_atomic64_xchg_acquire)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3955 return arch_atomic64_xchg_acquire(v, new);
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3956 #elif defined(arch_atomic64_xchg_relaxed)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3957 s64 ret = arch_atomic64_xchg_relaxed(v, new);
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3958 __atomic_acquire_fence();
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3959 return ret;
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3960 #else
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 @3961 return raw_xchg_acquire(&v->counter, new);
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3962 #endif
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3963 }
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3964
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3965 /**
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3966 * raw_atomic64_xchg_release() - atomic exchange with release ordering
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3967 * @v: pointer to atomic64_t
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3968 * @new: s64 value to assign
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3969 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3970 * Atomically updates @v to @new with release ordering.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3971 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3972 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3973 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3974 * Return: The original value of @v.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3975 */
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3976 static __always_inline s64
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3977 raw_atomic64_xchg_release(atomic64_t *v, s64 new)
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3978 {
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3979 #if defined(arch_atomic64_xchg_release)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3980 return arch_atomic64_xchg_release(v, new);
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3981 #elif defined(arch_atomic64_xchg_relaxed)
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3982 __atomic_release_fence();
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3983 return arch_atomic64_xchg_relaxed(v, new);
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3984 #else
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3985 return raw_xchg_release(&v->counter, new);
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 3986 #endif
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3987 }
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 3988
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3989 /**
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3990 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3991 * @v: pointer to atomic64_t
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3992 * @new: s64 value to assign
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3993 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3994 * Atomically updates @v to @new with relaxed ordering.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3995 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3996 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3997 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3998 * Return: The original value of @v.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 3999 */
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4000 static __always_inline s64
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4001 raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4002 {
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4003 #if defined(arch_atomic64_xchg_relaxed)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4004 return arch_atomic64_xchg_relaxed(v, new);
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4005 #else
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4006 return raw_xchg_relaxed(&v->counter, new);
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4007 #endif
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4008 }
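
By contrast, the relaxed variant gives atomicity only, with no ordering
against other memory accesses. A minimal sketch, with a made-up `nr_events`
counter, of a case where that is enough:

	/* Atomically drain a statistics counter; nothing else is being
	 * published alongside it, so relaxed ordering suffices. */
	static atomic64_t nr_events = ATOMIC64_INIT(0);

	static s64 drain_events(void)
	{
		return atomic64_xchg_relaxed(&nr_events, 0);
	}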
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4009
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4010 /**
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4011 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4012 * @v: pointer to atomic64_t
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4013 * @old: s64 value to compare with
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4014 * @new: s64 value to assign
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4015 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4016 * If (@v == @old), atomically updates @v to @new with full ordering.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4017 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4018 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4019 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4020 * Return: The original value of @v.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4021 */
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4022 static __always_inline s64
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4023 raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4024 {
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4025 #if defined(arch_atomic64_cmpxchg)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4026 return arch_atomic64_cmpxchg(v, old, new);
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4027 #elif defined(arch_atomic64_cmpxchg_relaxed)
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4028 s64 ret;
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4029 __atomic_pre_full_fence();
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4030 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4031 __atomic_post_full_fence();
37f8173dd84936 include/linux/atomic-arch-fallback.h Peter Zijlstra 2020-01-24 4032 return ret;
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4033 #else
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4034 return raw_cmpxchg(&v->counter, old, new);
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4035 #endif
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4036 }
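
The fully ordered cmpxchg is the usual building block for read-modify-write
loops. A hedged sketch of that pattern; `add_capped()` and its parameters are
assumptions for illustration, not kernel API:

	/* Add @delta to @v, but never let it exceed @cap. */
	static s64 add_capped(atomic64_t *v, s64 delta, s64 cap)
	{
		s64 old = atomic64_read(v);

		for (;;) {
			s64 new = min(old + delta, cap);
			s64 prev = atomic64_cmpxchg(v, old, new);

			if (prev == old)
				return new;	/* swap succeeded */
			old = prev;		/* lost the race; retry */
		}
	}

Newer kernel code would typically write this loop with
atomic64_try_cmpxchg(), which updates @old in place on failure.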
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4037
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4038 /**
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4039 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4040 * @v: pointer to atomic64_t
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4041 * @old: s64 value to compare with
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4042 * @new: s64 value to assign
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4043 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4044 * If (@v == @old), atomically updates @v to @new with acquire ordering.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4045 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4046 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4047 *
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4048 * Return: The original value of @v.
ad8110706f3811 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4049 */
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4050 static __always_inline s64
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4051 raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4052 {
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4053 #if defined(arch_atomic64_cmpxchg_acquire)
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4054 return arch_atomic64_cmpxchg_acquire(v, old, new);
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4055 #elif defined(arch_atomic64_cmpxchg_relaxed)
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4056 s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4057 __atomic_acquire_fence();
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4058 return ret;
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4059 #else
9257959a6e5b4f include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 @4060 return raw_cmpxchg_acquire(&v->counter, old, new);
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4061 #endif
1d78814d41701c include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4062 }
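
Acquire ordering on a successful compare-and-exchange is the classic
lock-acquisition shape. A minimal sketch, assuming a hypothetical `owner`
word where 0 means unowned:

	/* Try to take ownership; on success, acquire ordering keeps the
	 * critical section from leaking above the ownership change. */
	static bool try_own(atomic64_t *owner, s64 me)
	{
		return atomic64_cmpxchg_acquire(owner, 0, me) == 0;
	}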
d12157efc8e083 include/linux/atomic/atomic-arch-fallback.h Mark Rutland 2023-06-05 4063

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-11-26 00:38:49

by Guo Ren

[permalink] [raw]
Subject: Re: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants

On Sat, Nov 25, 2023 at 5:11 PM kernel test robot <[email protected]> wrote:
>
> Hi,
>
> kernel test robot noticed the following build warnings:
>
> [auto build test WARNING on kees/for-next/pstore]
> [also build test WARNING on kees/for-next/kspp]
> [cannot apply to linus/master v6.7-rc2 next-20231124]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting a patch, we suggest using '--base' as documented in
> https://git-scm.com/docs/git-format-patch#_base_tree_information]
>
> url: https://github.com/intel-lab-lkp/linux/commits/guoren-kernel-org/locking-atomic-scripts-Increase-template-priority-in-order-variants/20231125-093207
> base: https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
> patch link: https://lore.kernel.org/r/20231125013025.3620560-1-guoren%40kernel.org
> patch subject: [PATCH V2] locking/atomic: scripts: Increase template priority in order variants
> config: i386-defconfig (https://download.01.org/0day-ci/archive/20231125/[email protected]/config)
> compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231125/[email protected]/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add the following tags
> | Reported-by: kernel test robot <[email protected]>
> | Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
>
> All warnings (new ones prefixed by >>):
>
> In file included from arch/x86/include/asm/atomic.h:8:0,
> from include/linux/atomic.h:7,
> from include/linux/cpumask.h:13,
> from arch/x86/include/asm/cpumask.h:5,
> from arch/x86/include/asm/msr.h:11,
> from arch/x86/include/asm/processor.h:23,
> from arch/x86/include/asm/timex.h:5,
> from include/linux/timex.h:67,
> from include/linux/time32.h:13,
> from include/linux/time.h:60,
> from include/linux/compat.h:10,
> from kernel/futex/core.c:34:
> kernel/futex/core.c: In function 'raw_atomic64_cmpxchg_relaxed':
> >> arch/x86/include/asm/cmpxchg.h:130:2: warning: '__ret' is used uninitialized in this function [-Wuninitialized]
> __ret; \
> ^~~~~
> arch/x86/include/asm/cmpxchg.h:87:21: note: '__ret' was declared here
> __typeof__(*(ptr)) __ret; \
> ^
> arch/x86/include/asm/cmpxchg.h:134:2: note: in expansion of macro '__raw_cmpxchg'
> __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
> ^~~~~~~~~~~~~
> arch/x86/include/asm/cmpxchg.h:149:2: note: in expansion of macro '__cmpxchg'
> __cmpxchg(ptr, old, new, sizeof(*(ptr)))
> ^~~~~~~~~
> include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
> #define raw_cmpxchg_relaxed arch_cmpxchg
> ^~~~~~~~~~~~
> include/linux/atomic/atomic-arch-fallback.h:4107:9: note: in expansion of macro 'raw_cmpxchg_relaxed'
> return raw_cmpxchg_relaxed(&v->counter, old, new);
> ^~~~~~~~~~~~~~~~~~~
> In function 'raw_atomic64_cmpxchg_relaxed',
> inlined from 'get_inode_sequence_number' at include/linux/atomic/atomic-instrumented.h:2817:9,
> inlined from 'get_futex_key' at kernel/futex/core.c:387:23:
> arch/x86/include/asm/cmpxchg.h:128:3: error: call to '__cmpxchg_wrong_size' declared with attribute error: Bad argument size for cmpxchg
GCC is okay, but Clang failed. I will dig into it.
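
For reference, one plausible reading of both diagnostics, assuming the
i386-defconfig build from the report: on 32-bit x86,
arch/x86/include/asm/cmpxchg.h defines

	#ifdef CONFIG_X86_64
	#define __X86_CASE_Q	8
	#else
	#define __X86_CASE_Q	-1	/* sizeof will never return -1 */
	#endif

so a 64-bit operand can never match any case in __raw_cmpxchg() (quoted
below): it falls into the default: arm, which calls __cmpxchg_wrong_size()
and leaves __ret uninitialized -- matching both the warning and the error.
In other words, a 64-bit raw_cmpxchg_relaxed() on i386 ends up routed
through the generic arch_cmpxchg() instead of a cmpxchg8b-based 64-bit path.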

> __cmpxchg_wrong_size(); \
> ^~~~~~~~~~~~~~~~~~~~~~
> arch/x86/include/asm/cmpxchg.h:134:2: note: in expansion of macro '__raw_cmpxchg'
> __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
> ^~~~~~~~~~~~~
> arch/x86/include/asm/cmpxchg.h:149:2: note: in expansion of macro '__cmpxchg'
> __cmpxchg(ptr, old, new, sizeof(*(ptr)))
> ^~~~~~~~~
> include/linux/atomic/atomic-arch-fallback.h:91:29: note: in expansion of macro 'arch_cmpxchg'
> #define raw_cmpxchg_relaxed arch_cmpxchg
> ^~~~~~~~~~~~
> include/linux/atomic/atomic-arch-fallback.h:4107:9: note: in expansion of macro 'raw_cmpxchg_relaxed'
> return raw_cmpxchg_relaxed(&v->counter, old, new);
> ^~~~~~~~~~~~~~~~~~~
>
>
> vim +/__ret +130 arch/x86/include/asm/cmpxchg.h
>
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 79
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 80 /*
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 81 * Atomic compare and exchange. Compare OLD with MEM, if identical,
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 82 * store NEW in MEM. Return the initial value in MEM. Success is
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 83 * indicated by comparing RETURN with OLD.
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 84 */
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 85 #define __raw_cmpxchg(ptr, old, new, size, lock) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 86 ({ \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 87 __typeof__(*(ptr)) __ret; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 88 __typeof__(*(ptr)) __old = (old); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 89 __typeof__(*(ptr)) __new = (new); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 90 switch (size) { \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 91 case __X86_CASE_B: \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 92 { \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 93 volatile u8 *__ptr = (volatile u8 *)(ptr); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 94 asm volatile(lock "cmpxchgb %2,%1" \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 95 : "=a" (__ret), "+m" (*__ptr) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 96 : "q" (__new), "0" (__old) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 97 : "memory"); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 98 break; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 99 } \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 100 case __X86_CASE_W: \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 101 { \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 102 volatile u16 *__ptr = (volatile u16 *)(ptr); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 103 asm volatile(lock "cmpxchgw %2,%1" \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 104 : "=a" (__ret), "+m" (*__ptr) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 105 : "r" (__new), "0" (__old) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 106 : "memory"); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 107 break; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 108 } \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 109 case __X86_CASE_L: \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 110 { \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 111 volatile u32 *__ptr = (volatile u32 *)(ptr); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 112 asm volatile(lock "cmpxchgl %2,%1" \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 113 : "=a" (__ret), "+m" (*__ptr) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 114 : "r" (__new), "0" (__old) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 115 : "memory"); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 116 break; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 117 } \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 118 case __X86_CASE_Q: \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 119 { \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 120 volatile u64 *__ptr = (volatile u64 *)(ptr); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 121 asm volatile(lock "cmpxchgq %2,%1" \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 122 : "=a" (__ret), "+m" (*__ptr) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 123 : "r" (__new), "0" (__old) \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 124 : "memory"); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 125 break; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 126 } \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 127 default: \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 128 __cmpxchg_wrong_size(); \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 129 } \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 @130 __ret; \
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 131 })
> e9826380d83d1b Jeremy Fitzhardinge 2011-08-18 132
>
> --
> 0-DAY CI Kernel Test Service
> https://github.com/intel/lkp-tests/wiki



--
Best Regards
Guo Ren