2024-04-02 04:27:15

by Al Viro

[permalink] [raw]
Subject: [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32

Conversion between u32 and unsigned long is tautological there,
and the only use of the return value is to return it from
__cmpxchg() (which returns unsigned long).

Signed-off-by: Al Viro <[email protected]>
---
arch/sparc/include/asm/cmpxchg_32.h | 2 +-
arch/sparc/lib/atomic32.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index d0af82c240b7..112bfaa28729 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -39,7 +39,7 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v
/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
-unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cf80d1ae352b..d90d756123d8 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -159,7 +159,7 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
}
EXPORT_SYMBOL(sp32___change_bit);

-unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
+u32 __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
unsigned long flags;
u32 prev;
@@ -169,7 +169,7 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
*ptr = new;
spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

- return (unsigned long)prev;
+ return prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

--
2.39.2



2024-04-02 04:27:40

by Al Viro

[permalink] [raw]
Subject: [PATCH 2/8] sparc32: make the first argument of __cmpxchg_u64() volatile u64 *

.. to match all cmpxchg variants.

Signed-off-by: Al Viro <[email protected]>
---
arch/sparc/include/asm/cmpxchg_32.h | 2 +-
arch/sparc/lib/atomic32.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 112bfaa28729..86254c366477 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})

-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);
#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)

#include <asm-generic/cmpxchg-local.h>
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index d90d756123d8..e15affbbb523 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -173,7 +173,7 @@ u32 __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
}
EXPORT_SYMBOL(__cmpxchg_u32);

-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
unsigned long flags;
u64 prev;
--
2.39.2


2024-04-02 04:27:45

by Al Viro

[permalink] [raw]
Subject: [PATCH 3/8] sparc32: unify __cmpxchg_u{32,64}

Add a macro that expands to one of those when given u32 or u64
as an argument - atomic32.c has a lot of similar stuff already.

Signed-off-by: Al Viro <[email protected]>
---
arch/sparc/lib/atomic32.c | 41 +++++++++++++++------------------------
1 file changed, 16 insertions(+), 25 deletions(-)

diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index e15affbbb523..0d215a772428 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -159,32 +159,23 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
}
EXPORT_SYMBOL(sp32___change_bit);

-u32 __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
- unsigned long flags;
- u32 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return prev;
-}
+#define CMPXCHG(T) \
+ T __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
+ \
+ return prev; \
+ }
+
+CMPXCHG(u32)
+CMPXCHG(u64)
EXPORT_SYMBOL(__cmpxchg_u32);
-
-u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return prev;
-}
EXPORT_SYMBOL(__cmpxchg_u64);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
--
2.39.2


2024-04-02 04:27:49

by Al Viro

[permalink] [raw]
Subject: [PATCH 4/8] sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes

trivial now

Signed-off-by: Al Viro <[email protected]>
---
arch/sparc/include/asm/cmpxchg_32.h | 7 ++++++-
arch/sparc/lib/atomic32.c | 4 ++++
2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 86254c366477..1324984de36a 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -38,7 +38,8 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v

/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
+u8 __cmpxchg_u8(volatile u8 *m, u8 old, u8 new_);
+u16 __cmpxchg_u16(volatile u16 *m, u16 old, u16 new_);
u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* don't worry...optimizer will get rid of most of this */
@@ -46,6 +47,10 @@ static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+ case 2:
+ return __cmpxchg_u16((u16 *)ptr, (u16)old, (u16)new_);
case 4:
return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
default:
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 0d215a772428..8ae880ebf07a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -173,8 +173,12 @@ EXPORT_SYMBOL(sp32___change_bit);
return prev; \
}

+CMPXCHG(u8)
+CMPXCHG(u16)
CMPXCHG(u32)
CMPXCHG(u64)
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);

--
2.39.2


2024-04-02 04:28:19

by Al Viro

[permalink] [raw]
Subject: [PATCH 5/8] parisc: __cmpxchg_u32(): lift conversion into the callers

__cmpxchg_u32()'s return value is an unsigned int explicitly cast to
unsigned long. Both callers return it from functions that
return unsigned long; might as well have __cmpxchg_u32()
return that unsigned int (aka u32) and let the callers convert
implicitly.

Signed-off-by: Al Viro <[email protected]>
---
arch/parisc/include/asm/cmpxchg.h | 3 +--
arch/parisc/lib/bitops.c | 6 +++---
2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index c1d776bb16b4..0924ebc576d2 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -57,8 +57,7 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
- unsigned int new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);

diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 36a314199074..ae2231d92198 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -68,16 +68,16 @@ u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
return prev;
}

-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
+u32 notrace __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
unsigned long flags;
- unsigned int prev;
+ u32 prev;

_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
- return (unsigned long)prev;
+ return prev;
}

u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
--
2.39.2


2024-04-02 04:28:21

by Al Viro

[permalink] [raw]
Subject: [PATCH 7/8] parisc: add missing export of __cmpxchg_u8()

__cmpxchg_u8() had been added (initially) for the sake of
drivers/phy/ti/phy-tusb1210.c; the thing is, that driver is
modular, so we need an export.

Fixes: b344d6a83d01 ("parisc: add support for cmpxchg on u8 pointers")
Signed-off-by: Al Viro <[email protected]>
---
arch/parisc/kernel/parisc_ksyms.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6f0c92e8149d..dcf61cbd3147 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -22,6 +22,7 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
--
2.39.2


2024-04-02 04:28:33

by Al Viro

[permalink] [raw]
Subject: [PATCH 8/8] parisc: add u16 support to cmpxchg()

Add (and export) __cmpxchg_u16(), teach __cmpxchg() to use it.
And get rid of the casts of {old,new_} in __cmpxchg() - __cmpxchg_u...()
has those arguments declared as u<size> and conversion from unsigned
long to those is automatic.

Signed-off-by: Al Viro <[email protected]>
---
arch/parisc/include/asm/cmpxchg.h | 13 +++++++------
arch/parisc/kernel/parisc_ksyms.c | 1 +
arch/parisc/lib/bitops.c | 1 +
3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 0924ebc576d2..1a0eec8c91f5 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -56,10 +56,11 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
-extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@@ -67,11 +68,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+ case 8: return __cmpxchg_u64((volatile u64 *)ptr, old, new_);
#endif
- case 4: return __cmpxchg_u32((unsigned int *)ptr,
- (unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
+ case 4: return __cmpxchg_u32((volatile u32 *)ptr, old, new_);
+ case 2: return __cmpxchg_u16((volatile u16 *)ptr, old, new_);
+ case 1: return __cmpxchg_u8((volatile u8 *)ptr, old, new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index dcf61cbd3147..c1587aa35beb 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -23,6 +23,7 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index cae30a3eb6d9..9df810050642 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -71,4 +71,5 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)

CMPXCHG(u64)
CMPXCHG(u32)
+CMPXCHG(u16)
CMPXCHG(u8)
--
2.39.2


2024-04-02 04:28:36

by Al Viro

[permalink] [raw]
Subject: [PATCH 6/8] parisc: unify implementations of __cmpxchg_u{8,32,64}

identical except for type name involved

Signed-off-by: Al Viro <[email protected]>
---
arch/parisc/lib/bitops.c | 51 +++++++++++++---------------------------
1 file changed, 16 insertions(+), 35 deletions(-)

diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index ae2231d92198..cae30a3eb6d9 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -56,38 +56,19 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
}


-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
-
-u32 notrace __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
- unsigned long flags;
- u32 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
- unsigned long flags;
- u8 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
+#define CMPXCHG(T) \
+ T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ _atomic_spin_lock_irqsave(ptr, flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ _atomic_spin_unlock_irqrestore(ptr, flags); \
+ return prev; \
+ }
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u8)
--
2.39.2


2024-04-02 07:29:28

by Arnd Bergmann

[permalink] [raw]
Subject: Re: [PATCH 3/8] sparc32: unify __cmpxchg_u{32,64}

On Tue, Apr 2, 2024, at 06:28, Al Viro wrote:
> Add a macro that expands to one of those when given u32 or u64
> as an argument - atomic32.c has a lot of similar stuff already.
>
> Signed-off-by: Al Viro <[email protected]>

I think we should merge Sam's series to remove non-CAS sparc32
first:

https://lore.kernel.org/all/[email protected]/

I don't see a patch yet to actually change cmpxchg() to use CAS,
but it can probably just share the sparc64 implementation at
that point.

Arnd

2024-04-02 20:02:33

by Paul E. McKenney

[permalink] [raw]
Subject: Re: [PATCH 3/8] sparc32: unify __cmpxchg_u{32,64}

On Tue, Apr 02, 2024 at 09:28:57AM +0200, Arnd Bergmann wrote:
> On Tue, Apr 2, 2024, at 06:28, Al Viro wrote:
> > Add a macro that expands to one of those when given u32 or u64
> > as an argument - atomic32.c has a lot of similar stuff already.
> >
> > Signed-off-by: Al Viro <[email protected]>
>
> I think we should merge Sam's series to remove non-CAS sparc32
> first:
>
> https://lore.kernel.org/all/[email protected]/
>
> I don't see a patch yet to actually change cmpxchg() to use CAS,
> but it can probably just share the sparc64 implementation at
> that point.

Fair points!

In the meantime, I pulled in Al's patches for both sparc and parisc.

If I leave out sparc support, I am inducing build failures on sparc in
RCU code. I do not feel comfortable pulling in Sam's series.

One approach is for me to push in the emulation and uses as they are
accepted, and modify from there.

Another approach would be for me to add KCSAN annotation to RCU's current
open-coded one-byte emulation and let things play out from there.

A third would be for us all to deadlock on each other.

My preference is the first approach. But what would work better?

Thanx, Paul

2024-04-02 20:03:18

by Paul E. McKenney

[permalink] [raw]
Subject: Re: [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32

On Tue, Apr 02, 2024 at 12:28:28AM -0400, Al Viro wrote:
> Conversion between u32 and unsigned long is tautological there,
> and the only use of return value is to return it from
> __cmpxchg() (which return unsigned long).
>
> Signed-off-by: Al Viro <[email protected]>

I have pulled these in as replacements for my patches in the meantime.

Thank you!

Thanx, Paul

> ---
> arch/sparc/include/asm/cmpxchg_32.h | 2 +-
> arch/sparc/lib/atomic32.c | 4 ++--
> 2 files changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
> index d0af82c240b7..112bfaa28729 100644
> --- a/arch/sparc/include/asm/cmpxchg_32.h
> +++ b/arch/sparc/include/asm/cmpxchg_32.h
> @@ -39,7 +39,7 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v
> /* bug catcher for when unsupported size is used - won't link */
> void __cmpxchg_called_with_bad_pointer(void);
> /* we only need to support cmpxchg of a u32 on sparc */
> -unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
> +u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
>
> /* don't worry...optimizer will get rid of most of this */
> static inline unsigned long
> diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
> index cf80d1ae352b..d90d756123d8 100644
> --- a/arch/sparc/lib/atomic32.c
> +++ b/arch/sparc/lib/atomic32.c
> @@ -159,7 +159,7 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
> }
> EXPORT_SYMBOL(sp32___change_bit);
>
> -unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
> +u32 __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
> {
> unsigned long flags;
> u32 prev;
> @@ -169,7 +169,7 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
> *ptr = new;
> spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
>
> - return (unsigned long)prev;
> + return prev;
> }
> EXPORT_SYMBOL(__cmpxchg_u32);
>
> --
> 2.39.2
>

2024-04-03 23:34:21

by Al Viro

[permalink] [raw]
Subject: Re: [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32

On Tue, Apr 02, 2024 at 01:03:13PM -0700, Paul E. McKenney wrote:
> On Tue, Apr 02, 2024 at 12:28:28AM -0400, Al Viro wrote:
> > Conversion between u32 and unsigned long is tautological there,
> > and the only use of return value is to return it from
> > __cmpxchg() (which return unsigned long).
> >
> > Signed-off-by: Al Viro <[email protected]>
>
> I have pulled these in as replacements for my patches in the meantime.
>
> Thank you!

FWIW, updated branch force-pushed; the difference is that __cmpxchg()
on sparc32 went
- switch (size) {
- case 1:
- return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
- case 2:
- return __cmpxchg_u16((u16 *)ptr, (u16)old, (u16)new_);
- case 4:
- return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
- default:
- __cmpxchg_called_with_bad_pointer();
- break;
- }
- return old;
+ return
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);

(and similar for parisc). Rationale: sparse does generate constant
truncation warnings in unreachable statements, but not in never-evaluated
subexpressions. Alternative would be what parisc used to do in mainline:
case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
and we'd need the same in 16bit case (both on parisc and sparc32).
Explicit (and rather mysterious) & 0xff for passing unsigned long to
a function that takes u8 was there to tell sparse that e.g.
cmpxchg(&int_var, 0, 0x12345678) was *not* trying to feed
0x12345678 to a __cmpxchg_u8(), which would quietly truncate it had
it ever been reached. Use of conditional expression avoids that
without having to play with explicit (and utterly pointless from
C point of view) masking. IMO it's better that way, not to mention
being more concise than use of switch.

2024-04-04 03:09:29

by Paul E. McKenney

[permalink] [raw]
Subject: Re: [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32

On Wed, Apr 03, 2024 at 11:20:53PM +0100, Al Viro wrote:
> On Tue, Apr 02, 2024 at 01:03:13PM -0700, Paul E. McKenney wrote:
> > On Tue, Apr 02, 2024 at 12:28:28AM -0400, Al Viro wrote:
> > > Conversion between u32 and unsigned long is tautological there,
> > > and the only use of return value is to return it from
> > > __cmpxchg() (which return unsigned long).
> > >
> > > Signed-off-by: Al Viro <[email protected]>
> >
> > I have pulled these in as replacements for my patches in the meantime.
> >
> > Thank you!
>
> FWIW, updated branch force-pushed; the difference is that __cmpxchg()
> on sparc32 went
> - switch (size) {
> - case 1:
> - return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
> - case 2:
> - return __cmpxchg_u16((u16 *)ptr, (u16)old, (u16)new_);
> - case 4:
> - return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
> - default:
> - __cmpxchg_called_with_bad_pointer();
> - break;
> - }
> - return old;
> + return
> + size == 1 ? __cmpxchg_u8(ptr, old, new_) :
> + size == 2 ? __cmpxchg_u16(ptr, old, new_) :
> + size == 4 ? __cmpxchg_u32(ptr, old, new_) :
> + (__cmpxchg_called_with_bad_pointer(), old);
>
> (and similar for parisc). Rationale: sparse does generate constant
> truncation warnings in unreachable statements, but not in never-evaluated
> subexpressions. Alternative would be what parisc used to do in mainline:
> case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
> and we'd need the same in 16bit case (both on parisc and sparc32).
> Explicit (and rather mysterious) & 0xff for passing unsigned long to
> a function that takes u8 was there to tell sparse that e.g.
> cmpxchg(&int_var, 0, 0x12345678) was *not* trying to feed
> 0x12345678 to a __cmpxchg_u8(), which would quietly truncate it had
> it ever been reached. Use of conditional expression avoids that
> without having to play with explicit (and utterly pointless from
> C point of view) masking. IMO it's better that way, not to mention
> being more concise than use of switch.

Cute! I replaced the old versions of your patches with this series.

However, I was too lazy to apply this transformation to the other
cmpxchg() implementations. ;-)

Thanx, Paul