Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
in asm/bitops.h but failed to clear it for PPC32 in
asm/simple_spinlock.h.

So, do as in arch_atomic_try_cmpxchg_lock(): set it to 1 on PPC64
and to 0 on PPC32. For that, use IS_ENABLED(CONFIG_PPC64), which
returns 1 when CONFIG_PPC64 is set and 0 otherwise.
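
For reference, a minimal standalone sketch of the pattern (hypothetical
helper name, kernel context assumed): the 0/1 constant feeds an "i"
operand that becomes the lwarx eh field.

        static inline unsigned int ll_read(unsigned int *p)
        {
                unsigned int tmp;
                /* compile-time constant: 1 on PPC64, 0 on PPC32 */
                unsigned int eh = IS_ENABLED(CONFIG_PPC64);

                /*
                 * eh must stay 0 on PPC32: e500v1/v2 based CPUs treat
                 * a lwarx with EH set as an illegal instruction.
                 */
                asm volatile("lwarx %0,0,%1,%2"
                             : "=r" (tmp)
                             : "r" (p), "i" (eh)
                             : "memory");

                return tmp;
        }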
Reported-by: Pali Rohár <[email protected]>
Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
Cc: [email protected]
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/include/asm/simple_spinlock.h | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 7ae6aeef8464..5095c636a680 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2,1\n\
+"1: lwarx %0,0,%2,%3\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
@@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
PPC_ACQUIRE_BARRIER
"2:"
: "=&r" (tmp)
- : "r" (token), "r" (&lock->slock)
+ : "r" (token), "r" (&lock->slock), "i" (eh)
: "cr0", "memory");
return tmp;
@@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
__asm__ __volatile__(
-"1: lwarx %0,0,%1,1\n"
+"1: lwarx %0,0,%1,%2\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
@@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
bne- 1b\n"
PPC_ACQUIRE_BARRIER
"2:" : "=&r" (tmp)
- : "r" (&rw->lock)
+ : "r" (&rw->lock), "i" (eh)
: "cr0", "xer", "memory");
return tmp;
@@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2,1\n\
+"1: lwarx %0,0,%2,%3\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ACQUIRE_BARRIER
"2:" : "=&r" (tmp)
- : "r" (token), "r" (&rw->lock)
+ : "r" (token), "r" (&rw->lock), "i" (eh)
: "cr0", "memory");
return tmp;
--
2.36.1
The eh field must remain 0 on PPC32; it is only used
by PPC64.

Don't hide that behind a macro; just leave the responsibility
to the user.

Currently, the only users of PPC_RAW_L{WDQ}ARX set the eh field
to 0, so the special handling in __PPC_EH() is useless. Just take
the value given by the caller.

Same for DEFINE_TESTOP(): don't do special handling in that macro,
just ensure the caller hands over the proper eh value.
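
For example, a hypothetical caller that wanted the hint would now have
to guard it itself rather than rely on the macro (register numbers
picked arbitrarily for illustration):

        /* request the hint only where it is legal, i.e. only on 64-bit */
        unsigned int insn = PPC_RAW_LWARX(10, 0, 3, IS_ENABLED(CONFIG_PPC64));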
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/include/asm/bitops.h | 4 ++--
arch/powerpc/include/asm/ppc-opcode.h | 11 +----------
2 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 344fba3b16eb..c2b8c53e0dcb 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -163,7 +163,7 @@ static inline unsigned long fn( \
"bne- 1b\n" \
postfix \
: "=&r" (old), "=&r" (t) \
- : "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
+ : "rK" (mask), "r" (p), "i" (eh) \
: "cc", "memory"); \
return (old & mask); \
}
@@ -171,7 +171,7 @@ static inline unsigned long fn( \
DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
- PPC_ACQUIRE_BARRIER, 1)
+ PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 7b81b37a191e..d9703c5fd713 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -343,6 +343,7 @@
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
#define __PPC_RC21 (0x1 << 10)
#define __PPC_PRFX_R(r) (((r) & 0x1) << 20)
+#define __PPC_EH(eh) (((eh) & 0x1) << 0)
/*
* Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
@@ -359,16 +360,6 @@
#define PPC_LI_MASK 0x03fffffc
#define PPC_LI(v) ((v) & PPC_LI_MASK)
-/*
- * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
- * larx with EH set as an illegal instruction.
- */
-#ifdef CONFIG_PPC64
-#define __PPC_EH(eh) (((eh) & 0x1) << 0)
-#else
-#define __PPC_EH(eh) 0
-#endif
-
/* Base instruction encoding */
#define PPC_RAW_CP_ABORT (0x7c00068c)
#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
--
2.36.1
Just like in the first patch of this series, define a local 'eh'
in order to make the code clearer.

And since IS_ENABLED() returns either 1 or 0, there is no need to
write IS_ENABLED(CONFIG_PPC64) ? 1 : 0.
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/include/asm/atomic.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 853dc86864f4..0204e77613ec 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -140,6 +140,7 @@ static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
__asm__ __volatile__ (
"1: lwarx %0,0,%2,%5 # atomic_try_cmpxchg_acquire \n"
@@ -150,7 +151,7 @@ arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (r), "+m" (v->counter)
- : "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+ : "r" (&v->counter), "r" (o), "r" (new), "i" (eh)
: "cr0", "memory");
if (unlikely(r != o))
--
2.36.1
Hi!
On Tue, Aug 02, 2022 at 11:02:36AM +0200, Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h.
>
> So, do as in arch_atomic_try_cmpxchg_lock(): set it to 1 on PPC64
> and to 0 on PPC32. For that, use IS_ENABLED(CONFIG_PPC64), which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
>
> Reported-by: Pali Rohár <[email protected]>
Reviewed-by: Segher Boessenkool <[email protected]>
> + unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>
> token = LOCK_TOKEN;
> __asm__ __volatile__(
> -"1: lwarx %0,0,%2,1\n\
> +"1: lwarx %0,0,%2,%3\n\
> cmpwi 0,%0,0\n\
> bne- 2f\n\
> stwcx. %1,0,%2\n\
> @@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
> PPC_ACQUIRE_BARRIER
> "2:"
> : "=&r" (tmp)
> - : "r" (token), "r" (&lock->slock)
> + : "r" (token), "r" (&lock->slock), "i" (eh)
> : "cr0", "memory");
That should work, yes. But please note that "n" is preferred when a
number is required (like here), rather than some other constant, as
"i" would allow.
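
Concretely (an untested sketch of that suggestion), the operand line
would become:

        : "r" (token), "r" (&lock->slock), "n" (eh)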
Thanks!
Segher
On Tuesday 02 August 2022 11:02:36 Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h.
>
> So, do as in arch_atomic_try_cmpxchg_lock(): set it to 1 on PPC64
> and to 0 on PPC32. For that, use IS_ENABLED(CONFIG_PPC64), which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
>
> Reported-by: Pali Rohár <[email protected]>
> Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
> Cc: [email protected]
> Signed-off-by: Christophe Leroy <[email protected]>
This fix works perfectly. Thanks!
Tested-by: Pali Rohár <[email protected]>