This series makes the barrier-related macros neater and clearer.
This is a follow-up to [0-3], now split into multiple patches;
for readability, a new message thread has been created.
v5 -> v6: [PATCH 1/4] make the change pass checkpatch.pl directly
instead of fixing it up again in [PATCH 4/4]. For [PATCH 4/4], change
the error message example in the commit message to make it more relevant.
v4 -> v5: [PATCH 3/4] let __atomic_acquire_fence and __atomic_release_fence
keep the omit-the-fence-on-uniprocessor optimization, and fix the typo in
RISCV_RELEASE_BARRIER introduced when splitting the patch in v3.
v3 -> v4: fix the weird line breaks in the [PATCH 1/4] commit message, and
rework [PATCH 3/4] into a form that passes checkpatch.pl.
v2 -> v3: split the patch into multiple patches, one problem per patch.
Also revise the changelog to make the description more precise.
v1 -> v2: make compilation pass with allyesconfig instead of
defconfig only, and satisfy scripts/checkpatch.pl.
- (__asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"))
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
[0] (v1/v2) https://lore.kernel.org/lkml/[email protected]/
[1] (v3) https://lore.kernel.org/lkml/[email protected]/
[2] (v4) https://lore.kernel.org/lkml/[email protected]/
[3] (v5) https://lore.kernel.org/lkml/[email protected]/
Eric Chan (4):
riscv/barrier: Define __{mb,rmb,wmb}
riscv/barrier: Define RISCV_FULL_BARRIER
riscv/barrier: Consolidate fence definitions
riscv/barrier: Resolve checkpatch.pl error
arch/riscv/include/asm/atomic.h | 17 ++++++++---------
arch/riscv/include/asm/barrier.h | 21 ++++++++++-----------
arch/riscv/include/asm/cmpxchg.h | 5 ++---
arch/riscv/include/asm/fence.h | 10 ++++++++--
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 5 +++--
arch/riscv/include/asm/mmiowb.h | 2 +-
7 files changed, 36 insertions(+), 32 deletions(-)
--
2.44.0.rc0.258.g7320e95886-goog
Introduce __{mb,rmb,wmb} and rely on the generic definitions for
{mb,rmb,wmb}. Although KCSAN is not supported on riscv yet, this keeps
the definitions consistent with the generic instrumentation. Also add a
space after the ',' so that the change passes checkpatch.pl.
Without the space, checkpatch.pl reports the following error:
ERROR: space required after that ',' (ctx:VxV)
26: FILE: arch/riscv/include/asm/barrier.h:23:
+#define __mb() RISCV_FENCE(iorw,iorw)
^
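For context on relying on the generic definitions: asm-generic/barrier.h
is expected to build {mb,rmb,wmb} on top of the arch-provided
double-underscore variants, roughly as below (a sketch; the kcsan_*()
hooks are the generic instrumentation mentioned above):

	#ifndef mb
	#define mb()	do { kcsan_mb(); __mb(); } while (0)
	#endif

	#ifndef rmb
	#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
	#endif

	#ifndef wmb
	#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
	#endif

so the architecture only needs to supply __mb()/__rmb()/__wmb() and the
instrumented wrappers come from the generic header.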
Signed-off-by: Eric Chan <[email protected]>
---
v5 -> v6: make this change pass checkpatch.pl directly instead of
fixing it up again in [PATCH 4/4]
v3 -> v4: fix the weird line breaks in the commit message
arch/riscv/include/asm/barrier.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 110752594228..173b44a989f8 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -20,9 +20,9 @@
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
-#define mb() RISCV_FENCE(iorw,iorw)
-#define rmb() RISCV_FENCE(ir,ir)
-#define wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw, iorw)
+#define __rmb() RISCV_FENCE(ir, ir)
+#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw,rw)
--
2.44.0.rc0.258.g7320e95886-goog
Introduce RISCV_FULL_BARRIER and use it in the arch_atomic* functions.
Like RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER, the fence
instruction can be eliminated when SMP is not enabled.
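As an illustration (not part of the diff), the tail of the lr/sc retry
loops ends up as one of the following, depending on CONFIG_SMP:

	/* CONFIG_SMP=y: RISCV_FULL_BARRIER expands to "\tfence rw, rw\n" */
	"	sc.w.rl %[rc], %[rc], %[c]\n"
	"	bnez %[rc], 0b\n"
	"	fence rw, rw\n"
	"1:\n"

	/* CONFIG_SMP=n: RISCV_FULL_BARRIER expands to nothing */
	"	sc.w.rl %[rc], %[rc], %[c]\n"
	"	bnez %[rc], 0b\n"
	"1:\n"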
Signed-off-by: Eric Chan <[email protected]>
---
arch/riscv/include/asm/atomic.h | 16 ++++++++--------
arch/riscv/include/asm/cmpxchg.h | 4 ++--
arch/riscv/include/asm/fence.h | 2 ++
3 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c2153..31e6e2e7cc18 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -207,7 +207,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -228,7 +228,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -248,7 +248,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -268,7 +268,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -288,7 +288,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -310,7 +310,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -331,7 +331,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -352,7 +352,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2f4726d3cfcc..a608e4d1a0a4 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -313,7 +313,7 @@
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
@@ -325,7 +325,7 @@
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 2b443a3a487f..6c26c44dfcd6 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -4,9 +4,11 @@
#ifdef CONFIG_SMP
#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#define RISCV_FULL_BARRIER "\tfence rw, rw\n"
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
--
2.44.0.rc0.258.g7320e95886-goog
Consolidate the disparate fence implementations into fence.h.
Also introduce RISCV_FENCE_ASM to make the fence macro more reusable.
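Both usage styles then derive from the same building block; a sketch of
the intended usage (the actual definitions are in the diff below):

	/* As a standalone statement in C code, e.g. mmiowb(): */
	#define mmiowb()	RISCV_FENCE(o, w)

	/* As a string spliced into a larger asm sequence, e.g. acquire: */
	#define RISCV_ACQUIRE_BARRIER	RISCV_FENCE_ASM(r, rw)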
Signed-off-by: Eric Chan <[email protected]>
---
v4 -> v5: let __atomic_acquire_fence and __atomic_release_fence keep the
omit-the-fence-on-uniprocessor optimization, and fix the typo in
RISCV_RELEASE_BARRIER introduced when splitting the patch in v3.
v3 -> v4: rework into a form that passes checkpatch.pl.
v1 -> v2: make compilation pass with allyesconfig instead of
defconfig only, and satisfy scripts/checkpatch.pl.
- (__asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"))
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
arch/riscv/include/asm/atomic.h | 1 -
arch/riscv/include/asm/barrier.h | 3 +--
arch/riscv/include/asm/cmpxchg.h | 1 -
arch/riscv/include/asm/fence.h | 10 +++++++---
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 5 +++--
arch/riscv/include/asm/mmiowb.h | 2 +-
7 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 31e6e2e7cc18..0e0522e588ca 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -17,7 +17,6 @@
#endif
#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 173b44a989f8..15857dbc2279 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,13 +11,12 @@
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
+#include <asm/fence.h>
#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))
-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define __mb() RISCV_FENCE(iorw, iorw)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index a608e4d1a0a4..2fee65cc8443 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,7 +8,6 @@
#include <linux/bug.h>
-#include <asm/barrier.h>
#include <asm/fence.h>
#define __xchg_relaxed(ptr, new, size) \
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6c26c44dfcd6..6bcd80325dfc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,10 +1,14 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
-#define RISCV_FULL_BARRIER "\tfence rw, rw\n"
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 42497d487a17..1c5c641075d2 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr() RISCV_FENCE(io, i)
+#define __io_par(v) RISCV_FENCE(i, ior)
+#define __io_pbw() RISCV_FENCE(iow, o)
+#define __io_paw() RISCV_FENCE(o, io)
/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95ec..06cadfd7a237 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H
#include <linux/types.h>
+#include <asm/fence.h>
#include <asm/mmiowb.h>
/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v) RISCV_FENCE(i, ir)
+#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 0b2333e71fdc..52ce4a399d9b 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb() RISCV_FENCE(o, w)
#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
--
2.44.0.rc0.258.g7320e95886-goog
The previous form of RISCV_FENCE causes checkpatch.pl to issue error
messages, for example:
ERROR: space required after that ',' (ctx:VxV)
26: FILE: arch/riscv/include/asm/barrier.h:27:
+#define __smp_mb() RISCV_FENCE(rw,rw)
^
Fix the remaining uses of RISCV_FENCE.
Signed-off-by: Eric Chan <[email protected]>
---
v5 -> v6: change the error message example in the commit message to make
it more relevant
arch/riscv/include/asm/barrier.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 15857dbc2279..880b56d8480d 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -24,14 +24,14 @@
#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb() RISCV_FENCE(rw,rw)
-#define __smp_rmb() RISCV_FENCE(r,r)
-#define __smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw, rw)
+#define __smp_rmb() RISCV_FENCE(r, r)
+#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(rw,w); \
+ RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -39,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(r,rw); \
+ RISCV_FENCE(r, rw); \
___p1; \
})
@@ -68,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
#include <asm-generic/barrier.h>
--
2.44.0.rc0.258.g7320e95886-goog
> Eric Chan (4):
> riscv/barrier: Define __{mb,rmb,wmb}
> riscv/barrier: Define RISCV_FULL_BARRIER
> riscv/barrier: Consolidate fence definitions
> riscv/barrier: Resolve checkpatch.pl error
LGTM. For the series,
Reviewed-by: Andrea Parri <[email protected]>
Andrea
On 2024-02-17 7:12 AM, Eric Chan wrote:
> This series makes the barrier-related macros neater and clearer.
> This is a follow-up to [0-3], now split into multiple patches;
> for readability, a new message thread has been created.
>
> v5 -> v6: [PATCH 1/4] make the change pass checkpatch.pl directly
> instead of fixing it up again in [PATCH 4/4]. For [PATCH 4/4], change
> the error message example in the commit message to make it more relevant.
>
> v4 -> v5: [PATCH 3/4] let __atomic_acquire_fence and __atomic_release_fence
> keep the omit-the-fence-on-uniprocessor optimization, and fix the typo in
> RISCV_RELEASE_BARRIER introduced when splitting the patch in v3.
>
> v3 -> v4: fix the weird line breaks in the [PATCH 1/4] commit message, and
> rework [PATCH 3/4] into a form that passes checkpatch.pl.
>
> v2 -> v3: split the patch into multiple patches, one problem per patch.
> Also revise the changelog to make the description more precise.
>
> v1 -> v2: make compilation pass with allyesconfig instead of
> defconfig only, and satisfy scripts/checkpatch.pl.
> - (__asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"))
> + ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
>
> [0] (v1/v2) https://lore.kernel.org/lkml/[email protected]/
> [1] (v3) https://lore.kernel.org/lkml/[email protected]/
> [2] (v4) https://lore.kernel.org/lkml/[email protected]/
> [3] (v5) https://lore.kernel.org/lkml/[email protected]/
>
> Eric Chan (4):
> riscv/barrier: Define __{mb,rmb,wmb}
> riscv/barrier: Define RISCV_FULL_BARRIER
> riscv/barrier: Consolidate fence definitions
> riscv/barrier: Resolve checkpatch.pl error
>
> arch/riscv/include/asm/atomic.h | 17 ++++++++---------
> arch/riscv/include/asm/barrier.h | 21 ++++++++++-----------
> arch/riscv/include/asm/cmpxchg.h | 5 ++---
> arch/riscv/include/asm/fence.h | 10 ++++++++--
> arch/riscv/include/asm/io.h | 8 ++++----
> arch/riscv/include/asm/mmio.h | 5 +++--
> arch/riscv/include/asm/mmiowb.h | 2 +-
> 7 files changed, 36 insertions(+), 32 deletions(-)
For the series:
Reviewed-by: Samuel Holland <[email protected]>
Tested-by: Samuel Holland <[email protected]>
Hello:
This series was applied to riscv/linux.git (for-next)
by Palmer Dabbelt <[email protected]>:
On Sat, 17 Feb 2024 13:12:06 +0000 you wrote:
> This series makes the barrier-related macros neater and clearer.
> This is a follow-up to [0-3], now split into multiple patches;
> for readability, a new message thread has been created.
>
> v5 -> v6: [PATCH 1/4] make the change pass checkpatch.pl directly
> instead of fixing it up again in [PATCH 4/4]. For [PATCH 4/4], change
> the error message example in the commit message to make it more relevant.
>
> [...]
Here is the summary with links:
- [v6,1/4] riscv/barrier: Define __{mb,rmb,wmb}
https://git.kernel.org/riscv/c/89f4fd7b1ab7
- [v6,2/4] riscv/barrier: Define RISCV_FULL_BARRIER
https://git.kernel.org/riscv/c/b3c8064ccc44
- [v6,3/4] riscv/barrier: Consolidate fence definitions
https://git.kernel.org/riscv/c/c85688e2b0f0
- [v6,4/4] riscv/barrier: Add missing space after ','
https://git.kernel.org/riscv/c/9133e6e6908d
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html