2024-02-13 14:27:19

by Eric Chan

[permalink] [raw]
Subject: [PATCH v3 0/4] riscv/barrier: tidying up barrier-related macro

This series makes the barrier-related macros neater and clearer.
It is a follow-up to [0], split into multiple patches for
readability and posted as a new message thread.

v2 -> v3: split the patch into multiple patches for one problem per patch.
Also review the changelog to make the description more precise.

v1 -> v2: makes compilation pass with allyesconfig instead of
defconfig only, also satisfy scripts/checkpatch.pl.
- (__asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"))
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

[0] https://lore.kernel.org/lkml/[email protected]/

Eric Chan (4):
riscv/barrier: Define __{mb,rmb,wmb}
riscv/barrier: Define RISCV_FULL_BARRIER
riscv/barrier: Consolidate fence definitions
riscv/barrier: Resolve checkpatch.pl error

arch/riscv/include/asm/atomic.h | 24 ++++++++++--------------
arch/riscv/include/asm/barrier.h | 21 ++++++++++-----------
arch/riscv/include/asm/cmpxchg.h | 5 ++---
arch/riscv/include/asm/fence.h | 10 ++++++++--
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 5 +++--
arch/riscv/include/asm/mmiowb.h | 2 +-
7 files changed, 38 insertions(+), 37 deletions(-)

--
2.43.0.687.g38aa6559b0-goog


2024-02-13 14:29:25

by Eric Chan

[permalink] [raw]
Subject: [PATCH v3 1/4] riscv/barrier: Define __{mb,rmb,wmb}

Introduce __{mb,rmb,wmb}, and rely on the generic definitions
for {mb,rmb,wmb}. Although KCSAN is not yet supported, this makes
the definitions more consistent with the generic instrumentation.

Signed-off-by: Eric Chan <[email protected]>
---
arch/riscv/include/asm/barrier.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 110752594228..4c49a8ff2c68 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -20,9 +20,9 @@
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices or memory. */
-#define mb() RISCV_FENCE(iorw,iorw)
-#define rmb() RISCV_FENCE(ir,ir)
-#define wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw,iorw)
+#define __rmb() RISCV_FENCE(ir,ir)
+#define __wmb() RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw,rw)
--
2.43.0.687.g38aa6559b0-goog


2024-02-13 14:30:24

by Eric Chan

[permalink] [raw]
Subject: [PATCH v3 3/4] riscv/barrier: Consolidate fence definitions

Disparate fence implementations are consolidated into fence.h
and aligned with the existing form.

Signed-off-by: Eric Chan <[email protected]>
---
arch/riscv/include/asm/atomic.h | 8 ++------
arch/riscv/include/asm/barrier.h | 3 +--
arch/riscv/include/asm/cmpxchg.h | 1 -
arch/riscv/include/asm/fence.h | 10 +++++++---
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 5 +++--
arch/riscv/include/asm/mmiowb.h | 2 +-
7 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 31e6e2e7cc18..1b2ae3259f1d 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -17,13 +17,9 @@
#endif

#include <asm/cmpxchg.h>
-#include <asm/barrier.h>

-#define __atomic_acquire_fence() \
- __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
-
-#define __atomic_release_fence() \
- __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
+#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
+#define __atomic_release_fence() RISCV_FENCE(rw,r)

static __always_inline int arch_atomic_read(const atomic_t *v)
{
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 4c49a8ff2c68..4f4743d7440d 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,13 +11,12 @@
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__
+#include <asm/fence.h>

#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))

-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices or memory. */
#define __mb() RISCV_FENCE(iorw,iorw)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index a608e4d1a0a4..2fee65cc8443 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,7 +8,6 @@

#include <linux/bug.h>

-#include <asm/barrier.h>
#include <asm/fence.h>

#define __xchg_relaxed(ptr, new, size) \
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6c26c44dfcd6..ca094d72ec20 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,10 +1,14 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H

+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
-#define RISCV_FULL_BARRIER "\tfence rw, rw\n"
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r,rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw,r)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw,rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 42497d487a17..afb5ead7552e 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr() RISCV_FENCE(io,i)
+#define __io_par(v) RISCV_FENCE(i,ior)
+#define __io_pbw() RISCV_FENCE(iow,o)
+#define __io_paw() RISCV_FENCE(o,io)

/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95ec..a708968d4a0f 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H

#include <linux/types.h>
+#include <asm/fence.h>
#include <asm/mmiowb.h>

/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v) RISCV_FENCE(i,ir)
+#define __io_bw() RISCV_FENCE(w,o)
#define __io_aw() mmiowb_set_pending()

#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 0b2333e71fdc..3bcae97d4803 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb() RISCV_FENCE(o,w)

#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
--
2.43.0.687.g38aa6559b0-goog


2024-02-13 14:31:18

by Eric Chan

[permalink] [raw]
Subject: [PATCH v3 4/4] riscv/barrier: Resolve checkpatch.pl error

The original form would cause checkpatch.pl to issue an error.
The error message is as follows:
ERROR: space required after that ',' (ctx:VxV)
+#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
^
Correct the form of the existing RISCV_FENCE and RISCV_FENCE_ASM
definitions as well.

Signed-off-by: Eric Chan <[email protected]>
---
arch/riscv/include/asm/atomic.h | 4 ++--
arch/riscv/include/asm/barrier.h | 18 +++++++++---------
arch/riscv/include/asm/fence.h | 6 +++---
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 4 ++--
arch/riscv/include/asm/mmiowb.h | 2 +-
6 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 1b2ae3259f1d..19050d13b6c1 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -18,8 +18,8 @@

#include <asm/cmpxchg.h>

-#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
-#define __atomic_release_fence() RISCV_FENCE(rw,r)
+#define __atomic_acquire_fence() RISCV_FENCE(r, rw)
+#define __atomic_release_fence() RISCV_FENCE(rw, r)

static __always_inline int arch_atomic_read(const atomic_t *v)
{
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 4f4743d7440d..880b56d8480d 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -19,19 +19,19 @@


/* These barriers need to enforce ordering on both devices or memory. */
-#define __mb() RISCV_FENCE(iorw,iorw)
-#define __rmb() RISCV_FENCE(ir,ir)
-#define __wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw, iorw)
+#define __rmb() RISCV_FENCE(ir, ir)
+#define __wmb() RISCV_FENCE(ow, ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb() RISCV_FENCE(rw,rw)
-#define __smp_rmb() RISCV_FENCE(r,r)
-#define __smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw, rw)
+#define __smp_rmb() RISCV_FENCE(r, r)
+#define __smp_wmb() RISCV_FENCE(w, w)

#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(rw,w); \
+ RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)

@@ -39,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(r,rw); \
+ RISCV_FENCE(r, rw); \
___p1; \
})

@@ -68,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)

#include <asm-generic/barrier.h>

diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index ca094d72ec20..5b46f96a3ec8 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -6,9 +6,9 @@
({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r,rw)
-#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw,r)
-#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw,rw)
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, r)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index afb5ead7552e..1c5c641075d2 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() RISCV_FENCE(io,i)
-#define __io_par(v) RISCV_FENCE(i,ior)
-#define __io_pbw() RISCV_FENCE(iow,o)
-#define __io_paw() RISCV_FENCE(o,io)
+#define __io_pbr() RISCV_FENCE(io, i)
+#define __io_par(v) RISCV_FENCE(i, ior)
+#define __io_pbw() RISCV_FENCE(iow, o)
+#define __io_paw() RISCV_FENCE(o, io)

/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index a708968d4a0f..06cadfd7a237 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -132,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) RISCV_FENCE(i,ir)
-#define __io_bw() RISCV_FENCE(w,o)
+#define __io_ar(v) RISCV_FENCE(i, ir)
+#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()

#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 3bcae97d4803..52ce4a399d9b 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() RISCV_FENCE(o,w)
+#define mmiowb() RISCV_FENCE(o, w)

#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
--
2.43.0.687.g38aa6559b0-goog


2024-02-13 14:33:42

by Eric Chan

[permalink] [raw]
Subject: [PATCH v3 2/4] riscv/barrier: Define RISCV_FULL_BARRIER

Introduce RISCV_FULL_BARRIER and use it in the arch_atomic* functions.
Like RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER, the fence
instruction can be eliminated when SMP is not enabled.

Signed-off-by: Eric Chan <[email protected]>
---
arch/riscv/include/asm/atomic.h | 16 ++++++++--------
arch/riscv/include/asm/cmpxchg.h | 4 ++--
arch/riscv/include/asm/fence.h | 2 ++
3 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c2153..31e6e2e7cc18 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -207,7 +207,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -228,7 +228,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -248,7 +248,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -268,7 +268,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -288,7 +288,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -310,7 +310,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -331,7 +331,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -352,7 +352,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2f4726d3cfcc..a608e4d1a0a4 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -313,7 +313,7 @@
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
@@ -325,7 +325,7 @@
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 2b443a3a487f..6c26c44dfcd6 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -4,9 +4,11 @@
#ifdef CONFIG_SMP
#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#define RISCV_FULL_BARRIER "\tfence rw, rw\n"
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
#endif

#endif /* _ASM_RISCV_FENCE_H */
--
2.43.0.687.g38aa6559b0-goog


2024-02-13 15:51:21

by Emil Renner Berthing

[permalink] [raw]
Subject: Re: [PATCH v3 1/4] riscv/barrier: Define __{mb,rmb,wmb}

Eric Chan wrote:
> Introduce __{mb,rmb,wmb}, and rely on the generic definitions
> for {mb,rmb,wmb}.
> Although KCSAN is not yet support,
> it can be made more consistent with generic instrumentation.

nit: this commit message has some weird line breaks

>
> Signed-off-by: Eric Chan <[email protected]>
> ---
> arch/riscv/include/asm/barrier.h | 6 +++---
> 1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
> index 110752594228..4c49a8ff2c68 100644
> --- a/arch/riscv/include/asm/barrier.h
> +++ b/arch/riscv/include/asm/barrier.h
> @@ -20,9 +20,9 @@
> __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
>
> /* These barriers need to enforce ordering on both devices or memory. */
> -#define mb() RISCV_FENCE(iorw,iorw)
> -#define rmb() RISCV_FENCE(ir,ir)
> -#define wmb() RISCV_FENCE(ow,ow)
> +#define __mb() RISCV_FENCE(iorw,iorw)
> +#define __rmb() RISCV_FENCE(ir,ir)
> +#define __wmb() RISCV_FENCE(ow,ow)
>
> /* These barriers do not need to enforce ordering on devices, just memory. */
> #define __smp_mb() RISCV_FENCE(rw,rw)
> --
> 2.43.0.687.g38aa6559b0-goog
>
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv

2024-02-13 15:53:47

by Emil Renner Berthing

[permalink] [raw]
Subject: Re: [PATCH v3 4/4] riscv/barrier: Resolve checkpatch.pl error

Eric Chan wrote:
> The original form would cause checkpath.pl to issue a error.
> The error message is as follows:
> ERROR: space required after that ',' (ctx:VxV)
> +#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
> ^
> correct the form of RISCV_FENCE and RISCV_FENCE_ASM even if they
> already exist.

A lot of the changes in this patch fix lines that were added in the previous
patches. I'd prefer to add new code correctly and then only fix the remaining
instances in this patch.

/Emil

>
> Signed-off-by: Eric Chan <[email protected]>
> ---
> arch/riscv/include/asm/atomic.h | 4 ++--
> arch/riscv/include/asm/barrier.h | 18 +++++++++---------
> arch/riscv/include/asm/fence.h | 6 +++---
> arch/riscv/include/asm/io.h | 8 ++++----
> arch/riscv/include/asm/mmio.h | 4 ++--
> arch/riscv/include/asm/mmiowb.h | 2 +-
> 6 files changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
> index 1b2ae3259f1d..19050d13b6c1 100644
> --- a/arch/riscv/include/asm/atomic.h
> +++ b/arch/riscv/include/asm/atomic.h
> @@ -18,8 +18,8 @@
>
> #include <asm/cmpxchg.h>
>
> -#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
> -#define __atomic_release_fence() RISCV_FENCE(rw,r)
> +#define __atomic_acquire_fence() RISCV_FENCE(r, rw)
> +#define __atomic_release_fence() RISCV_FENCE(rw, r)
>
> static __always_inline int arch_atomic_read(const atomic_t *v)
> {
> diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
> index 4f4743d7440d..880b56d8480d 100644
> --- a/arch/riscv/include/asm/barrier.h
> +++ b/arch/riscv/include/asm/barrier.h
> @@ -19,19 +19,19 @@
>
>
> /* These barriers need to enforce ordering on both devices or memory. */
> -#define __mb() RISCV_FENCE(iorw,iorw)
> -#define __rmb() RISCV_FENCE(ir,ir)
> -#define __wmb() RISCV_FENCE(ow,ow)
> +#define __mb() RISCV_FENCE(iorw, iorw)
> +#define __rmb() RISCV_FENCE(ir, ir)
> +#define __wmb() RISCV_FENCE(ow, ow)
>
> /* These barriers do not need to enforce ordering on devices, just memory. */
> -#define __smp_mb() RISCV_FENCE(rw,rw)
> -#define __smp_rmb() RISCV_FENCE(r,r)
> -#define __smp_wmb() RISCV_FENCE(w,w)
> +#define __smp_mb() RISCV_FENCE(rw, rw)
> +#define __smp_rmb() RISCV_FENCE(r, r)
> +#define __smp_wmb() RISCV_FENCE(w, w)
>
> #define __smp_store_release(p, v) \
> do { \
> compiletime_assert_atomic_type(*p); \
> - RISCV_FENCE(rw,w); \
> + RISCV_FENCE(rw, w); \
> WRITE_ONCE(*p, v); \
> } while (0)
>
> @@ -39,7 +39,7 @@ do { \
> ({ \
> typeof(*p) ___p1 = READ_ONCE(*p); \
> compiletime_assert_atomic_type(*p); \
> - RISCV_FENCE(r,rw); \
> + RISCV_FENCE(r, rw); \
> ___p1; \
> })
>
> @@ -68,7 +68,7 @@ do { \
> * instances the scheduler pairs this with an mb(), so nothing is necessary on
> * the new hart.
> */
> -#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
> +#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
>
> #include <asm-generic/barrier.h>
>
> diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
> index ca094d72ec20..5b46f96a3ec8 100644
> --- a/arch/riscv/include/asm/fence.h
> +++ b/arch/riscv/include/asm/fence.h
> @@ -6,9 +6,9 @@
> ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
>
> #ifdef CONFIG_SMP
> -#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r,rw)
> -#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw,r)
> -#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw,rw)
> +#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
> +#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, r)
> +#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
> #else
> #define RISCV_ACQUIRE_BARRIER
> #define RISCV_RELEASE_BARRIER
> diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
> index afb5ead7552e..1c5c641075d2 100644
> --- a/arch/riscv/include/asm/io.h
> +++ b/arch/riscv/include/asm/io.h
> @@ -47,10 +47,10 @@
> * sufficient to ensure this works sanely on controllers that support I/O
> * writes.
> */
> -#define __io_pbr() RISCV_FENCE(io,i)
> -#define __io_par(v) RISCV_FENCE(i,ior)
> -#define __io_pbw() RISCV_FENCE(iow,o)
> -#define __io_paw() RISCV_FENCE(o,io)
> +#define __io_pbr() RISCV_FENCE(io, i)
> +#define __io_par(v) RISCV_FENCE(i, ior)
> +#define __io_pbw() RISCV_FENCE(iow, o)
> +#define __io_paw() RISCV_FENCE(o, io)
>
> /*
> * Accesses from a single hart to a single I/O address must be ordered. This
> diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
> index a708968d4a0f..06cadfd7a237 100644
> --- a/arch/riscv/include/asm/mmio.h
> +++ b/arch/riscv/include/asm/mmio.h
> @@ -132,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
> * doesn't define any ordering between the memory space and the I/O space.
> */
> #define __io_br() do {} while (0)
> -#define __io_ar(v) RISCV_FENCE(i,ir)
> -#define __io_bw() RISCV_FENCE(w,o)
> +#define __io_ar(v) RISCV_FENCE(i, ir)
> +#define __io_bw() RISCV_FENCE(w, o)
> #define __io_aw() mmiowb_set_pending()
>
> #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
> diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
> index 3bcae97d4803..52ce4a399d9b 100644
> --- a/arch/riscv/include/asm/mmiowb.h
> +++ b/arch/riscv/include/asm/mmiowb.h
> @@ -7,7 +7,7 @@
> * "o,w" is sufficient to ensure that all writes to the device have completed
> * before the write to the spinlock is allowed to commit.
> */
> -#define mmiowb() RISCV_FENCE(o,w)
> +#define mmiowb() RISCV_FENCE(o, w)
>
> #include <linux/smp.h>
> #include <asm-generic/mmiowb.h>
> --
> 2.43.0.687.g38aa6559b0-goog
>
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv

2024-02-13 20:30:37

by Eric Chan

[permalink] [raw]
Subject: Re: [PATCH v3 4/4] riscv/barrier: Resolve checkpatch.pl error

Hi Emil,

Thank you for reviewing my patch! I appreciate the feedback.
I've updated patch v4 at [0].
Please let me know if you have any further questions or suggestions.

[0] [https://lore.kernel.org/lkml/[email protected]/]

Sincerely,
Eric Chan