Some functions use mostly the same asm for their 32-bit and 64-bit versions, differing only in the width suffix ("w" vs "d") of the lr/sc instructions.
Move the asm body into a macro that takes the suffix as a parameter, avoiding the code duplication.
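Since adjacent string literals are concatenated, passing sfx as "w" or "d" selects the instruction width at expansion time. For example, the 32-bit call

	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");

expands to the same asm block the function contained before this change:

	__asm__ __volatile__ (
		"0:	lr.w %[p], %[c]\n"		/* load-reserved, 32-bit */
		"	beq  %[p], %[u], 1f\n"		/* bail out if *v == u */
		"	add  %[rc], %[p], %[a]\n"
		"	sc.w.rl %[rc], %[rc], %[c]\n"	/* store-conditional, release */
		"	bnez %[rc], 0b\n"		/* retry if reservation lost */
		"	fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

The 64-bit caller passes "d" and gets lr.d/sc.d.rl instead. The trailing fence rw, rw is reached only on the success path, which is what provides the full barrier the atomic API requires.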
Signed-off-by: Leonardo Bras <[email protected]>
---
arch/riscv/include/asm/atomic.h | 164 +++++++++++++++-----------------
1 file changed, 76 insertions(+), 88 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0dfe9d857a762..85eb2edbc8219 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " beq %[p], %[u], 1f\n" \
+ " add %[rc], %[p], %[a]\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : [a]"r" (_a), [u]"r" (_u) \
+ : "memory"); \
+})
+
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -222,17 +228,8 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
@@ -310,61 +307,79 @@ ATOMIC_OPS()
#undef ATOMIC_OPS
#undef ATOMIC_OP
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bltz %[p], 1f\n" \
+ " addi %[rc], %[p], 1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bgtz %[p], 1f\n" \
+ " addi %[rc], %[p], -1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " addi %[rc], %[p], -1\n" \
+ " bltz %[rc], 1f\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
return prev - 1;
}
@@ -376,17 +391,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
return !(prev < 0);
}
@@ -397,17 +403,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
return !(prev > 0);
}
@@ -418,17 +415,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
return prev - 1;
}
--
2.40.0
Friendly ping?
On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
>
> [quoted patch trimmed]
On Thu, May 25, 2023 at 5:31 PM Leonardo Bras Soares Passos
<[email protected]> wrote:
>
> Friendly ping?
>
> On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
> >
> > [quoted patch trimmed]
>
A safe cleanup, no problem found.
Reviewed-by: Guo Ren <[email protected]>
--
Best Regards
Guo Ren
On Thu, 2023-05-25 at 18:06 +0800, Guo Ren wrote:
> On Thu, May 25, 2023 at 5:31 PM Leonardo Bras Soares Passos
> <[email protected]> wrote:
> >
> > Friendly ping?
> >
> > On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
> > >
> > > [quoted patch trimmed]
> >
> A safe cleanup, no problem found.
>
> Reviewed-by: Guo Ren <[email protected]>
>
>
Hello Palmer,
Any chance this can get into 6.5?
Also, same question for this series:
https://patchwork.kernel.org/project/linux-riscv/list/?series=737491
Thanks!
Leo
On Thu, May 25, 2023 at 7:07 AM Guo Ren <[email protected]> wrote:
>
> On Thu, May 25, 2023 at 5:31 PM Leonardo Bras Soares Passos
> <[email protected]> wrote:
> >
> > Friendly ping?
> >
> > On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
> > >
> > > [quoted patch trimmed]
> >
> A safe cleanup, no problem found.
>
> Reviewed-by: Guo Ren <[email protected]>
>
>
Hello Palmer,
Do you have any improvements to suggest for this patch?
Best regards,
Leonardo Bras
On Wed, Aug 2, 2023 at 5:00 PM Leonardo Bras Soares Passos
<[email protected]> wrote:
>
> On Thu, May 25, 2023 at 7:07 AM Guo Ren <[email protected]> wrote:
> >
> > On Thu, May 25, 2023 at 5:31 PM Leonardo Bras Soares Passos
> > <[email protected]> wrote:
> > >
> > > Friendly ping?
> > >
> > > On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
> > > >
> > > > [quoted patch trimmed]
> > >
> > A safe cleanup, no problem found.
> >
> > Reviewed-by: Guo Ren <[email protected]>
> >
> >
>
> Hello Palmer,
>
> Do you have any improvements to suggest for this patch?
>
> Best regards,
> Leonardo Bras
CC: Palmer Dabbelt <[email protected]>
On Wed, 2023-08-02 at 17:00 -0300, Leonardo Bras Soares Passos wrote:
> On Thu, May 25, 2023 at 7:07 AM Guo Ren <[email protected]> wrote:
> >
> > On Thu, May 25, 2023 at 5:31 PM Leonardo Bras Soares Passos
> > <[email protected]> wrote:
> > >
> > > Friendly ping?
> > >
> > > On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <[email protected]> wrote:
> > > >
> > > > [quoted patch trimmed]
> > >
> > A safe cleanup, no problem found.
> >
> > Reviewed-by: Guo Ren <[email protected]>
> >
> >
>
> Hello Palmer,
>
> Do you have any improvements to suggest for this patch?
>
> Best regards,
> Leonardo Bras
Superseded by v2:
https://patchwork.kernel.org/project/linux-riscv/list/?series=772422&state=%2A&archive=both
Leo