Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/hexagon/include/asm/atomic.h | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, at
); \
} \
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
@@ -127,16 +127,39 @@ static inline int atomic_##op##_return(i
return output; \
}
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int output, val; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%2);\n" \
+ " %1 = "#op "(%0,%3);\n" \
+ " memw_locked(%2,P3)=%1;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
On Fri, Apr 22, 2016 at 11:04:23AM +0200, Peter Zijlstra wrote:
> +#define ATOMIC_FETCH_OP(op) \
> +static inline int atomic_fetch_##op(int i, atomic_t *v) \
> +{ \
> + int output, val; \
> + \
> + __asm__ __volatile__ ( \
> + "1: %0 = memw_locked(%2);\n" \
> + " %1 = "#op "(%0,%3);\n" \
> + " memw_locked(%2,P3)=%0;\n" \
I'm thinking that wants to be:
memw_locked(%2,P3)=%1;
> + " if !P3 jump 1b;\n" \
> + : "=&r" (output), "=&r" (val) \
> + : "r" (&v->counter), "r" (i) \
> + : "memory", "p3" \
> + ); \
> + return output; \
> +}
On Sat, Apr 23, 2016 at 04:16:58AM +0200, Peter Zijlstra wrote:
> On Fri, Apr 22, 2016 at 11:04:23AM +0200, Peter Zijlstra wrote:
> > +#define ATOMIC_FETCH_OP(op) \
> > +static inline int atomic_fetch_##op(int i, atomic_t *v) \
> > +{ \
> > + int output, val; \
> > + \
> > + __asm__ __volatile__ ( \
> > + "1: %0 = memw_locked(%2);\n" \
> > + " %1 = "#op "(%0,%3);\n" \
> > + " memw_locked(%2,P3)=%0;\n" \
>
> I'm thinking that wants to be:
>
> memw_locked(%2,P3)=%1;
>
> > + " if !P3 jump 1b;\n" \
> > + : "=&r" (output), "=&r" (val) \
> > + : "r" (&v->counter), "r" (i) \
> > + : "memory", "p3" \
> > + ); \
> > + return output; \
> > +}
I think you are right. With the above fix,
Acked-by: Richard Kuo <[email protected]>
--
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project