2018-02-22 09:26:39

by Andrea Parri

Subject: [PATCH 1/2] locking/xchg/alpha: Use smp_mb() in place of __ASM__MB

Replace each occurrence of __ASM__MB with a (trailing) smp_mb() in
xchg(), cmpxchg(), and remove the now unused __ASM__MB definitions;
this improves readability, with no additional synchronization cost.

Suggested-by: Will Deacon <[email protected]>
Signed-off-by: Andrea Parri <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Alan Stern <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
arch/alpha/include/asm/cmpxchg.h | 6 ------
arch/alpha/include/asm/xchg.h | 16 ++++++++--------
2 files changed, 8 insertions(+), 14 deletions(-)
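
A note on the "no additional synchronization cost" claim: on Alpha,
smp_mb() expands to the very same "mb" instruction that __ASM__MB
emitted under CONFIG_SMP, and to a pure compiler barrier otherwise.
In essence (a paraphrase of the barrier definitions, not a verbatim
quote of the headers):

	#define mb()		__asm__ __volatile__("mb" : : : "memory")

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()		/* same "mb" __ASM__MB used to emit */
	#else
	#define smp_mb()	barrier()	/* compiler barrier only */
	#endif

Under CONFIG_SMP the generated code is therefore unchanged, and on UP
the asm's "memory" clobber already provided the compiler barrier.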

diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e5..8a2b331e43feb 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
* Atomic exchange routines.
*/

-#define __ASM__MB
#define ____xchg(type, args...) __xchg ## type ## _local(args)
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
#include <asm/xchg.h>
@@ -33,10 +32,6 @@
cmpxchg_local((ptr), (o), (n)); \
})

-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
cmpxchg((ptr), (o), (n)); \
})

-#undef __ASM__MB
#undef ____cmpxchg

#endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index e2660866ce972..e1facf6fc2446 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -28,12 +28,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();

return ret;
}
@@ -52,12 +52,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();

return ret;
}
@@ -72,12 +72,12 @@ ____xchg(_u32, volatile int *m, unsigned long val)
" bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();

return val;
}
@@ -92,12 +92,12 @@ ____xchg(_u64, volatile long *m, unsigned long val)
" bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();

return val;
}
@@ -150,12 +150,12 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();

return prev;
}
@@ -177,12 +177,12 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();

return prev;
}
@@ -200,12 +200,12 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
" stl_c %1,%2\n"
" beq %1,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();

return prev;
}
@@ -223,12 +223,12 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
" stq_c %1,%2\n"
" beq %1,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();

return prev;
}
--
2.7.4



2018-02-22 21:48:30

by Paul E. McKenney

Subject: Re: [PATCH 1/2] locking/xchg/alpha: Use smp_mb() in place of __ASM__MB

On Thu, Feb 22, 2018 at 10:24:29AM +0100, Andrea Parri wrote:
> Replace each occurrence of __ASM__MB with a (trailing) smp_mb() in
> xchg(), cmpxchg(), and remove the now unused __ASM__MB definitions;
> this improves readability, with no additional synchronization cost.
>
> Suggested-by: Will Deacon <[email protected]>
> Signed-off-by: Andrea Parri <[email protected]>

I am a bit confused by the use of out-of-line branches to do a backwards
branch, but those were in place to start with. Maybe the point is to
defeat backwards-branch prediction or some such.
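
For reference, here is the pattern in question reassembled into a
standalone function (a sketch based on the _u64 xchg variant in this
patch, not a verbatim quote of the sources). The usual rationale is
that forward conditional branches are statically predicted not-taken,
so the rare stq_c failure jumps out of line and the common success
path falls straight through:

	static inline unsigned long sketch_xchg_u64(volatile long *m,
						    unsigned long val)
	{
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ldq_l %0,%4\n"		/* load-locked */
		"	bis $31,%3,%1\n"	/* copy the new value */
		"	stq_c %1,%2\n"		/* store-conditional */
		"	beq %1,2f\n"		/* rare failure: forward branch... */
		".subsection 2\n"		/* ...into a cold subsection */
		"2:	br 1b\n"		/* which retries the sequence */
		".previous"
		: "=&r" (val), "=&r" (dummy), "=m" (*m)
		: "rI" (val), "m" (*m) : "memory");

		return val;
	}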

Regardless...

Acked-by: Paul E. McKenney <[email protected]>

> [ remainder of the patch quoted in full; trimmed ]


Subject: [tip:locking/urgent] locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB

Commit-ID: 79d442461df7478cdd0c50d9b8a76f431f150fa3
Gitweb: https://git.kernel.org/tip/79d442461df7478cdd0c50d9b8a76f431f150fa3
Author: Andrea Parri <[email protected]>
AuthorDate: Thu, 22 Feb 2018 10:24:29 +0100
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 23 Feb 2018 08:38:15 +0100

locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB

Replace each occurrence of __ASM__MB with a (trailing) smp_mb() in
xchg(), cmpxchg(), and remove the now unused __ASM__MB definitions;
this improves readability, with no additional synchronization cost.

Suggested-by: Will Deacon <[email protected]>
Signed-off-by: Andrea Parri <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Cc: Alan Stern <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/alpha/include/asm/cmpxchg.h | 6 ------
arch/alpha/include/asm/xchg.h | 16 ++++++++--------
2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
* Atomic exchange routines.
*/

-#define __ASM__MB
#define ____xchg(type, args...) __xchg ## type ## _local(args)
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
#include <asm/xchg.h>
@@ -33,10 +32,6 @@
cmpxchg_local((ptr), (o), (n)); \
})

-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
cmpxchg((ptr), (o), (n)); \
})

-#undef __ASM__MB
#undef ____cmpxchg

#endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index e2660866ce97..e1facf6fc244 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -28,12 +28,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();

return ret;
}
@@ -52,12 +52,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();

return ret;
}
@@ -72,12 +72,12 @@ ____xchg(_u32, volatile int *m, unsigned long val)
" bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();

return val;
}
@@ -92,12 +92,12 @@ ____xchg(_u64, volatile long *m, unsigned long val)
" bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();

return val;
}
@@ -150,12 +150,12 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();

return prev;
}
@@ -177,12 +177,12 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();

return prev;
}
@@ -200,12 +200,12 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
" stl_c %1,%2\n"
" beq %1,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();

return prev;
}
@@ -223,12 +223,12 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
" stq_c %1,%2\n"
" beq %1,3f\n"
"2:\n"
- __ASM__MB
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();

return prev;
}

2018-02-26 18:07:02

by Will Deacon

Subject: Re: [tip:locking/urgent] locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB

Hi Andrea,

I know this is in mainline now, but I think the way you've got the barriers
here:

On Fri, Feb 23, 2018 at 12:27:54AM -0800, tip-bot for Andrea Parri wrote:
> diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
> index 46ebf14aed4e..8a2b331e43fe 100644
> --- a/arch/alpha/include/asm/cmpxchg.h
> +++ b/arch/alpha/include/asm/cmpxchg.h
> @@ -6,7 +6,6 @@
> * Atomic exchange routines.
> */
>
> -#define __ASM__MB
> #define ____xchg(type, args...) __xchg ## type ## _local(args)
> #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
> #include <asm/xchg.h>
> @@ -33,10 +32,6 @@
> cmpxchg_local((ptr), (o), (n)); \
> })
>
> -#ifdef CONFIG_SMP
> -#undef __ASM__MB
> -#define __ASM__MB "\tmb\n"
> -#endif
> #undef ____xchg
> #undef ____cmpxchg
> #define ____xchg(type, args...) __xchg ##type(args)
> @@ -64,7 +59,6 @@
> cmpxchg((ptr), (o), (n)); \
> })
>
> -#undef __ASM__MB
> #undef ____cmpxchg
>
> #endif /* _ALPHA_CMPXCHG_H */
> diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
> index e2660866ce97..e1facf6fc244 100644
> --- a/arch/alpha/include/asm/xchg.h
> +++ b/arch/alpha/include/asm/xchg.h
> @@ -28,12 +28,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
> " or %1,%2,%2\n"
> " stq_c %2,0(%3)\n"
> " beq %2,2f\n"
> - __ASM__MB
> ".subsection 2\n"
> "2: br 1b\n"
> ".previous"
> : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
> : "r" ((long)m), "1" (val) : "memory");
> + smp_mb();
>
> return ret;

ends up adding unnecessary barriers to the _local variants, which the
previous code took care to avoid. That's why I suggested adding
the smp_mb() into the cmpxchg() macro rather than into the ____cmpxchg() variants.
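
Concretely, that would look something like the following (a hypothetical
sketch following the shape of arch/alpha/include/asm/cmpxchg.h, not the
eventual fix): the ____xchg()/____cmpxchg() bodies stay barrier-free and
only the fully ordered wrappers pay for the smp_mb(), so the *_local()
variants remain barrier-free as before.

	#define xchg(ptr, x)						\
	({								\
		__typeof__(*(ptr)) __ret;				\
		__ret = (__typeof__(*(ptr)))				\
			__xchg((ptr), (unsigned long)(x),		\
			       sizeof(*(ptr)));				\
		smp_mb();	/* barrier only in the ordered variant */\
		__ret;							\
	})

	#define xchg_local(ptr, x)					\
		((__typeof__(*(ptr)))					\
		 __xchg_local((ptr), (unsigned long)(x),		\
			      sizeof(*(ptr))))	/* no barrier here */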

I think it's worth spinning another patch to fix this properly.

Will