2024-05-20 08:10:08

by Uros Bizjak

Subject: [PATCH 1/2] x86/percpu: Rename percpu_stable_op() to __raw_cpu_read_stable()

Rename percpu_stable_op() to __raw_cpu_read_stable() to be
in line with other read/write percpu accessors.

No functional change intended.

Signed-off-by: Uros Bizjak <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
---
arch/x86/include/asm/percpu.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index c77393cd0273..39762fcfe328 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,10 +190,10 @@ do { \

#endif /* CONFIG_USE_X86_SEG_SUPPORT */

-#define percpu_stable_op(size, op, _var) \
+#define __raw_cpu_read_stable(size, _var) \
({ \
__pcpu_type_##size pfo_val__; \
- asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \
+ asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \
: [val] __pcpu_reg_##size("=", pfo_val__) \
: [var] "i" (&(_var))); \
(typeof(_var))(unsigned long) pfo_val__; \
@@ -480,9 +480,9 @@ do { \

#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)

-#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
-#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
-#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
+#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp)
+#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp)
+#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
@@ -535,7 +535,7 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
-#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
+#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val)
--
2.45.1
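The renamed __raw_cpu_read_stable() gets its caching behaviour from the shape of the asm() statement: it is not volatile and takes the variable's address as a constant ("i") operand, so GCC is free to reuse a previously loaded value instead of re-reading the per-CPU slot. A minimal user-space sketch of that principle follows (plain C, not the kernel macros; it omits the %gs segment prefix and the "i" address operand, and the variable and function names are illustrative only):

/*
 * Sketch only: a non-volatile asm() with no "memory" clobber may be
 * CSE'd by GCC, much like __raw_cpu_read_stable(); a volatile asm()
 * is re-emitted at every call site, like an ordinary this_cpu_read().
 * Build with: gcc -O2 stable_demo.c (x86-64 only).
 */
static unsigned long slot;

static inline unsigned long read_stable(void)
{
	unsigned long val;

	/* Not volatile: repeated identical reads may collapse into one mov. */
	asm("movq %1, %0" : "=r" (val) : "m" (slot));
	return val;
}

static inline unsigned long read_every_time(void)
{
	unsigned long val;

	/* volatile: the load is performed at every call site. */
	asm volatile("movq %1, %0" : "=r" (val) : "m" (slot));
	return val;
}

int main(void)
{
	/* At -O2 the two stable reads may share a single load. */
	return (int)(read_stable() + read_stable() +
		     read_every_time() + read_every_time());
}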



2024-05-20 08:10:11

by Uros Bizjak

Subject: [PATCH 2/2] x86/percpu: Move some percpu accessors around to reduce ifdeffery

Move some percpu accessors around, mainly to reduce ifdeffery
and improve readability by following dependencies between
accessors.

No functional change intended.

Signed-off-by: Uros Bizjak <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
---
arch/x86/include/asm/percpu.h | 40 +++++++++++++++++------------------
1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 39762fcfe328..0f0d8973f8df 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -445,17 +445,6 @@ do { \
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
#endif

-/*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
- * this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
- * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
- * actually per-thread variables implemented as per-CPU variables and
- * thus stable for the duration of the respective task.
- */
-#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
-
#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp)
@@ -470,16 +459,6 @@ do { \
#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val)

-#ifdef CONFIG_X86_64
-#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
-#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val)
-
-#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp)
-#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val)
-#endif
-
-#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)
-
#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp)
@@ -535,6 +514,12 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
+#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
+#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val)
+
+#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp)
+#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val)
+
#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
@@ -561,6 +546,19 @@ do { \
#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
#endif

+#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)
+
+/*
+ * this_cpu_read() makes gcc load the percpu variable every time it is
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
+ * is guaranteed to be valid across cpus. The current users include
+ * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
+ * actually per-thread variables implemented as per-CPU variables and
+ * thus stable for the duration of the respective task.
+ */
+#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
+
#define x86_this_cpu_constant_test_bit(_nr, _var) \
({ \
unsigned long __percpu *addr__ = \
--
2.45.1
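The this_cpu_read_stable() wrapper that this patch moves next to its 1/2/4/8-byte implementations is a size-dispatch macro: __pcpu_size_call_return() selects the _1/_2/_4/_8 variant from sizeof(variable). A rough user-space sketch of that dispatch pattern is below (ordinary C with GCC extensions such as typeof and statement expressions, as in the kernel headers; the demo_* names are illustrative, not the kernel's actual helpers):

/* Sketch of sizeof-based dispatch, loosely mirroring __pcpu_size_call_return(). */
#include <stdio.h>

#define demo_read_1(p)	(*(unsigned char *)(p))
#define demo_read_2(p)	(*(unsigned short *)(p))
#define demo_read_4(p)	(*(unsigned int *)(p))
#define demo_read_8(p)	(*(unsigned long long *)(p))

#define demo_read(var)							\
({									\
	typeof(var) __ret;						\
	switch (sizeof(var)) {						\
	case 1: __ret = (typeof(var))demo_read_1(&(var)); break;	\
	case 2: __ret = (typeof(var))demo_read_2(&(var)); break;	\
	case 4: __ret = (typeof(var))demo_read_4(&(var)); break;	\
	case 8: __ret = (typeof(var))demo_read_8(&(var)); break;	\
	default: __ret = 0; break; /* the kernel raises a build error here */ \
	}								\
	__ret;								\
})

int main(void)
{
	unsigned short s = 42;
	unsigned long long q = 1234567890123ULL;

	/* Each use expands to the variant matching the variable's size. */
	printf("%u %llu\n", (unsigned)demo_read(s), demo_read(q));
	return 0;
}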


Subject: [tip: x86/percpu] x86/percpu: Move some percpu accessors around to reduce ifdeffery

The following commit has been merged into the x86/percpu branch of tip:

Commit-ID: 47c9dbd2fb5f98453840e18ebced9138ec8b4cc5
Gitweb: https://git.kernel.org/tip/47c9dbd2fb5f98453840e18ebced9138ec8b4cc5
Author: Uros Bizjak <[email protected]>
AuthorDate: Mon, 20 May 2024 10:09:25 +02:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Mon, 20 May 2024 10:25:31 +02:00

x86/percpu: Move some percpu accessors around to reduce ifdeffery

Move some percpu accessors around, mainly to reduce ifdeffery
and improve readability by following dependencies between
accessors.

No functional change intended.

Signed-off-by: Uros Bizjak <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
arch/x86/include/asm/percpu.h | 40 ++++++++++++++++------------------
1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 39762fc..0f0d897 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -445,17 +445,6 @@ do { \
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
#endif

-/*
- * this_cpu_read() makes gcc load the percpu variable every time it is
- * accessed while this_cpu_read_stable() allows the value to be cached.
- * this_cpu_read_stable() is more efficient and can be used if its value
- * is guaranteed to be valid across cpus. The current users include
- * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
- * actually per-thread variables implemented as per-CPU variables and
- * thus stable for the duration of the respective task.
- */
-#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
-
#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp)
@@ -470,16 +459,6 @@ do { \
#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val)

-#ifdef CONFIG_X86_64
-#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
-#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val)
-
-#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp)
-#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val)
-#endif
-
-#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)
-
#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp)
@@ -535,6 +514,12 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
+#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp)
+#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val)
+
+#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp)
+#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val)
+
#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
@@ -561,6 +546,19 @@ do { \
#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
#endif

+#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)
+
+/*
+ * this_cpu_read() makes gcc load the percpu variable every time it is
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
+ * is guaranteed to be valid across cpus. The current users include
+ * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
+ * actually per-thread variables implemented as per-CPU variables and
+ * thus stable for the duration of the respective task.
+ */
+#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
+
#define x86_this_cpu_constant_test_bit(_nr, _var) \
({ \
unsigned long __percpu *addr__ = \

Subject: [tip: x86/percpu] x86/percpu: Rename percpu_stable_op() to __raw_cpu_read_stable()

The following commit has been merged into the x86/percpu branch of tip:

Commit-ID: 48908919c9062bf9472def7389dd7cd9c6a45b70
Gitweb: https://git.kernel.org/tip/48908919c9062bf9472def7389dd7cd9c6a45b70
Author: Uros Bizjak <[email protected]>
AuthorDate: Mon, 20 May 2024 10:09:24 +02:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Mon, 20 May 2024 10:17:10 +02:00

x86/percpu: Rename percpu_stable_op() to __raw_cpu_read_stable()

Rename percpu_stable_op() to __raw_cpu_read_stable() to be
in line with other read/write percpu accessors.

No functional change intended.

Signed-off-by: Uros Bizjak <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: Uros Bizjak <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
arch/x86/include/asm/percpu.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index c77393c..39762fc 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,10 +190,10 @@ do { \

#endif /* CONFIG_USE_X86_SEG_SUPPORT */

-#define percpu_stable_op(size, op, _var) \
+#define __raw_cpu_read_stable(size, _var) \
({ \
__pcpu_type_##size pfo_val__; \
- asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \
+ asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \
: [val] __pcpu_reg_##size("=", pfo_val__) \
: [var] "i" (&(_var))); \
(typeof(_var))(unsigned long) pfo_val__; \
@@ -480,9 +480,9 @@ do { \

#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp)

-#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
-#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
-#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
+#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp)
+#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp)
+#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
@@ -535,7 +535,7 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
-#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
+#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val)