Subject: x86/paravirt: Properly align PV functions
From: Thomas Gleixner <[email protected]>

Ensure inline asm functions are consistently aligned with compiler-generated
and SYM_FUNC_START*() functions.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 arch/x86/include/asm/paravirt.h           | 1 +
 arch/x86/include/asm/qspinlock_paravirt.h | 2 +-
 arch/x86/kernel/kvm.c                     | 1 +
 arch/x86/kernel/paravirt.c                | 2 ++
 4 files changed, 5 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -665,6 +665,7 @@ bool __raw_callee_save___native_vcpu_is_
asm(".pushsection " section ", \"ax\";" \
".globl " PV_THUNK_NAME(func) ";" \
".type " PV_THUNK_NAME(func) ", @function;" \
+ ASM_FUNC_ALIGN \
PV_THUNK_NAME(func) ":" \
ASM_ENDBR \
FRAME_BEGIN \
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -39,7 +39,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_sp
asm (".pushsection .text;"
".globl " PV_UNLOCK ";"
".type " PV_UNLOCK ", @function;"
- ".align 4,0x90;"
+ ASM_FUNC_ALIGN
PV_UNLOCK ": "
ASM_ENDBR
FRAME_BEGIN
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -802,6 +802,7 @@ asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+ASM_FUNC_ALIGN
"__raw_callee_save___kvm_vcpu_is_preempted:"
ASM_ENDBR
"movq __per_cpu_offset(,%rdi,8), %rax;"
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -40,6 +40,7 @@
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
".global _paravirt_nop\n"
+ ASM_FUNC_ALIGN
"_paravirt_nop:\n\t"
ASM_ENDBR
ASM_RET
@@ -50,6 +51,7 @@ asm (".pushsection .entry.text, \"ax\"\n
/* stub always returning 0. */
asm (".pushsection .entry.text, \"ax\"\n"
".global paravirt_ret0\n"
+ ASM_FUNC_ALIGN
"paravirt_ret0:\n\t"
ASM_ENDBR
"xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
On 15.09.22 13:10, Peter Zijlstra wrote:
> From: Thomas Gleixner <[email protected]>
>
> Ensure inline asm functions are consistently aligned with compiler-generated
> and SYM_FUNC_START*() functions.
>
> Signed-off-by: Thomas Gleixner <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>

Reviewed-by: Juergen Gross <[email protected]>

Juergen
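
For context: ASM_FUNC_ALIGN is the string-literal function-alignment
directive provided by the x86 linkage headers as part of this alignment
work, which is why it can be placed directly between the other string
literals in these asm() statements. As a rough sketch only (assuming the
traditional x86 __ALIGN of ".p2align 4, 0x90"; the exact directive is
configuration dependent), the _paravirt_nop stub from the patch then
assembles to approximately:

	.pushsection .entry.text, "ax"
	.global _paravirt_nop
	.p2align 4, 0x90	/* ASM_FUNC_ALIGN: 16-byte aligned, NOP padded */
_paravirt_nop:
	...			/* ASM_ENDBR / ASM_RET as in the existing code */
	.popsection

That matches the alignment which compiler-generated code and
SYM_FUNC_START*() functions already get, whereas the old ".align 4,0x90"
in qspinlock_paravirt.h guaranteed only 4-byte alignment and the remaining
thunks carried no alignment directive at all.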
The following commit has been merged into the x86/core branch of tip:

Commit-ID:     1d293758e548aa6ff65e4dd3f5a9bc2a34b38ce3
Gitweb:        https://git.kernel.org/tip/1d293758e548aa6ff65e4dd3f5a9bc2a34b38ce3
Author:        Thomas Gleixner <[email protected]>
AuthorDate:    Thu, 15 Sep 2022 13:10:50 +02:00
Committer:     Peter Zijlstra <[email protected]>
CommitterDate: Mon, 17 Oct 2022 16:40:59 +02:00

x86/paravirt: Properly align PV functions

Ensure inline asm functions are consistently aligned with compiler-generated
and SYM_FUNC_START*() functions.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
 arch/x86/include/asm/paravirt.h           | 1 +
 arch/x86/include/asm/qspinlock_paravirt.h | 2 +-
 arch/x86/kernel/kvm.c                     | 1 +
 arch/x86/kernel/paravirt.c                | 2 ++
 4 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2a0b8dd..1be66c1 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -665,6 +665,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
asm(".pushsection " section ", \"ax\";" \
".globl " PV_THUNK_NAME(func) ";" \
".type " PV_THUNK_NAME(func) ", @function;" \
+ ASM_FUNC_ALIGN \
PV_THUNK_NAME(func) ":" \
ASM_ENDBR \
FRAME_BEGIN \
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 60ece59..082551b 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -40,7 +40,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
asm (".pushsection .spinlock.text;"
".globl " PV_UNLOCK ";"
".type " PV_UNLOCK ", @function;"
- ".align 4,0x90;"
+ ASM_FUNC_ALIGN
PV_UNLOCK ": "
ASM_ENDBR
FRAME_BEGIN
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d4e48b4..95fb85b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -802,6 +802,7 @@ asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+ASM_FUNC_ALIGN
"__raw_callee_save___kvm_vcpu_is_preempted:"
ASM_ENDBR
"movq __per_cpu_offset(,%rdi,8), %rax;"
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7ca2d46..e244c49 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -40,6 +40,7 @@
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
".global _paravirt_nop\n"
+ ASM_FUNC_ALIGN
"_paravirt_nop:\n\t"
ASM_ENDBR
ASM_RET
@@ -50,6 +51,7 @@ asm (".pushsection .entry.text, \"ax\"\n"
/* stub always returning 0. */
asm (".pushsection .entry.text, \"ax\"\n"
".global paravirt_ret0\n"
+ ASM_FUNC_ALIGN
"paravirt_ret0:\n\t"
ASM_ENDBR
"xor %" _ASM_AX ", %" _ASM_AX ";\n\t"