2023-12-10 06:22:02

by Juergen Gross

Subject: [PATCH v6 1/5] x86/paravirt: introduce ALT_NOT_XEN

Introduce the macro ALT_NOT_XEN as a short form of
ALT_NOT(X86_FEATURE_XENPV).

Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
---
V3:
- split off from next patch
V5:
- move patch to the start of the series (Boris Petkov)
---
arch/x86/include/asm/paravirt.h | 42 ++++++++++++---------------
arch/x86/include/asm/paravirt_types.h | 3 ++
2 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 693c61dbdd9c..aa76ac7c806c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -142,8 +142,7 @@ static inline void write_cr0(unsigned long x)
static __always_inline unsigned long read_cr2(void)
{
return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
- "mov %%cr2, %%rax;",
- ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%cr2, %%rax;", ALT_NOT_XEN);
}

static __always_inline void write_cr2(unsigned long x)
@@ -154,13 +153,12 @@ static __always_inline void write_cr2(unsigned long x)
static inline unsigned long __read_cr3(void)
{
return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
- "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%cr3, %%rax;", ALT_NOT_XEN);
}

static inline void write_cr3(unsigned long x)
{
- PVOP_ALT_VCALL1(mmu.write_cr3, x,
- "mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}

static inline void __write_cr4(unsigned long x)
@@ -182,7 +180,7 @@ extern noinstr void pv_native_wbinvd(void);

static __always_inline void wbinvd(void)
{
- PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
}

static inline u64 paravirt_read_msr(unsigned msr)
@@ -390,27 +388,25 @@ static inline void paravirt_release_p4d(unsigned long pfn)
static inline pte_t __pte(pteval_t val)
{
return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pteval_t pte_val(pte_t pte)
{
return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline pgd_t __pgd(pgdval_t val)
{
return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -444,14 +440,13 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline pmd_t __pmd(pmdval_t val)
{
return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV)) };
+ "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -464,7 +459,7 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;

ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);

return (pud_t) { ret };
}
@@ -472,7 +467,7 @@ static inline pud_t __pud(pudval_t val)
static inline pudval_t pud_val(pud_t pud)
{
return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void pud_clear(pud_t *pudp)
@@ -492,8 +487,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline p4d_t __p4d(p4dval_t val)
{
p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
- "mov %%rdi, %%rax",
- ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);

return (p4d_t) { ret };
}
@@ -501,7 +495,7 @@ static inline p4d_t __p4d(p4dval_t val)
static inline p4dval_t p4d_val(p4d_t p4d)
{
return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
- "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
+ "mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -687,17 +681,17 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
static __always_inline unsigned long arch_local_save_flags(void)
{
return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
- ALT_NOT(X86_FEATURE_XENPV));
+ ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_disable(void)
{
- PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_enable(void)
{
- PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
+ PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
}

static __always_inline unsigned long arch_local_irq_save(void)
@@ -769,7 +763,7 @@ void native_pv_lock_init(void) __init;
.endm

#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
- ALT_NOT(X86_FEATURE_XENPV)
+ ALT_NOT_XEN
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f4fb2e3ec7b8..483e19e5ca7a 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -557,5 +557,8 @@ extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];

#endif /* __ASSEMBLY__ */
+
+#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
+
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */
--
2.35.3
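
For readers outside the kernel tree, here is a minimal stand-alone C sketch of
what the shorthand buys. It uses stand-in macros only: the ALT_NOT() encoding,
the feature value and the emit_alternative() helper below are invented for the
example and are not the kernel's real alternatives or PVOP_ALT_*() machinery.

#include <stdio.h>

#define X86_FEATURE_XENPV	123			/* stand-in feature bit, not the real value */
#define ALT_NOT(feature)	(0x8000 | (feature))	/* stand-in for the real flag encoding */
#define ALT_NOT_XEN		ALT_NOT(X86_FEATURE_XENPV)

/* fake call site; the real kernel code emits asm via the PVOP_ALT_*() macros */
static void emit_alternative(const char *insn, int flags)
{
	printf("insn '%s', feature flags 0x%x\n", insn, flags);
}

int main(void)
{
	/* before this patch: the long form repeated at every call site */
	emit_alternative("mov %cr2, %rax", ALT_NOT(X86_FEATURE_XENPV));
	/* after this patch: the shorthand, expanding to exactly the same value */
	emit_alternative("mov %cr2, %rax", ALT_NOT_XEN);
	return 0;
}

Both calls expand identically; the shorthand only removes the repeated
ALT_NOT(X86_FEATURE_XENPV) noise from the many PVOP_ALT_*() call sites touched
by the diff above.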


2023-12-10 10:56:19

by H. Peter Anvin

Subject: Re: [PATCH v6 1/5] x86/paravirt: introduce ALT_NOT_XEN

On December 9, 2023 10:21:34 PM PST, Juergen Gross <[email protected]> wrote:
>Introduce the macro ALT_NOT_XEN as a short form of
>ALT_NOT(X86_FEATURE_XENPV).
>
>Suggested-by: Peter Zijlstra (Intel) <[email protected]>
>Signed-off-by: Juergen Gross <[email protected]>
>[...]
>+#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
>[...]

X86_FEATURE_NOT_XEN ;)