2020-07-22 16:45:37

by David Brazdil

Subject: [PATCH 0/9] Independent per-CPU data section for nVHE

Introduce '.hyp.data..percpu' as part of an ongoing effort to make nVHE
hyp code self-contained and independent of the rest of the kernel.

The series builds on top of the "Split off nVHE hyp code" series which
used objcopy to rename '.text' to '.hyp.text' and prefix all ELF
symbols with '__kvm_nvhe' for all object files under kvm/hyp/nvhe.

The series is structured as follows:

- patch 1: Modify generic PERCPU_* linker script macros to make it
possible to define multiple per-CPU ELF sections with prefixed
section and symbol names.

- patches 2-3: Replace hyp helpers for accessing per-CPU variables
with common helpers modified to work correctly in hyp. Per-CPU
variables can now be accessed with one API anywhere.

- patches 4-6: Where VHE and nVHE use per-CPU variables defined in
kernel proper, move their definitions to hyp/ where they are
duplicated and owned by VHE/nVHE, respectively. Non-VHE hyp code
now refers only to per-CPU variables defined in its source files.
Helpers are added so that kernel proper can continue to access
nVHE hyp variables, the same way it does with other nVHE symbols
(see the sketch after this list).

- patches 7-9: Introduce '.hyp.data..percpu' ELF section and allocate
memory for every CPU core during KVM init. All nVHE per-CPU state
is now grouped together in ELF and in memory. Introducing a new
per-CPU variable does not require adding new memory mappings any
more. nVHE hyp code cannot accidentally refer to kernel-proper
per-CPU data as it only has the pointer to its own per-CPU memory.
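
For illustration, here is a minimal sketch (not itself part of the series)
of how kernel proper accesses a hyp-owned per-CPU variable once the series
is applied. It assumes the this_cpu_ptr_hyp() helper from patch 4 and the
VHE/nVHE instances of kvm_host_data from patch 6:

  #include <asm/kvm_asm.h>
  #include <asm/kvm_host.h>

  /* Illustrative only: kernel proper picks the VHE or nVHE instance at
   * runtime; nVHE hyp code itself only ever sees its own copy. */
  static struct kvm_cpu_context *host_ctxt_example(void)
  {
          return &this_cpu_ptr_hyp(kvm_host_data)->host_ctxt;
  }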

The patches are rebased on current kvmarm/next (commit b72eb1f6813)
and are available in branch 'topic/percpu' at:
https://android-kvm.googlesource.com/linux

David Brazdil (9):
Macros to override naming of percpu symbols and sections
kvm: arm64: Remove __hyp_this_cpu_read
kvm: arm64: Remove hyp_adr/ldr_this_cpu
kvm: arm64: Add helpers for accessing nVHE hyp per-cpu vars
kvm: arm64: Duplicate arm64_ssbd_callback_required for nVHE hyp
kvm: arm64: Create separate instances of kvm_host_data for VHE/nVHE
kvm: arm64: Mark hyp stack pages reserved
kvm: arm64: Set up hyp percpu data for nVHE
kvm: arm64: Remove unnecessary hyp mappings

arch/arm64/include/asm/assembler.h | 27 ++++--
arch/arm64/include/asm/kvm_asm.h | 74 ++++++++-------
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/arm64/include/asm/kvm_mmu.h | 23 ++---
arch/arm64/include/asm/percpu.h | 33 ++++++-
arch/arm64/include/asm/sections.h | 1 +
arch/arm64/kernel/image-vars.h | 2 -
arch/arm64/kernel/vmlinux.lds.S | 10 ++
arch/arm64/kvm/arm.c | 110 ++++++++++++++++++----
arch/arm64/kvm/hyp/hyp-entry.S | 2 +-
arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 4 +-
arch/arm64/kvm/hyp/include/hyp/switch.h | 6 +-
arch/arm64/kvm/hyp/nvhe/Makefile | 2 +
arch/arm64/kvm/hyp/nvhe/switch.c | 8 +-
arch/arm64/kvm/hyp/vhe/switch.c | 5 +-
arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 4 +-
arch/arm64/kvm/pmu.c | 13 ++-
include/asm-generic/vmlinux.lds.h | 40 +++++---
18 files changed, 257 insertions(+), 109 deletions(-)

--
2.27.0


2020-07-22 16:45:43

by David Brazdil

Subject: [PATCH 1/9] Macros to override naming of percpu symbols and sections

Modify the generic linker script macros to generate section/symbol names
for the percpu area using overridable macros. No functional changes.

This will allow the arm64 linker script to define a second KVM-specific
percpu data section using the generic PERCPU_SECTION macro.
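
A sketch of the intended use, mirroring the arm64 linker script change made
later in this series (CONCAT3 is a small token-pasting helper defined there;
the snippet is shown here for illustration only):

  /* KVM nVHE per-cpu section, placed after the regular PERCPU_SECTION */
  #undef PERCPU_SECTION_NAME
  #undef PERCPU_SYMBOL_NAME
  #define PERCPU_SECTION_NAME(suffix) CONCAT3(.hyp, PERCPU_SECTION_BASE_NAME, suffix)
  #define PERCPU_SYMBOL_NAME(name)    __kvm_nvhe_ ## name
  PERCPU_SECTION(L1_CACHE_BYTES)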

Signed-off-by: David Brazdil <[email protected]>
---
include/asm-generic/vmlinux.lds.h | 40 +++++++++++++++++++++----------
1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db600ef218d7..1bfc002ecfce 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -892,6 +892,20 @@
#define INIT_RAM_FS
#endif

+/*
+ * Macros to override the naming of percpu symbols and sections.
+ * Used by arm64 linker script to define a separate percpu area for KVM.
+ */
+#define PERCPU_SECTION_BASE_NAME .data..percpu
+
+#ifndef PERCPU_SECTION_NAME
+#define PERCPU_SECTION_NAME(suffix) PERCPU_SECTION_BASE_NAME ## suffix
+#endif
+
+#ifndef PERCPU_SYMBOL_NAME
+#define PERCPU_SYMBOL_NAME(name) name
+#endif
+
/*
* Memory encryption operates on a page basis. Since we need to clear
* the memory encryption mask for this section, it needs to be aligned
@@ -903,7 +917,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
- *(.data..percpu..decrypted) \
+ *(PERCPU_SECTION_NAME(..decrypted)) \
. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
@@ -947,17 +961,17 @@
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
- __per_cpu_start = .; \
- *(.data..percpu..first) \
+ PERCPU_SYMBOL_NAME(__per_cpu_start) = .; \
+ *(PERCPU_SECTION_NAME(..first)) \
. = ALIGN(PAGE_SIZE); \
- *(.data..percpu..page_aligned) \
+ *(PERCPU_SECTION_NAME(..page_aligned)) \
. = ALIGN(cacheline); \
- *(.data..percpu..read_mostly) \
+ *(PERCPU_SECTION_NAME(..read_mostly)) \
. = ALIGN(cacheline); \
- *(.data..percpu) \
- *(.data..percpu..shared_aligned) \
+ *(PERCPU_SECTION_NAME()) \
+ *(PERCPU_SECTION_NAME(..shared_aligned)) \
PERCPU_DECRYPTED_SECTION \
- __per_cpu_end = .;
+ PERCPU_SYMBOL_NAME(__per_cpu_end) = .;

/**
* PERCPU_VADDR - define output section for percpu area
@@ -984,11 +998,11 @@
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- __per_cpu_load = .; \
- .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
+ PERCPU_SYMBOL_NAME(__per_cpu_load) = .; \
+ PERCPU_SECTION_NAME() vaddr : AT(PERCPU_SYMBOL_NAME(__per_cpu_load) - LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
} phdr \
- . = __per_cpu_load + SIZEOF(.data..percpu);
+ . = PERCPU_SYMBOL_NAME(__per_cpu_load) + SIZEOF(PERCPU_SECTION_NAME());

/**
* PERCPU_SECTION - define output section for percpu area, simple version
@@ -1004,8 +1018,8 @@
*/
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
- .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- __per_cpu_load = .; \
+ PERCPU_SECTION_NAME() : AT(ADDR(PERCPU_SECTION_NAME()) - LOAD_OFFSET) { \
+ PERCPU_SYMBOL_NAME(__per_cpu_load) = .; \
PERCPU_INPUT(cacheline) \
}

--
2.27.0

2020-07-22 16:45:48

by David Brazdil

Subject: [PATCH 3/9] kvm: arm64: Remove hyp_adr/ldr_this_cpu

The hyp_adr/ldr_this_cpu helpers were introduced for use in hyp code because
they always need to use TPIDR_EL2 as the base, while adr/ldr_this_cpu from
kernel proper select between TPIDR_EL2 and TPIDR_EL1 based on VHE/nVHE.

Simplify this now that the nVHE hyp mode case can be handled using the
__KVM_NVHE_HYPERVISOR__ macro. VHE hyp code selects _EL2 with alternatives.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/assembler.h | 27 +++++++++++++++++----------
arch/arm64/include/asm/kvm_asm.h | 14 +-------------
arch/arm64/kvm/hyp/hyp-entry.S | 2 +-
3 files changed, 19 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 54d181177656..b392a977efb6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -218,6 +218,21 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm

+ /*
+ * @dst: destination register (32 or 64 bit wide)
+ */
+ .macro this_cpu_offset, dst
+#ifdef __KVM_NVHE_HYPERVISOR__
+ mrs \dst, tpidr_el2
+#else
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \dst, tpidr_el1
+alternative_else
+ mrs \dst, tpidr_el2
+alternative_endif
+#endif
+ .endm
+
/*
* @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* @sym: The name of the per-cpu variable
@@ -226,11 +241,7 @@ lr .req x30 // link register
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs \tmp, tpidr_el1
-alternative_else
- mrs \tmp, tpidr_el2
-alternative_endif
+ this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm

@@ -241,11 +252,7 @@ alternative_endif
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs \tmp, tpidr_el1
-alternative_else
- mrs \tmp, tpidr_el2
-alternative_endif
+ this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index da4a0826cacd..bbd14e205aba 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -151,20 +151,8 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

#else /* __ASSEMBLY__ */

-.macro hyp_adr_this_cpu reg, sym, tmp
- adr_l \reg, \sym
- mrs \tmp, tpidr_el2
- add \reg, \reg, \tmp
-.endm
-
-.macro hyp_ldr_this_cpu reg, sym, tmp
- adr_l \reg, \sym
- mrs \tmp, tpidr_el2
- ldr \reg, [\reg, \tmp]
-.endm
-
.macro get_host_ctxt reg, tmp
- hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+ adr_this_cpu \reg, kvm_host_data, \tmp
add \reg, \reg, #HOST_DATA_CONTEXT
.endm

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..0a0cb1d3acd3 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -108,7 +108,7 @@ alternative_cb_end
str x0, [x2, #VCPU_WORKAROUND_FLAGS]

/* Check that we actually need to perform the call */
- hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
+ ldr_this_cpu x0, arm64_ssbd_callback_required, x2
cbz x0, wa2_end

mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
--
2.27.0

2020-07-22 16:45:56

by David Brazdil

Subject: [PATCH 4/9] kvm: arm64: Add helpers for accessing nVHE hyp per-cpu vars

Defining a per-CPU variable in hyp/nvhe will result in its name being prefixed
with __kvm_nvhe_. Add helpers for declaring these variables in kernel proper
and accessing them with this_cpu_ptr and per_cpu_ptr.
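
A minimal usage sketch (the variable name below is made up for illustration
and assumed to be DEFINE_PER_CPU'd in a hyp/nvhe source file; the helpers
are the ones added by this patch):

  /* Kernel-proper side: declare the hyp copy and read it per CPU. */
  DECLARE_KVM_NVHE_PER_CPU(u64, example_hyp_var);

  static u64 read_example_hyp_var(int cpu)
  {
          /* Resolves to the __kvm_nvhe_-prefixed symbol owned by hyp. */
          return *per_cpu_ptr_nvhe(example_hyp_var, cpu);
  }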

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_asm.h | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index bbd14e205aba..3d69cab873e4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -60,9 +60,21 @@
DECLARE_KVM_VHE_SYM(sym); \
DECLARE_KVM_NVHE_SYM(sym)

+#define DECLARE_KVM_VHE_PER_CPU(type, sym) \
+ DECLARE_PER_CPU(type, sym)
+#define DECLARE_KVM_NVHE_PER_CPU(type, sym) \
+ DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+#define DECLARE_KVM_HYP_PER_CPU(type, sym) \
+ DECLARE_KVM_VHE_PER_CPU(type, sym); \
+ DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
#define CHOOSE_VHE_SYM(sym) sym
#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym)

+#define this_cpu_ptr_nvhe(sym) this_cpu_ptr(&kvm_nvhe_sym(sym))
+#define per_cpu_ptr_nvhe(sym, cpu) per_cpu_ptr(&kvm_nvhe_sym(sym), cpu)
+
#ifndef __KVM_NVHE_HYPERVISOR__
/*
* BIG FAT WARNINGS:
@@ -75,12 +87,21 @@
* - Don't let the nVHE hypervisor have access to this, as it will
* pick the *wrong* symbol (yes, it runs at EL2...).
*/
-#define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+#define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() \
+ ? CHOOSE_VHE_SYM(sym) \
: CHOOSE_NVHE_SYM(sym))
+#define this_cpu_ptr_hyp(sym) (is_kernel_in_hyp_mode() \
+ ? this_cpu_ptr(&sym) \
+ : this_cpu_ptr_nvhe(sym))
+#define per_cpu_ptr_hyp(sym, cpu) (is_kernel_in_hyp_mode() \
+ ? per_cpu_ptr(&sym, cpu) \
+ : per_cpu_ptr_nvhe(sym, cpu))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
-#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol
+#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol
+#define this_cpu_ptr_hyp(sym) &__nvhe_undefined_symbol
+#define per_cpu_ptr_hyp(sym, cpu) &__nvhe_undefined_symbol
#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
--
2.27.0

2020-07-22 16:45:58

by David Brazdil

Subject: [PATCH 7/9] kvm: arm64: Mark hyp stack pages reserved

In preparation for unmapping hyp pages from the host stage-2, allocate/free
the hyp stack using new helpers which automatically mark the pages reserved.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kvm/arm.c | 49 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 47 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0700c3d21b23..dc557b380c87 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1451,13 +1451,58 @@ static int init_subsystems(void)
return err;
}

+/*
+ * Alloc pages and mark them reserved so the kernel never tries to
+ * take them away from the hypervisor.
+ */
+static unsigned long alloc_hyp_pages(gfp_t flags, unsigned int order)
+{
+ struct page *page;
+ unsigned long i;
+
+ page = alloc_pages(flags, order);
+ if (!page)
+ return 0;
+
+ for (i = 0; i < (1ul << order); ++i)
+ mark_page_reserved(page + i);
+
+ return (unsigned long)page_address(page);
+}
+
+static unsigned long alloc_hyp_page(gfp_t flags)
+{
+ return alloc_hyp_pages(flags, 0);
+}
+
+/*
+ * Free pages which were previously marked reserved for the hypervisor.
+ */
+static void free_hyp_pages(unsigned long addr, unsigned int order)
+{
+ unsigned long i;
+ struct page *page;
+
+ if (!addr)
+ return;
+
+ page = virt_to_page(addr);
+ for (i = 0; i < (1ul << order); ++i)
+ free_reserved_page(page + i);
+}
+
+static void free_hyp_page(unsigned long addr)
+{
+ return free_hyp_pages(addr, 0);
+}
+
static void teardown_hyp_mode(void)
{
int cpu;

free_hyp_pgds();
for_each_possible_cpu(cpu)
- free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ free_hyp_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
}

/**
@@ -1481,7 +1526,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
unsigned long stack_page;

- stack_page = __get_free_page(GFP_KERNEL);
+ stack_page = alloc_hyp_page(GFP_KERNEL);
if (!stack_page) {
err = -ENOMEM;
goto out_err;
--
2.27.0

2020-07-22 16:45:59

by David Brazdil

Subject: [PATCH 8/9] kvm: arm64: Set up hyp percpu data for nVHE

Add a hyp percpu section to the linker script and rename the corresponding
ELF sections of hyp/nvhe object files. This moves all nVHE-specific percpu
variables to the new hyp percpu section.

Allocate a sufficient amount of memory for all percpu hyp regions at global
KVM init time, and create the corresponding hyp mappings.

The base addresses of hyp percpu regions are kept in a dynamically allocated
array in the kernel.

Add NULL checks in PMU event-reset code as it may run before KVM memory is
initialized.
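
For clarity, the address computation behind the new nVHE accessors boils down
to the following (illustrative sketch of the macro added below;
kvm_arm_hyp_percpu_base and the __kvm_nvhe___per_cpu_start marker are both
introduced in this patch):

  /* Hyp's copy of 'sym' for a given CPU: that CPU's region base plus the
   * symbol's offset within the .hyp.data..percpu section. */
  unsigned long base = kvm_arm_hyp_percpu_base ?
                       kvm_arm_hyp_percpu_base[cpu] : 0UL;
  unsigned long off  = (unsigned long)&kvm_nvhe_sym(sym) -
                       (unsigned long)&kvm_nvhe_sym(__per_cpu_start);
  void *ptr = base ? (void *)(base + off) : NULL;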

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_asm.h | 19 +++++++++--
arch/arm64/include/asm/sections.h | 1 +
arch/arm64/kernel/vmlinux.lds.S | 10 ++++++
arch/arm64/kvm/arm.c | 55 +++++++++++++++++++++++++++++--
arch/arm64/kvm/hyp/nvhe/Makefile | 2 ++
arch/arm64/kvm/pmu.c | 5 ++-
6 files changed, 86 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 3d69cab873e4..22e63e651702 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -72,8 +72,23 @@
#define CHOOSE_VHE_SYM(sym) sym
#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym)

-#define this_cpu_ptr_nvhe(sym) this_cpu_ptr(&kvm_nvhe_sym(sym))
-#define per_cpu_ptr_nvhe(sym, cpu) per_cpu_ptr(&kvm_nvhe_sym(sym), cpu)
+/* Array of percpu base addresses. Length of the array is nr_cpu_ids. */
+extern unsigned long *kvm_arm_hyp_percpu_base;
+
+/*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+#define this_cpu_ptr_nvhe(sym) per_cpu_ptr_nvhe(sym, smp_processor_id())
+#define per_cpu_ptr_nvhe(sym, cpu) \
+ ({ \
+ unsigned long base, off; \
+ base = kvm_arm_hyp_percpu_base \
+ ? kvm_arm_hyp_percpu_base[cpu] : 0; \
+ off = (unsigned long)&kvm_nvhe_sym(sym) - \
+ (unsigned long)&kvm_nvhe_sym(__per_cpu_start); \
+ base ? (typeof(kvm_nvhe_sym(sym))*)(base + off) : NULL; \
+ })

#ifndef __KVM_NVHE_HYPERVISOR__
/*
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 3994169985ef..5062553a6847 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -18,5 +18,6 @@ extern char __exittext_begin[], __exittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __kvm_nvhe___per_cpu_start[], __kvm_nvhe___per_cpu_end[];

#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6827da7f3aa5..c678615ccd6d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -16,6 +16,9 @@

#include "image.h"

+#define __CONCAT3(x, y, z) x ## y ## z
+#define CONCAT3(x, y, z) __CONCAT3(x, y, z)
+
OUTPUT_ARCH(aarch64)
ENTRY(_text)

@@ -187,6 +190,13 @@ SECTIONS

PERCPU_SECTION(L1_CACHE_BYTES)

+ /* KVM nVHE per-cpu section */
+ #undef PERCPU_SECTION_NAME
+ #undef PERCPU_SYMBOL_NAME
+ #define PERCPU_SECTION_NAME(suffix) CONCAT3(.hyp, PERCPU_SECTION_BASE_NAME, suffix)
+ #define PERCPU_SYMBOL_NAME(name) __kvm_nvhe_ ## name
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
.rela.dyn : ALIGN(8) {
*(.rela .rela*)
}
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index dc557b380c87..bbbc5c1519a9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -47,6 +47,7 @@ __asm__(".arch_extension virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+unsigned long *kvm_arm_hyp_percpu_base;

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@ -1253,6 +1254,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
}

+#define kvm_hyp_percpu_base(cpu) ((unsigned long)per_cpu_ptr_nvhe(__per_cpu_start, cpu))
+#define kvm_hyp_percpu_array_size (nr_cpu_ids * sizeof(*kvm_arm_hyp_percpu_base))
+#define kvm_hyp_percpu_array_order (get_order(kvm_hyp_percpu_array_size))
+#define kvm_hyp_percpu_begin CHOOSE_NVHE_SYM(__per_cpu_start)
+#define kvm_hyp_percpu_size ((unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - \
+ (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start))
+#define kvm_hyp_percpu_order (kvm_hyp_percpu_size \
+ ? get_order(kvm_hyp_percpu_size) : 0)
+
static void cpu_init_hyp_mode(void)
{
phys_addr_t pgd_ptr;
@@ -1268,8 +1278,8 @@ static void cpu_init_hyp_mode(void)
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
*/
- tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
- (unsigned long)kvm_ksym_ref(&kvm_host_data));
+ tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe(__per_cpu_start) -
+ (unsigned long)kvm_ksym_ref(kvm_hyp_percpu_begin);

pgd_ptr = kvm_mmu_get_httbr();
hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
@@ -1501,8 +1511,11 @@ static void teardown_hyp_mode(void)
int cpu;

free_hyp_pgds();
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
free_hyp_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ free_hyp_pages(kvm_hyp_percpu_base(cpu), kvm_hyp_percpu_order);
+ }
+ free_hyp_pages((unsigned long)kvm_arm_hyp_percpu_base, kvm_hyp_percpu_array_order);
}

/**
@@ -1535,6 +1548,28 @@ static int init_hyp_mode(void)
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
}

+ /*
+ * Allocate and initialize pages for Hypervisor-mode percpu regions.
+ */
+ kvm_arm_hyp_percpu_base = (unsigned long*)alloc_hyp_pages(
+ GFP_KERNEL | __GFP_ZERO, kvm_hyp_percpu_array_order);
+ if (!kvm_arm_hyp_percpu_base) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ for_each_possible_cpu(cpu) {
+ unsigned long percpu_base;
+
+ percpu_base = alloc_hyp_pages(GFP_KERNEL, kvm_hyp_percpu_order);
+ if (!percpu_base) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ memcpy((void*)percpu_base, kvm_hyp_percpu_begin, kvm_hyp_percpu_size);
+ kvm_arm_hyp_percpu_base[cpu] = percpu_base;
+ }
+
/*
* Map the Hyp-code called directly from the host
*/
@@ -1579,6 +1614,20 @@ static int init_hyp_mode(void)
}
}

+ /*
+ * Map Hyp percpu pages
+ */
+ for_each_possible_cpu(cpu) {
+ char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+ char *percpu_end = percpu_begin + PAGE_ALIGN(kvm_hyp_percpu_size);
+ err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
+
+ if (err) {
+ kvm_err("Cannot map hyp percpu region\n");
+ goto out_err;
+ }
+ }
+
for_each_possible_cpu(cpu) {
kvm_host_data_t *cpu_data;

diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 0b34414557d6..1d415698e60e 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -23,6 +23,8 @@ $(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
quiet_cmd_hypcopy = HYPCOPY $@
cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
--rename-section=.text=.hyp.text \
+ --rename-section=.data..percpu=.hyp.data..percpu \
+ --rename-section=.data..percpu..read_mostly=.hyp.data..percpu..read_mostly \
$< $@

# Remove ftrace and Shadow Call Stack CFLAGS.
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 717941910723..b4c8302f0798 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -33,7 +33,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);

- if (!kvm_pmu_switch_needed(attr))
+ if (!ctx || !kvm_pmu_switch_needed(attr))
return;

if (!attr->exclude_host)
@@ -49,6 +49,9 @@ void kvm_clr_pmu_events(u32 clr)
{
struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);

+ if (!ctx)
+ return;
+
ctx->pmu_events.events_host &= ~clr;
ctx->pmu_events.events_guest &= ~clr;
}
--
2.27.0

2020-07-22 16:46:16

by David Brazdil

Subject: [PATCH 9/9] kvm: arm64: Remove unnecessary hyp mappings

With all nVHE per-CPU variables now part of the hyp per-CPU region, mapping
them individually is no longer necessary; they are mapped to hyp as part of
the overall per-CPU region.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_mmu.h | 25 +++++++------------------
arch/arm64/kvm/arm.c | 17 +----------------
2 files changed, 8 insertions(+), 34 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e9e5875274cb..1a66089cf4f4 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -531,28 +531,17 @@ static inline int kvm_map_vectors(void)
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
DECLARE_KVM_NVHE_PER_CPU(u64, arm64_ssbd_callback_required);

-static inline int hyp_init_aux_data(void)
+static inline void hyp_init_aux_data(void)
{
- int cpu, err;
+ int cpu;

- for_each_possible_cpu(cpu) {
- u64 *ptr;
-
- ptr = per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu);
- err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
- if (err)
- return err;
-
- /* Copy value from kernel to hyp. */
- *ptr = per_cpu(arm64_ssbd_callback_required, cpu);
- }
- return 0;
+ /* Copy arm64_ssbd_callback_required values from kernel to hyp. */
+ for_each_possible_cpu(cpu)
+ *(per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu)) =
+ per_cpu(arm64_ssbd_callback_required, cpu);
}
#else
-static inline int hyp_init_aux_data(void)
-{
- return 0;
-}
+static inline void hyp_init_aux_data(void) {}
#endif

#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index bbbc5c1519a9..f2e537d99d2b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1628,22 +1628,7 @@ static int init_hyp_mode(void)
}
}

- for_each_possible_cpu(cpu) {
- kvm_host_data_t *cpu_data;
-
- cpu_data = per_cpu_ptr_hyp(kvm_host_data, cpu);
- err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map host CPU state: %d\n", err);
- goto out_err;
- }
- }
-
- err = hyp_init_aux_data();
- if (err)
- kvm_err("Cannot map host auxiliary data: %d\n", err);
-
+ hyp_init_aux_data();
return 0;

out_err:
--
2.27.0

2020-07-22 16:46:51

by David Brazdil

Subject: [PATCH 6/9] kvm: arm64: Create separate instances of kvm_host_data for VHE/nVHE

Host CPU context is stored in a global per-cpu variable `kvm_host_data`.
In preparation for introducing an independent per-CPU region for nVHE hyp,
create two separate instances of `kvm_host_data`, one for VHE and one
for nVHE.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/arm64/kernel/image-vars.h | 1 -
arch/arm64/kvm/arm.c | 5 ++---
arch/arm64/kvm/hyp/nvhe/switch.c | 3 +++
arch/arm64/kvm/hyp/vhe/switch.c | 3 +++
arch/arm64/kvm/pmu.c | 8 ++++----
6 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e1a32c0707bb..a6d61a708056 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -574,7 +574,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

-DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 034cf21e67ce..e23b044c4081 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -69,7 +69,6 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);

/* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);

/* Kernel constant needed to compute idmap addresses. */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a53e87305fa0..0700c3d21b23 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -46,7 +46,6 @@
__asm__(".arch_extension virt");
#endif

-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/* The VMID used in the VTTBR */
@@ -1303,7 +1302,7 @@ static void cpu_hyp_reset(void)

static void cpu_hyp_reinit(void)
{
- kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+ kvm_init_host_cpu_context(&this_cpu_ptr_hyp(kvm_host_data)->host_ctxt);

cpu_hyp_reset();

@@ -1538,7 +1537,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_host_data_t *cpu_data;

- cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+ cpu_data = per_cpu_ptr_hyp(kvm_host_data, cpu);
err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);

if (err) {
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 8c2bd04df813..4488d14de1b5 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -30,6 +30,9 @@
/* Non-VHE copy of the kernel symbol. */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

+/* Non-VHE instance of kvm_host_data. */
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 746fcc3974c7..7927a969eca4 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -28,6 +28,9 @@

const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

+/* VHE instance of kvm_host_data. */
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index b5ae3a5d509e..717941910723 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -31,7 +31,7 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
*/
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);

if (!kvm_pmu_switch_needed(attr))
return;
@@ -47,7 +47,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
*/
void kvm_clr_pmu_events(u32 clr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp(kvm_host_data);

ctx->pmu_events.events_host &= ~clr;
ctx->pmu_events.events_guest &= ~clr;
@@ -169,7 +169,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;

- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;

@@ -188,7 +188,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;

- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;

--
2.27.0

2020-07-22 16:47:11

by David Brazdil

Subject: [PATCH 2/9] kvm: arm64: Remove __hyp_this_cpu_read

this_cpu_ptr is meant for use in kernel proper because it selects between
TPIDR_EL1/2 based on nVHE/VHE. __hyp_this_cpu_ptr was used in hyp to always
select TPIDR_EL2. Unify all users behind this_cpu_ptr and friends by
selecting the _EL2 register under __KVM_NVHE_HYPERVISOR__.

Under CONFIG_DEBUG_PREEMPT, the kernel helpers perform a preemption check
which is omitted by the hyp helpers. Preserve the behavior for nVHE by
overriding the corresponding macros under __KVM_NVHE_HYPERVISOR__. Extend
the checks into VHE hyp code.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_asm.h | 20 --------------
arch/arm64/include/asm/percpu.h | 33 +++++++++++++++++++++--
arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 4 +--
arch/arm64/kvm/hyp/include/hyp/switch.h | 6 ++---
arch/arm64/kvm/hyp/nvhe/switch.c | 2 +-
arch/arm64/kvm/hyp/vhe/switch.c | 2 +-
arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 4 +--
7 files changed, 40 insertions(+), 31 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..da4a0826cacd 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -149,26 +149,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
addr; \
})

-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym) \
- ({ \
- void *__ptr; \
- __verify_pcpu_ptr(&sym); \
- __ptr = hyp_symbol_addr(sym); \
- __ptr += read_sysreg(tpidr_el2); \
- (typeof(sym) __kernel __force *)__ptr; \
- })
-
-#define __hyp_this_cpu_read(sym) \
- ({ \
- *__hyp_this_cpu_ptr(sym); \
- })
-
#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 0b6409b89e5e..b4008331475b 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -19,7 +19,21 @@ static inline void set_my_cpu_offset(unsigned long off)
:: "r" (off) : "memory");
}

-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+ unsigned long off;
+
+ /*
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrs %0, tpidr_el2" : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
+
+ return off;
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
{
unsigned long off;

@@ -35,7 +49,12 @@ static inline unsigned long __my_cpu_offset(void)

return off;
}
-#define __my_cpu_offset __my_cpu_offset()
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif

#define PERCPU_RW_OPS(sz) \
static inline unsigned long __percpu_read_##sz(void *ptr) \
@@ -227,4 +246,14 @@ PERCPU_RET_OP(add, add, ldadd)

#include <asm-generic/percpu.h>

+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef this_cpu_ptr
+#define this_cpu_ptr raw_cpu_ptr
+#undef __this_cpu_read
+#define __this_cpu_read raw_cpu_read
+#undef __this_cpu_write
+#define __this_cpu_write raw_cpu_write
+#endif
+
#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..3b2056a225ff 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 0511af14dc81..e69c2c6098a1 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -382,7 +382,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
!esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return false;

- ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
__ptrauth_save_key(ctxt, APIA);
__ptrauth_save_key(ctxt, APIB);
__ptrauth_save_key(ctxt, APDA);
@@ -491,7 +491,7 @@ static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
* guest wants it disabled, so be it...
*/
if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ __this_cpu_read(arm64_ssbd_callback_required))
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}
@@ -503,7 +503,7 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
* If the guest has disabled the workaround, bring it back on.
*/
if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ __this_cpu_read(arm64_ssbd_callback_required))
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..ddb602ffb022 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -175,7 +175,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)

vcpu = kern_hyp_va(vcpu);

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..746fcc3974c7 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -108,7 +108,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;

diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 996471e4c138..2a0b8c88d74f 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -66,7 +66,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);

/*
@@ -100,7 +100,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;

- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();

__sysreg_save_el1_state(guest_ctxt);
--
2.27.0

2020-07-22 16:48:56

by David Brazdil

Subject: [PATCH 5/9] kvm: arm64: Duplicate arm64_ssbd_callback_required for nVHE hyp

Hyp keeps track of which cores require the SSBD callback by accessing a
kernel-proper global variable. Create an nVHE symbol of the same name
and copy the value from kernel proper to nVHE at KVM init time.

Done in preparation for separating percpu memory owned by kernel
proper and nVHE.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_mmu.h | 10 +++++++---
arch/arm64/kernel/image-vars.h | 1 -
arch/arm64/kvm/arm.c | 2 +-
arch/arm64/kvm/hyp/nvhe/switch.c | 3 +++
4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 22157ded04ca..e9e5875274cb 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -529,23 +529,27 @@ static inline int kvm_map_vectors(void)

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+DECLARE_KVM_NVHE_PER_CPU(u64, arm64_ssbd_callback_required);

-static inline int hyp_map_aux_data(void)
+static inline int hyp_init_aux_data(void)
{
int cpu, err;

for_each_possible_cpu(cpu) {
u64 *ptr;

- ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+ ptr = per_cpu_ptr_nvhe(arm64_ssbd_callback_required, cpu);
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
if (err)
return err;
+
+ /* Copy value from kernel to hyp. */
+ *ptr = per_cpu(arm64_ssbd_callback_required, cpu);
}
return 0;
}
#else
-static inline int hyp_map_aux_data(void)
+static inline int hyp_init_aux_data(void)
{
return 0;
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..034cf21e67ce 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -69,7 +69,6 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);

/* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 98f05bdac3c1..a53e87305fa0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1547,7 +1547,7 @@ static int init_hyp_mode(void)
}
}

- err = hyp_map_aux_data();
+ err = hyp_init_aux_data();
if (err)
kvm_err("Cannot map host auxiliary data: %d\n", err);

diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index ddb602ffb022..8c2bd04df813 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -27,6 +27,9 @@
#include <asm/processor.h>
#include <asm/thread_info.h>

+/* Non-VHE copy of the kernel symbol. */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
--
2.27.0