Once we start initializing KVM on newly booted cores before the rest of
the kernel, parameters to __do_hyp_init will need to be provided by EL2
rather than EL1. At that point it will not be possible to pass its four
arguments directly because PSCI_CPU_ON only supports one context
argument.
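
For illustration (a sketch only, not part of this patch; entry_point_pa
is a made-up name), the only way to hand the parameters over at that
point will be to pass the physical address of the struct as the single
PSCI context argument:

	invoke_psci_fn(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_point_pa,
		       virt_to_phys(params));

The firmware then provides that context argument to the entry point in
x0 when the new core starts executing.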

Refactor __do_hyp_init to accept its parameters in a struct. This
prepares the code for KVM booting cores itself and removes any limit on
the number of __do_hyp_init arguments.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_asm.h | 7 +++++++
arch/arm64/include/asm/kvm_hyp.h | 4 ++++
arch/arm64/kernel/asm-offsets.c | 4 ++++
arch/arm64/kvm/arm.c | 26 ++++++++++++++------------
arch/arm64/kvm/hyp/nvhe/hyp-init.S | 21 ++++++++++-----------
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 2 ++
6 files changed, 41 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 54387ccd1ab2..01904e88cead 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -150,6 +150,13 @@ extern void *__vhe_undefined_symbol;

#endif

+struct kvm_nvhe_init_params {
+ unsigned long tpidr_el2;
+ unsigned long vector_hyp_va;
+ unsigned long stack_hyp_va;
+ phys_addr_t pgd_pa;
+};
+
/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr) \
({ \
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 6b664de5ec1f..a3289071f3d8 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -15,6 +15,10 @@
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);

+#ifdef __KVM_NVHE_HYPERVISOR__
+DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
+#endif
+
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 7d32fc959b1a..4435ad8be938 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -110,6 +110,10 @@ int main(void)
DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
+ DEFINE(NVHE_INIT_TPIDR_EL2, offsetof(struct kvm_nvhe_init_params, tpidr_el2));
+ DEFINE(NVHE_INIT_VECTOR_HYP_VA, offsetof(struct kvm_nvhe_init_params, vector_hyp_va));
+ DEFINE(NVHE_INIT_STACK_HYP_VA, offsetof(struct kvm_nvhe_init_params, stack_hyp_va));
+ DEFINE(NVHE_INIT_PGD_PA, offsetof(struct kvm_nvhe_init_params, pgd_pa));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c0ffb019ca8b..4838556920fb 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -50,6 +50,7 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@ -1347,10 +1348,7 @@ static int kvm_map_vectors(void)

static void cpu_init_hyp_mode(void)
{
- phys_addr_t pgd_ptr;
- unsigned long hyp_stack_ptr;
- unsigned long vector_ptr;
- unsigned long tpidr_el2;
+ struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params);
struct arm_smccc_res res;

/* Switch from the HYP stub to our own HYP init vector */
@@ -1361,13 +1359,18 @@ static void cpu_init_hyp_mode(void)
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
*/
- tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
- (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
+ params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

- pgd_ptr = kvm_mmu_get_httbr();
- hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
- hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
- vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
+ params->vector_hyp_va = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
+ params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE);
+ params->pgd_pa = kvm_mmu_get_httbr();
+
+ /*
+ * Flush the init params from the data cache because the struct will
+ * be read while the MMU is off.
+ */
+ __flush_dcache_area(params, sizeof(*params));

/*
* Call initialization code, and switch to the full blown HYP code.
@@ -1376,8 +1379,7 @@ static void cpu_init_hyp_mode(void)
* cpus_have_const_cap() wrapper.
*/
BUG_ON(!system_capabilities_finalized());
- arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
- pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
+ arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

/*
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 17b58dbc3a2f..67342cc9930f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -47,10 +47,7 @@ __invalid:

/*
* x0: SMCCC function ID
- * x1: HYP pgd
- * x2: per-CPU offset
- * x3: HYP stack
- * x4: HYP vectors
+ * x1: struct kvm_nvhe_init_params PA
*/
__do_hyp_init:
/* Check for a stub HVC call */
@@ -71,10 +68,16 @@ __do_hyp_init:
mov x0, #SMCCC_RET_NOT_SUPPORTED
eret

-1:
- /* Set tpidr_el2 for use by HYP to free a register */
- msr tpidr_el2, x2
+1: ldr x0, [x1, #NVHE_INIT_TPIDR_EL2]
+ msr tpidr_el2, x0

+ ldr x0, [x1, #NVHE_INIT_VECTOR_HYP_VA]
+ msr vbar_el2, x0
+
+ ldr x0, [x1, #NVHE_INIT_STACK_HYP_VA]
+ mov sp, x0
+
+ ldr x1, [x1, #NVHE_INIT_PGD_PA]
phys_to_ttbr x0, x1
alternative_if ARM64_HAS_CNP
orr x0, x0, #TTBR_CNP_BIT
@@ -134,10 +137,6 @@ alternative_else_nop_endif
msr sctlr_el2, x0
isb

- /* Set the stack and new vectors */
- mov sp, x3
- msr vbar_el2, x4
-
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
eret
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index e2eafe2c93af..411b0f652417 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -14,6 +14,8 @@

#include <kvm/arm_hypercalls.h>

+DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
+
static void handle_host_hcall(unsigned long func_id,
struct kvm_cpu_context *host_ctxt)
{
--
2.29.2.299.gdc1121823c-goog

On Mon, 16 Nov 2020 20:43:00 +0000,
David Brazdil <[email protected]> wrote:
>
> Once we start initializing KVM on newly booted cores before the rest of
> the kernel, parameters to __do_hyp_init will need to be provided by EL2
> rather than EL1. At that point it will not be possible to pass its four
> arguments directly because PSCI_CPU_ON only supports one context
> argument.
>
> Refactor __do_hyp_init to accept its parameters in a struct. This
> prepares the code for KVM booting cores as well as removes any limits on
> the number of __do_hyp_init arguments.
>
> Signed-off-by: David Brazdil <[email protected]>
> ---
> arch/arm64/include/asm/kvm_asm.h | 7 +++++++
> arch/arm64/include/asm/kvm_hyp.h | 4 ++++
> arch/arm64/kernel/asm-offsets.c | 4 ++++
> arch/arm64/kvm/arm.c | 26 ++++++++++++++------------
> arch/arm64/kvm/hyp/nvhe/hyp-init.S | 21 ++++++++++-----------
> arch/arm64/kvm/hyp/nvhe/hyp-main.c | 2 ++
> 6 files changed, 41 insertions(+), 23 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 54387ccd1ab2..01904e88cead 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -150,6 +150,13 @@ extern void *__vhe_undefined_symbol;
>
> #endif
>
> +struct kvm_nvhe_init_params {
> + unsigned long tpidr_el2;
> + unsigned long vector_hyp_va;
> + unsigned long stack_hyp_va;
> + phys_addr_t pgd_pa;
> +};
> +
> /* Translate a kernel address @ptr into its equivalent linear mapping */
> #define kvm_ksym_ref(ptr) \
> ({ \
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index 6b664de5ec1f..a3289071f3d8 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -15,6 +15,10 @@
> DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
> DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
>
> +#ifdef __KVM_NVHE_HYPERVISOR__
> +DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
> +#endif

I'm not sure we should bother with this #ifdefery. Having the
declaration present at all times doesn't really hurt, since it is only
defined in the HYP code. Cutting down on the conditionals would
certainly help readability.
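
That is, just:

	DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);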
> +
> #define read_sysreg_elx(r,nvh,vh) \
> ({ \
> u64 reg; \
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index 7d32fc959b1a..4435ad8be938 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -110,6 +110,10 @@ int main(void)
> DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
> DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
> DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
> + DEFINE(NVHE_INIT_TPIDR_EL2, offsetof(struct kvm_nvhe_init_params, tpidr_el2));
> + DEFINE(NVHE_INIT_VECTOR_HYP_VA, offsetof(struct kvm_nvhe_init_params, vector_hyp_va));
> + DEFINE(NVHE_INIT_STACK_HYP_VA, offsetof(struct kvm_nvhe_init_params, stack_hyp_va));
> + DEFINE(NVHE_INIT_PGD_PA, offsetof(struct kvm_nvhe_init_params, pgd_pa));
> #endif
> #ifdef CONFIG_CPU_PM
> DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index c0ffb019ca8b..4838556920fb 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -50,6 +50,7 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
>
> static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
> unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
> +DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
>
> /* The VMID used in the VTTBR */
> static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
> @@ -1347,10 +1348,7 @@ static int kvm_map_vectors(void)
>
> static void cpu_init_hyp_mode(void)
> {
> - phys_addr_t pgd_ptr;
> - unsigned long hyp_stack_ptr;
> - unsigned long vector_ptr;
> - unsigned long tpidr_el2;
> + struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params);
> struct arm_smccc_res res;
>
> /* Switch from the HYP stub to our own HYP init vector */
> @@ -1361,13 +1359,18 @@ static void cpu_init_hyp_mode(void)
> * kernel's mapping to the linear mapping, and store it in tpidr_el2
> * so that we can use adr_l to access per-cpu variables in EL2.
> */
> - tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
> - (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
> + params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
> + (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
>
> - pgd_ptr = kvm_mmu_get_httbr();
> - hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
> - hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
> - vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
> + params->vector_hyp_va = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
> + params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE);
> + params->pgd_pa = kvm_mmu_get_httbr();

Note to self: rename this to kvm_mmu_get_hyp_pgd() (another AArch32-ism).
> +
> + /*
> + * Flush the init params from the data cache because the struct will
> + * be read while the MMU is off.
> + */
> + __flush_dcache_area(params, sizeof(*params));

nit: please use kvm_flush_dcache_to_poc(), as it clearly indicates to
which point we are flushing.
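
Something like:

	kvm_flush_dcache_to_poc(params, sizeof(*params));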

Thanks,

M.

--
Without deviation from the norm, progress is not possible.

On Mon, Nov 23, 2020 at 02:20:07PM +0000, Marc Zyngier wrote:
> On Mon, 16 Nov 2020 20:43:00 +0000,
> David Brazdil <[email protected]> wrote:
> >
[...]
> > +
> > + /*
> > + * Flush the init params from the data cache because the struct will
> > + * be read while the MMU is off.
> > + */
> > + __flush_dcache_area(params, sizeof(*params));
>
> nit: please use kvm_flush_dcache_to_poc(), as it clearly indicates to
> which point we are flushing.

Will change, but out of curiosity - how is it different? AFAICT, it is just
an alias with a single use in __clean_dcache_guest_page:

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
>
> Thanks,
>
> M.
>
> --
> Without deviation from the norm, progress is not possible.

On 2020-11-25 10:39, David Brazdil wrote:
> On Mon, Nov 23, 2020 at 02:20:07PM +0000, Marc Zyngier wrote:
[...]
>> > +
>> > + /*
>> > + * Flush the init params from the data cache because the struct will
>> > + * be read while the MMU is off.
>> > + */
>> > + __flush_dcache_area(params, sizeof(*params));
>>
>> nit: please use kvm_flush_dcache_to_poc(), as it clearly indicates to
>> which point we are flushing.
>
> Will change, but out of curiosity - how is it different? AFAICT, it is just
> an alias with a single use in __clean_dcache_guest_page:
>
> #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))

It is indeed the exact same thing, but it says clearly in the name that
we are cleaning to the "Point Of Coherency", as opposed to any other
architectural level (Unification or Persistence).

It makes it clear that we are cleaning all the way to the point where
it can be accessed reliably with an uncacheable mapping, and not
leaving the data dangling at a shallower cache level.
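
For reference, __flush_dcache_area is (if memory serves) nothing more
than a clean+invalidate by VA, all the way to the PoC, in
arch/arm64/mm/cache.S:

	SYM_FUNC_START_PI(__flush_dcache_area)
		dcache_by_line_op civac, sy, x0, x1, x2, x3
		ret
	SYM_FUNC_END_PI(__flush_dcache_area)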

Thanks,

M.

--
Jazz is not dead. It just smells funny...