Now that we mark memory owned by the hypervisor in the host stage-2
during __pkvm_init(), we no longer need to rely on the host to
explicitly mark the hyp sections later on.

Remove the __pkvm_mark_hyp() hypercall altogether.

Signed-off-by: Quentin Perret <[email protected]>
---
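Note for reviewers: the annotation that replaces this hypercall is now
done at EL2 itself, while __pkvm_init() sets up the hyp mappings. As a
minimal sketch of the idea, reusing only the helpers already visible in
the mem_protect.c hunk below (the function name and where it gets called
from in the init path are illustrative, not the literal code added
earlier in the series):

	/*
	 * Illustrative sketch: mark [start, end[ as owned by the
	 * hypervisor in the host stage-2, directly from EL2, with
	 * no host hypercall involved.
	 */
	static int mark_hyp_owned(phys_addr_t start, phys_addr_t end)
	{
		int ret;

		hyp_spin_lock(&host_kvm.lock);
		/* Same primitive the removed __pkvm_mark_hyp() used. */
		ret = host_stage2_try(kvm_pgtable_stage2_set_owner,
				      &host_kvm.pgt, start, end - start,
				      &host_s2_pool, pkvm_hyp_id);
		hyp_spin_unlock(&host_kvm.lock);

		return ret;
	}

The mem_protect.c hunk below removes the host-facing counterpart of
exactly this operation.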
arch/arm64/include/asm/kvm_asm.h | 3 +-
arch/arm64/kvm/arm.c | 46 -------------------
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 1 -
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 9 ----
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 19 --------
5 files changed, 1 insertion(+), 77 deletions(-)
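A second note on the mem_protect.c hunk: __pkvm_mark_hyp() refused to
operate on MMIO ranges because host_stage2_try() may reclaim page-table
pages by tearing down all (non-persistent) device mappings when the
memory pool runs dry. Roughly, paraphrased from mem_protect.c rather
than quoted verbatim:

	/*
	 * Call fn; if it runs out of page-table pages, recycle the
	 * non-persistent MMIO mappings and retry once.
	 */
	#define host_stage2_try(fn, ...)			\
	({							\
		int __ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = host_stage2_unmap_dev_all();	\
			if (!__ret)				\
				__ret = fn(__VA_ARGS__);	\
		}						\
		__ret;						\
	})

Changing page ownership inside an MMIO range would invalidate that
recycling assumption, hence the -EINVAL check being deleted below.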
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9f0bf2109be7..432a9ea1f02e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -63,8 +63,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20

#ifndef __ASSEMBLY__

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f27792..2f378482471b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1954,57 +1954,11 @@ static void _kvm_host_prot_finalize(void *discard)
WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
}

-static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
-{
- return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
-}
-
-#define pkvm_mark_hyp_section(__section) \
- pkvm_mark_hyp(__pa_symbol(__section##_start), \
- __pa_symbol(__section##_end))
-
static int finalize_hyp_mode(void)
{
- int cpu, ret;
-
if (!is_protected_kvm_enabled())
return 0;

- ret = pkvm_mark_hyp_section(__hyp_idmap_text);
- if (ret)
- return ret;
-
- ret = pkvm_mark_hyp_section(__hyp_text);
- if (ret)
- return ret;
-
- ret = pkvm_mark_hyp_section(__hyp_rodata);
- if (ret)
- return ret;
-
- ret = pkvm_mark_hyp_section(__hyp_bss);
- if (ret)
- return ret;
-
- ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size);
- if (ret)
- return ret;
-
- for_each_possible_cpu(cpu) {
- phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
- phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
-
- ret = pkvm_mark_hyp(start, end);
- if (ret)
- return ret;
-
- start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
- end = start + PAGE_SIZE;
- ret = pkvm_mark_hyp(start, end);
- if (ret)
- return ret;
- }
-
/*
* Flip the static key upfront as that may no longer be possible
* once the host stage 2 is installed.
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 49db0ec5a606..0118527b07b0 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -49,7 +49,6 @@ extern struct host_kvm host_kvm;
extern const u8 pkvm_hyp_id;

int __pkvm_prot_finalize(void);
-int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);

bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 1632f001f4ed..7900d5b66ba3 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -163,14 +163,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}
-
-static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
- DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
-
- cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
-}
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -196,7 +188,6 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_create_mappings),
HANDLE_FUNC(__pkvm_create_private_mapping),
HANDLE_FUNC(__pkvm_prot_finalize),
- HANDLE_FUNC(__pkvm_mark_hyp),
};

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index cb023d31666e..2991dc6996b9 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -339,25 +339,6 @@ static int host_stage2_idmap(u64 addr)
return ret;
}

-int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
-{
- int ret;
-
- /*
- * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
- * non-persistent, so don't allow changing page ownership in MMIO range.
- */
- if (!range_is_memory(start, end))
- return -EINVAL;
-
- hyp_spin_lock(&host_kvm.lock);
- ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
- start, end - start, &host_s2_pool, pkvm_hyp_id);
- hyp_spin_unlock(&host_kvm.lock);
-
- return ret != -EAGAIN ? ret : 0;
-}
-
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
struct kvm_vcpu_fault_info fault;
--
2.32.0.605.g8dce9f2422-goog

Hi Quentin,

On Mon, Aug 9, 2021 at 5:25 PM Quentin Perret <[email protected]> wrote:
>
> Now that we mark memory owned by the hypervisor in the host stage-2
> during __pkvm_init(), we no longer need to rely on the host to
> explicitly mark the hyp sections later on.
>
> Remove the __pkvm_mark_hyp() hypercall altogether.
>
> Signed-off-by: Quentin Perret <[email protected]>
> ---

Reviewed-by: Fuad Tabba <[email protected]>

Thanks,
/fuad