2021-03-04 06:30:44

by Quentin Perret

Subject: [PATCH v3 32/32] KVM: arm64: Protect the .hyp sections from the host

When KVM runs in nVHE protected mode, use the host stage 2 to unmap the
hypervisor sections. The long-term goal is to ensure the EL2 code can
remain robust regardless of the host's state, so this starts by making
sure the host cannot e.g. write to the .hyp sections directly.

Signed-off-by: Quentin Perret <[email protected]>
---
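Note for reviewers: pkvm_host_unmap_section() below relies on token
pasting, so for example

	pkvm_host_unmap_section(__hyp_text);

expands to

	pkvm_host_unmap(__pa_symbol(__hyp_text_start),
			__pa_symbol(__hyp_text_end));

i.e. each section is unmapped from the host using the physical range
covered by its _start/_end linker symbols. The "unmap" itself is
implemented by annotating the range as KVM_PGTABLE_PROT_NONE in the
host stage 2, so any later host access to it will fault at EL2.
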
 arch/arm64/include/asm/kvm_asm.h              |  1 +
 arch/arm64/kvm/arm.c                          | 46 +++++++++++++++++++
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c            |  9 ++++
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 22 +++++++++
 5 files changed, 80 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index b127af02bd45..9accf5350858 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -62,6 +62,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping	17
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector		18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize		19
+#define __KVM_HOST_SMCCC_FUNC___pkvm_host_unmap		20
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a31c56bc55b3..73c26d206542 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1894,11 +1894,57 @@ void _kvm_host_prot_finalize(void *discard)
 	WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
 }
 
+static inline int pkvm_host_unmap(phys_addr_t start, phys_addr_t end)
+{
+	return kvm_call_hyp_nvhe(__pkvm_host_unmap, start, end);
+}
+
+#define pkvm_host_unmap_section(__section)		\
+	pkvm_host_unmap(__pa_symbol(__section##_start),	\
+			__pa_symbol(__section##_end))
+
 static int finalize_hyp_mode(void)
 {
+	int cpu, ret;
+
 	if (!is_protected_kvm_enabled())
 		return 0;
 
+	ret = pkvm_host_unmap_section(__hyp_idmap_text);
+	if (ret)
+		return ret;
+
+	ret = pkvm_host_unmap_section(__hyp_text);
+	if (ret)
+		return ret;
+
+	ret = pkvm_host_unmap_section(__hyp_rodata);
+	if (ret)
+		return ret;
+
+	ret = pkvm_host_unmap_section(__hyp_bss);
+	if (ret)
+		return ret;
+
+	ret = pkvm_host_unmap(hyp_mem_base, hyp_mem_base + hyp_mem_size);
+	if (ret)
+		return ret;
+
+	for_each_possible_cpu(cpu) {
+		phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
+		phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
+
+		ret = pkvm_host_unmap(start, end);
+		if (ret)
+			return ret;
+
+		start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
+		end = start + PAGE_SIZE;
+		ret = pkvm_host_unmap(start, end);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * Flip the static key upfront as that may no longer be possible
 	 * once the host stage 2 is installed.
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index d293cb328cc4..39890d4f1dc8 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -21,6 +21,8 @@ struct host_kvm {
 extern struct host_kvm host_kvm;
 
 int __pkvm_prot_finalize(void);
+int __pkvm_host_unmap(phys_addr_t start, phys_addr_t end);
+
 int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f47028d3fd0a..2069136fdaec 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -156,6 +156,14 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
 {
 	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
 }
+
+static void handle___pkvm_host_unmap(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
+	DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
+
+	cpu_reg(host_ctxt, 1) = __pkvm_host_unmap(start, end);
+}
 typedef void (*hcall_t)(struct kvm_cpu_context *);
 
 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -180,6 +188,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_create_mappings),
 	HANDLE_FUNC(__pkvm_create_private_mapping),
 	HANDLE_FUNC(__pkvm_prot_finalize),
+	HANDLE_FUNC(__pkvm_host_unmap),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2252ad1a8945..ed480facdc88 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -196,6 +196,28 @@ static int host_stage2_idmap(u64 addr)
 	return ret;
 }
 
+int __pkvm_host_unmap(phys_addr_t start, phys_addr_t end)
+{
+	struct kvm_mem_range r1, r2;
+	int ret;
+
+	/*
+	 * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
+	 * non-persistent, so don't allow PROT_NONE in MMIO range.
+	 */
+	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+		return -EINVAL;
+	if (r1.start != r2.start)
+		return -EINVAL;
+
+	hyp_spin_lock(&host_kvm.lock);
+	ret = kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+				     KVM_PGTABLE_PROT_NONE, &host_s2_mem);
+	hyp_spin_unlock(&host_kvm.lock);
+
+	return ret;
+}
+
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 {
 	struct kvm_vcpu_fault_info fault;
--
2.30.1.766.gb4fecdf3b7-goog


2021-03-05 19:15:51

by Will Deacon

Subject: Re: [PATCH v3 32/32] KVM: arm64: Protect the .hyp sections from the host

On Tue, Mar 02, 2021 at 03:00:02PM +0000, Quentin Perret wrote:
> When KVM runs in nVHE protected mode, use the host stage 2 to unmap the
> hypervisor sections. The long-term goal is to ensure the EL2 code can
> remain robust regardless of the host's state, so this starts by making
> sure the host cannot e.g. write to the .hyp sections directly.
>
> Signed-off-by: Quentin Perret <[email protected]>
> ---
> arch/arm64/include/asm/kvm_asm.h              |  1 +
> arch/arm64/kvm/arm.c                          | 46 +++++++++++++++++++
> arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 +
> arch/arm64/kvm/hyp/nvhe/hyp-main.c            |  9 ++++
> arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 22 +++++++++
> 5 files changed, 80 insertions(+)

[...]

>  static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 2252ad1a8945..ed480facdc88 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -196,6 +196,28 @@ static int host_stage2_idmap(u64 addr)
>  	return ret;
>  }
>  
> +int __pkvm_host_unmap(phys_addr_t start, phys_addr_t end)
> +{
> +	struct kvm_mem_range r1, r2;
> +	int ret;
> +
> +	/*
> +	 * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
> +	 * non-persistent, so don't allow PROT_NONE in MMIO range.
> +	 */
> +	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
> +		return -EINVAL;
> +	if (r1.start != r2.start)
> +		return -EINVAL;


Feels like this should be in a helper to determine whether or not a range is
solely covered by memory.
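
Something like the below, perhaps (untested sketch; the name and the
reuse of the existing find_mem_range()/struct kvm_mem_range are just
suggestions):

	/* Check whether [start, end[ is solely covered by a memory range. */
	static bool range_is_memory(u64 start, u64 end)
	{
		struct kvm_mem_range r1, r2;

		if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
			return false;

		/* Both ends must resolve to the same memory range. */
		return r1.start == r2.start;
	}

__pkvm_host_unmap() would then just bail out with -EINVAL when
!range_is_memory(start, end).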

Either way:

Acked-by: Will Deacon <[email protected]>

Will