Add a handler for host SMCs to the KVM nVHE trap handler. Forward all
SMCs to EL3 and propagate the result back to EL1. This is done in
preparation for validating host SMCs in KVM nVHE protected mode.

The implementation assumes that firmware uses SMCCC v1.2 or older. That
means x0-x17 can be used both for arguments and results, while all other
GPRs are preserved across the call.
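To make the register convention concrete, here is a sketch (purely
illustrative, not part of the patch) of a host-side SMCCC call as it
looks before any trapping: arguments travel in x0-x7 and results come
back in x0-x3, all inside the x0-x17 window the forwarding code below
saves and restores. PSCI_VERSION is just an example function ID, and
the wrapper name is made up for the example.

#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>

/*
 * Illustration only: with SMC trapping enabled, a call like this is
 * caught by the hyp vector, forwarded to EL3 verbatim, and its
 * results are copied back into the host context.
 */
static u32 example_query_psci_version(void)
{
	struct arm_smccc_res res;

	/* Arguments go in x0-x7; results come back in res.a0-res.a3. */
	arm_smccc_smc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}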

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kvm/hyp/nvhe/host.S | 38 ++++++++++++++++++++++++++++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 26 ++++++++++++++++++++
2 files changed, 64 insertions(+)

diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index ed27f06a31ba..52dae5cd5a28 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -183,3 +183,41 @@ SYM_CODE_START(__kvm_hyp_host_vector)
invalid_host_el1_vect // FIQ 32-bit EL1
invalid_host_el1_vect // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)
+
+/*
+ * Forward SMC with arguments in struct kvm_cpu_context, and
+ * store the result into the same struct. Assumes SMCCC 1.2 or older.
+ *
+ * x0: struct kvm_cpu_context*
+ */
+SYM_CODE_START(__kvm_hyp_host_forward_smc)
+ /*
+ * Use x18 to keep the pointer to the host context because x18
+ * is callee-saved in SMCCC but not in AAPCS64.
+ */
+ mov x18, x0
+
+ ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
+ ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
+ ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
+ ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
+ ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
+ ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
+ ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
+ ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
+ ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
+
+ smc #0
+
+ stp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
+ stp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
+ stp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
+ stp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
+ stp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
+ stp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
+ stp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
+ stp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
+ stp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
+
+ ret
+SYM_CODE_END(__kvm_hyp_host_forward_smc)
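For reference, CPU_XREG_OFFSET() is assumed here to be the usual hyp
helper giving the offset of x<n> within the pt_regs embedded in struct
kvm_cpu_context, along the lines of:

	/* Assumed definition, mirroring the hyp entry code. */
	#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)

so each ldp/stp above moves a pair of adjacent GPR slots at a time.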
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 19332c20fcde..71a17af05953 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -16,6 +16,8 @@

DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

+extern void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
unsigned long func_id = host_ctxt->regs.regs[0];
@@ -106,6 +108,27 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
host_ctxt->regs.regs[1] = ret;
}

+static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
+{
+ __kvm_hyp_host_forward_smc(host_ctxt);
+}
+
+static void skip_host_instruction(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
+static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
+{
+ default_host_smc_handler(host_ctxt);
+
+ /*
+ * Unlike HVC, the return address of an SMC is the instruction's PC.
+ * Move the return address past the instruction.
+ */
+ skip_host_instruction();
+}
+
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
u64 esr = read_sysreg_el2(SYS_ESR);
@@ -114,6 +137,9 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_HVC64:
handle_host_hcall(host_ctxt);
break;
+ case ESR_ELx_EC_SMC64:
+ handle_host_smc(host_ctxt);
+ break;
default:
hyp_panic();
}
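A note for context (architectural behavior, not something this patch
changes): EL1 SMCs only reach this EL2 vector while HCR_EL2.TSC is set,
which the later protected-mode work is expected to arrange. A minimal
sketch of what that amounts to:

	/* Sketch only: trap EL1 SMC instructions to EL2 via HCR_EL2.TSC. */
	u64 hcr = read_sysreg(hcr_el2);

	hcr |= HCR_TSC;
	write_sysreg(hcr, hcr_el2);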
--
2.29.2.299.gdc1121823c-goog

On Mon, 16 Nov 2020 20:43:02 +0000,
David Brazdil <[email protected]> wrote:
>
> [...]
>
> + smc #0
> +
> + stp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
> + stp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
> + stp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
> + stp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
> + stp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
> + stp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
> + stp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
> + stp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
> + stp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
This is going to be really good for CPUs that need to use ARCH_WA1 for
their Spectre-v2 mitigation... :-( If that's too expensive, we may
have to reduce the number of saved/restored registers, but I'm worried
the battle is already lost by the time we reach this point (the host
trap path is already a huge hammer).

Eventually, we'll have to insert the mitigation in the vectors anyway,
just like we have on the guest exit path. Boo.
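
For reference, a rough sketch of what the ARCH_WA1 sequence costs such
CPUs on entry, modeled on the existing smccc_workaround_1 helper
(illustrative only):

	stp	x2, x3, [sp, #-16]!
	stp	x0, x1, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x0, x1, [sp], #16
	ldp	x2, x3, [sp], #16

i.e. every trapped SMC turns into (at least) two SMCs plus the extra
save/restore, on top of the full GPR shuffle above.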

Thanks,

M.
--
Without deviation from the norm, progress is not possible.