2020-12-08 14:29:05

by David Brazdil

[permalink] [raw]
Subject: [PATCH 0/6] Fixes and cleanups of PSCI relay for kvmarm/next

Small batch of improvements for the 'Opt-in always-on nVHE hypervisor'
series, now merged in kvmarm/next.

Patch #1 fixes potential use of invalid v0.1 function IDs reported
by Mark Rutland, patch #2 fixes a warning reported by Qian Cai.
Patch #3 avoids a code path not used in VHE, can be dropped if any
concerns arise. The remaining patches are minor cleanups from review.

-David

David Brazdil (6):
kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs
kvm: arm64: Use lm_alias in nVHE-only VA conversion
kvm: arm64: Skip computing hyp VA layout for VHE
kvm: arm64: Minor cleanup of hyp variables used in host
kvm: arm64: Remove unused includes in psci-relay.c
kvm: arm64: Move skip_host_instruction to adjust_pc.h

arch/arm64/include/asm/kvm_host.h | 26 ++++++++++
arch/arm64/kernel/smp.c | 2 +-
arch/arm64/kvm/arm.c | 18 ++++---
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 9 ++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12 +----
arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 6 +--
arch/arm64/kvm/hyp/nvhe/psci-relay.c | 56 +++++++++++++++-------
arch/arm64/kvm/va_layout.c | 7 ++-
8 files changed, 95 insertions(+), 41 deletions(-)

--
2.29.2.576.ga3fc446d84-goog


2020-12-08 14:29:14

by David Brazdil

[permalink] [raw]
Subject: [PATCH 2/6] kvm: arm64: Use lm_alias in nVHE-only VA conversion

init_hyp_physvirt_offset() computes a PA from a kernel VA. Conversion to
the kernel linear-map address is required first, but the code used
kvm_ksym_ref() for this purpose. Under VHE that is a NOP and resulted in
a runtime warning. Replace kvm_ksym_ref with lm_alias.

Reported-by: Qian Cai <[email protected]>
Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kvm/va_layout.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf..914732b88c69 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -42,7 +42,7 @@ static void init_hyp_physvirt_offset(void)
u64 kern_va, hyp_va;

/* Compute the offset from the hyp VA and PA of a random symbol. */
- kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ kern_va = (u64)lm_alias(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
}
--
2.29.2.576.ga3fc446d84-goog

2020-12-08 14:29:49

by David Brazdil

[permalink] [raw]
Subject: [PATCH 6/6] kvm: arm64: Move skip_host_instruction to adjust_pc.h

Move the function for skipping a host instruction in the host trap
handler to a header file containing analogous helpers for guests.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 9 +++++++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12 ++----------
2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8fe..61716359035d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
}
}

+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404..a906f9e2ff34 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
__kvm_hyp_host_forward_smc(host_ctxt);
}

-static void skip_host_instruction(void)
-{
- write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);

- /*
- * Unlike HVC, the return address of an SMC is the instruction's PC.
- * Move the return address past the instruction.
- */
- skip_host_instruction();
+ /* SMC was trapped, move ELR past the current PC. */
+ kvm_skip_host_instr();
}

void handle_trap(struct kvm_cpu_context *host_ctxt)
--
2.29.2.576.ga3fc446d84-goog

2020-12-08 14:30:22

by David Brazdil

[permalink] [raw]
Subject: [PATCH 5/6] kvm: arm64: Remove unused includes in psci-relay.c

Minor cleanup removing unused includes.

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kvm/hyp/nvhe/psci-relay.c | 3 ---
1 file changed, 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 0d6f4aa39621..1f7237e45148 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
#include <uapi/linux/psci.h>

#include <nvhe/trap_handler.h>
--
2.29.2.576.ga3fc446d84-goog

2020-12-08 14:30:46

by David Brazdil

[permalink] [raw]
Subject: [PATCH 1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs

PSCI driver exposes a struct containing the PSCI v0.1 function IDs
configured in the DT. However, the struct does not convey the
information whether these were set from DT or contain the default value
zero. This could be a problem for PSCI proxy in KVM protected mode.

Extend config passed to KVM with a bit mask with individual bits set
depending on whether the corresponding function pointer in psci_ops is
set, eg. set bit for PSCI_CPU_SUSPEND if psci_ops.cpu_suspend != NULL.

Previously config was split into multiple global variables. Put
everything into a single struct for convenience.

Reported-by: Mark Rutland <[email protected]>
Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/include/asm/kvm_host.h | 20 +++++++++++
arch/arm64/kvm/arm.c | 14 +++++---
arch/arm64/kvm/hyp/nvhe/psci-relay.c | 53 +++++++++++++++++++++-------
3 files changed, 70 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 11beda85ee7e..828d50d40dc2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
+#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
@@ -240,6 +241,25 @@ struct kvm_host_data {
struct kvm_pmu_events pmu_events;
};

+#define KVM_HOST_PSCI_0_1_CPU_SUSPEND BIT(0)
+#define KVM_HOST_PSCI_0_1_CPU_ON BIT(1)
+#define KVM_HOST_PSCI_0_1_CPU_OFF BIT(2)
+#define KVM_HOST_PSCI_0_1_MIGRATE BIT(3)
+
+struct kvm_host_psci_config {
+ /* PSCI version used by host. */
+ u32 version;
+
+ /* Function IDs used by host if version is v0.1. */
+ struct psci_0_1_function_ids function_ids_0_1;
+
+ /* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*. */
+ unsigned int enabled_functions_0_1;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb..6a2f4e01b04f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -66,8 +66,6 @@ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
@@ -1618,8 +1616,16 @@ static bool init_psci_relay(void)
return false;
}

- kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
- kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+ kvm_host_psci_config.version = psci_ops.get_version();
+
+ if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+ kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+ kvm_host_psci_config.enabled_functions_0_1 =
+ (psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
+ (psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
+ (psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
+ (psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);
+ }
return true;
}

diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de69314..0d6f4aa39621 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -22,9 +22,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;

#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)

@@ -54,12 +53,41 @@ static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
return func_id;
}

+static inline bool is_psci_0_1_function_enabled(unsigned int fn_bit)
+{
+ return kvm_host_psci_config.enabled_functions_0_1 & fn_bit;
+}
+
+static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
+{
+ return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
+ (func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend);
+}
+
+static inline bool is_psci_0_1_cpu_on(u64 func_id)
+{
+ return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_ON) &&
+ (func_id == kvm_host_psci_config.function_ids_0_1.cpu_on);
+}
+
+static inline bool is_psci_0_1_cpu_off(u64 func_id)
+{
+ return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_OFF) &&
+ (func_id == kvm_host_psci_config.function_ids_0_1.cpu_off);
+}
+
+static inline bool is_psci_0_1_migrate(u64 func_id)
+{
+ return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_MIGRATE) &&
+ (func_id == kvm_host_psci_config.function_ids_0_1.migrate);
+}
+
static bool is_psci_0_1_call(u64 func_id)
{
- return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate);
+ return is_psci_0_1_cpu_suspend(func_id) ||
+ is_psci_0_1_cpu_on(func_id) ||
+ is_psci_0_1_cpu_off(func_id) ||
+ is_psci_0_1_migrate(func_id);
}

static bool is_psci_0_2_call(u64 func_id)
@@ -71,7 +99,7 @@ static bool is_psci_0_2_call(u64 func_id)

static bool is_psci_call(u64 func_id)
{
- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
return is_psci_0_1_call(func_id);
default:
@@ -248,12 +276,11 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate))
+ if (is_psci_0_1_cpu_off(func_id) || is_psci_0_1_migrate(func_id))
return psci_forward(host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+ else if (is_psci_0_1_cpu_on(func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+ else if (is_psci_0_1_cpu_suspend(func_id))
return psci_cpu_suspend(func_id, host_ctxt);
else
return PSCI_RET_NOT_SUPPORTED;
@@ -304,7 +331,7 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
if (!is_psci_call(func_id))
return false;

- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
ret = psci_0_1_handler(func_id, host_ctxt);
break;
--
2.29.2.576.ga3fc446d84-goog

2020-12-08 14:30:50

by David Brazdil

[permalink] [raw]
Subject: [PATCH 3/6] kvm: arm64: Skip computing hyp VA layout for VHE

Computing the hyp VA layout is redundant when the kernel runs in EL2 and
hyp shares its VA mappings. Make calling kvm_compute_layout()
conditional on not just CONFIG_KVM but also !is_kernel_in_hyp_mode().

Signed-off-by: David Brazdil <[email protected]>
---
arch/arm64/kernel/smp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 18e9727d3f64..4e585cc892e8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM))
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
kvm_compute_layout();
}

--
2.29.2.576.ga3fc446d84-goog

2020-12-08 16:00:31

by Marc Zyngier

[permalink] [raw]
Subject: Re: [PATCH 1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs

On 2020-12-08 14:24, David Brazdil wrote:
> PSCI driver exposes a struct containing the PSCI v0.1 function IDs
> configured in the DT. However, the struct does not convey the
> information whether these were set from DT or contain the default value
> zero. This could be a problem for PSCI proxy in KVM protected mode.
>
> Extend config passed to KVM with a bit mask with individual bits set
> depending on whether the corresponding function pointer in psci_ops is
> set, eg. set bit for PSCI_CPU_SUSPEND if psci_ops.cpu_suspend != NULL.
>
> Previously config was split into multiple global variables. Put
> everything into a single struct for convenience.
>
> Reported-by: Mark Rutland <[email protected]>
> Signed-off-by: David Brazdil <[email protected]>
> ---
> arch/arm64/include/asm/kvm_host.h | 20 +++++++++++
> arch/arm64/kvm/arm.c | 14 +++++---
> arch/arm64/kvm/hyp/nvhe/psci-relay.c | 53 +++++++++++++++++++++-------
> 3 files changed, 70 insertions(+), 17 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h
> b/arch/arm64/include/asm/kvm_host.h
> index 11beda85ee7e..828d50d40dc2 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -17,6 +17,7 @@
> #include <linux/jump_label.h>
> #include <linux/kvm_types.h>
> #include <linux/percpu.h>
> +#include <linux/psci.h>
> #include <asm/arch_gicv3.h>
> #include <asm/barrier.h>
> #include <asm/cpufeature.h>
> @@ -240,6 +241,25 @@ struct kvm_host_data {
> struct kvm_pmu_events pmu_events;
> };
>
> +#define KVM_HOST_PSCI_0_1_CPU_SUSPEND BIT(0)
> +#define KVM_HOST_PSCI_0_1_CPU_ON BIT(1)
> +#define KVM_HOST_PSCI_0_1_CPU_OFF BIT(2)
> +#define KVM_HOST_PSCI_0_1_MIGRATE BIT(3)
> +
> +struct kvm_host_psci_config {
> + /* PSCI version used by host. */
> + u32 version;
> +
> + /* Function IDs used by host if version is v0.1. */
> + struct psci_0_1_function_ids function_ids_0_1;
> +
> + /* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*.
> */
> + unsigned int enabled_functions_0_1;

Nit: the conventional type for bitmaps is 'unsigned long'.
Also, "enabled" seems odd. Isn't it actually "available"?

> +};
> +
> +extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
> +#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
> +
> struct vcpu_reset_state {
> unsigned long pc;
> unsigned long r0;
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 6e637d2b4cfb..6a2f4e01b04f 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -66,8 +66,6 @@ static DEFINE_PER_CPU(unsigned char,
> kvm_arm_hardware_enabled);
> DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
>
> extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
> -extern u32 kvm_nvhe_sym(kvm_host_psci_version);
> -extern struct psci_0_1_function_ids
> kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
>
> int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
> {
> @@ -1618,8 +1616,16 @@ static bool init_psci_relay(void)
> return false;
> }
>
> - kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
> - kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) =
> get_psci_0_1_function_ids();
> + kvm_host_psci_config.version = psci_ops.get_version();
> +
> + if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
> + kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
> + kvm_host_psci_config.enabled_functions_0_1 =
> + (psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
> + (psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
> + (psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
> + (psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);
> + }
> return true;
> }
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> index 08dc9de69314..0d6f4aa39621 100644
> --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> @@ -22,9 +22,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
> void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
>
> /* Config options set by the host. */
> -__ro_after_init u32 kvm_host_psci_version;
> -__ro_after_init struct psci_0_1_function_ids
> kvm_host_psci_0_1_function_ids;
> -__ro_after_init s64 hyp_physvirt_offset;
> +struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
> +s64 __ro_after_init hyp_physvirt_offset;

Unrelated change?

>
> #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
>
> @@ -54,12 +53,41 @@ static u64 get_psci_func_id(struct kvm_cpu_context
> *host_ctxt)
> return func_id;
> }
>
> +static inline bool is_psci_0_1_function_enabled(unsigned int fn_bit)

Don't bother with "inline" outside of an include file. It really
doesn't mean much (the compiler is free to ignore it), and it is
likely that the compiler will optimise better without guidance
(not to mention this is hardly a fast path anyway).

> +{
> + return kvm_host_psci_config.enabled_functions_0_1 & fn_bit;
> +}
> +
> +static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
> +{
> + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
> + (func_id ==
> kvm_host_psci_config.function_ids_0_1.cpu_suspend);
> +}
> +
> +static inline bool is_psci_0_1_cpu_on(u64 func_id)
> +{
> + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_ON) &&
> + (func_id == kvm_host_psci_config.function_ids_0_1.cpu_on);
> +}
> +
> +static inline bool is_psci_0_1_cpu_off(u64 func_id)
> +{
> + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_OFF) &&
> + (func_id == kvm_host_psci_config.function_ids_0_1.cpu_off);
> +}
> +
> +static inline bool is_psci_0_1_migrate(u64 func_id)
> +{
> + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_MIGRATE) &&
> + (func_id == kvm_host_psci_config.function_ids_0_1.migrate);
> +}
> +
> static bool is_psci_0_1_call(u64 func_id)
> {
> - return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
> - (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
> - (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
> - (func_id == kvm_host_psci_0_1_function_ids.migrate);
> + return is_psci_0_1_cpu_suspend(func_id) ||
> + is_psci_0_1_cpu_on(func_id) ||
> + is_psci_0_1_cpu_off(func_id) ||
> + is_psci_0_1_migrate(func_id);
> }
>
> static bool is_psci_0_2_call(u64 func_id)
> @@ -71,7 +99,7 @@ static bool is_psci_0_2_call(u64 func_id)
>
> static bool is_psci_call(u64 func_id)
> {
> - switch (kvm_host_psci_version) {
> + switch (kvm_host_psci_config.version) {
> case PSCI_VERSION(0, 1):
> return is_psci_0_1_call(func_id);
> default:
> @@ -248,12 +276,11 @@ asmlinkage void __noreturn
> kvm_host_psci_cpu_entry(bool is_cpu_on)
>
> static unsigned long psci_0_1_handler(u64 func_id, struct
> kvm_cpu_context *host_ctxt)
> {
> - if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
> - (func_id == kvm_host_psci_0_1_function_ids.migrate))
> + if (is_psci_0_1_cpu_off(func_id) || is_psci_0_1_migrate(func_id))
> return psci_forward(host_ctxt);
> - else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
> + else if (is_psci_0_1_cpu_on(func_id))
> return psci_cpu_on(func_id, host_ctxt);
> - else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
> + else if (is_psci_0_1_cpu_suspend(func_id))
> return psci_cpu_suspend(func_id, host_ctxt);
> else
> return PSCI_RET_NOT_SUPPORTED;
> @@ -304,7 +331,7 @@ bool kvm_host_psci_handler(struct kvm_cpu_context
> *host_ctxt)
> if (!is_psci_call(func_id))
> return false;
>
> - switch (kvm_host_psci_version) {
> + switch (kvm_host_psci_config.version) {
> case PSCI_VERSION(0, 1):
> ret = psci_0_1_handler(func_id, host_ctxt);
> break;

Otherwise looks OK. Don't bother respinning the series for
my comments, I can tidy things up as I apply it if there
are no other issues.

Thanks,

M.
--
Jazz is not dead. It just smells funny...

2020-12-08 17:32:59

by Mark Rutland

[permalink] [raw]
Subject: Re: [PATCH 1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs

On Tue, Dec 08, 2020 at 03:56:39PM +0000, Marc Zyngier wrote:
> On 2020-12-08 14:24, David Brazdil wrote:
> > PSCI driver exposes a struct containing the PSCI v0.1 function IDs
> > configured in the DT. However, the struct does not convey the
> > information whether these were set from DT or contain the default value
> > zero. This could be a problem for PSCI proxy in KVM protected mode.
> >
> > Extend config passed to KVM with a bit mask with individual bits set
> > depending on whether the corresponding function pointer in psci_ops is
> > set, eg. set bit for PSCI_CPU_SUSPEND if psci_ops.cpu_suspend != NULL.
> >
> > Previously config was split into multiple global variables. Put
> > everything into a single struct for convenience.
> >
> > Reported-by: Mark Rutland <[email protected]>
> > Signed-off-by: David Brazdil <[email protected]>
> > ---
> > arch/arm64/include/asm/kvm_host.h | 20 +++++++++++
> > arch/arm64/kvm/arm.c | 14 +++++---
> > arch/arm64/kvm/hyp/nvhe/psci-relay.c | 53 +++++++++++++++++++++-------
> > 3 files changed, 70 insertions(+), 17 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h
> > b/arch/arm64/include/asm/kvm_host.h
> > index 11beda85ee7e..828d50d40dc2 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -17,6 +17,7 @@
> > #include <linux/jump_label.h>
> > #include <linux/kvm_types.h>
> > #include <linux/percpu.h>
> > +#include <linux/psci.h>
> > #include <asm/arch_gicv3.h>
> > #include <asm/barrier.h>
> > #include <asm/cpufeature.h>
> > @@ -240,6 +241,25 @@ struct kvm_host_data {
> > struct kvm_pmu_events pmu_events;
> > };
> >
> > +#define KVM_HOST_PSCI_0_1_CPU_SUSPEND BIT(0)
> > +#define KVM_HOST_PSCI_0_1_CPU_ON BIT(1)
> > +#define KVM_HOST_PSCI_0_1_CPU_OFF BIT(2)
> > +#define KVM_HOST_PSCI_0_1_MIGRATE BIT(3)
> > +
> > +struct kvm_host_psci_config {
> > + /* PSCI version used by host. */
> > + u32 version;
> > +
> > + /* Function IDs used by host if version is v0.1. */
> > + struct psci_0_1_function_ids function_ids_0_1;
> > +
> > + /* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*. */
> > + unsigned int enabled_functions_0_1;
>
> Nit: the conventional type for bitmaps is 'unsigned long'.
> Also, "enabled" seems odd. Isn't it actually "available"?

Sure, that or "implemented" works here.

Since there are only 4 functions here, it might make sense to use
independent bools rather than a bitmap, which might make this a bit
simpler...

> > get_psci_0_1_function_ids();
> > + kvm_host_psci_config.version = psci_ops.get_version();
> > +
> > + if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
> > + kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
> > + kvm_host_psci_config.enabled_functions_0_1 =
> > + (psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
> > + (psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
> > + (psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
> > + (psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);

... since e.g. this could be roughly:

kvm_host_psci_config.cpu_suspend_implemented = psci_ops.cpu_suspend;
kvm_host_psci_config.cpu_off_implemented = psci_ops.cpu_off;
kvm_host_psci_config.cpu_on_implemented = psci_ops.cpu_on;
kvm_host_psci_config.migrate_implemented = psci_ops.migrate;

> > +static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
> > +{
> > + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
> > + (func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend);
> > +}

...and similarly:

return kvm_host_psci_config.cpu_suspend_implemented &&
func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend)

> Otherwise looks OK. Don't bother respinning the series for my
> comments, I can tidy things up as I apply it if there are no other
> issues.

FWIW, I'm happy with whatever you choose to do here, so don't feel like
you have to follow my suggestions above.

Thanks,
Mark.

2020-12-22 16:08:34

by Marc Zyngier

[permalink] [raw]
Subject: Re: [PATCH 0/6] Fixes and cleanups of PSCI relay for kvmarm/next

On Tue, 8 Dec 2020 14:24:46 +0000, David Brazdil wrote:
> Small batch of improvements for the 'Opt-in always-on nVHE hypervisor'
> series, now merged in kvmarm/next.
>
> Patch #1 fixes potential use of invalid v0.1 function IDs reported
> by Mark Rutland, patch #2 fixes a warning reported by Qian Cai.
> Patch #3 avoids a code path not used in VHE, can be dropped if any
> concerns arise. The remaining patches are minor cleanups from review.
>
> [...]

Applied to next, thanks!

[1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs
commit: ff367fe473a9857160c17827931375a899076394
[2/6] kvm: arm64: Use lm_alias in nVHE-only VA conversion
commit: 7a96a0687b80a1870c689418d7b72012c8bdd53d
[3/6] kvm: arm64: Skip computing hyp VA layout for VHE
commit: c3e181aec96f6ada84df1cb72a72be8970f8b284
[4/6] kvm: arm64: Minor cleanup of hyp variables used in host
commit: 61fe0c37af57ac35472a870581a7d0bb5ac2f63a
[5/6] kvm: arm64: Remove unused includes in psci-relay.c
commit: e6829e0384a49efe68537298132230bebd8bd1b3
[6/6] kvm: arm64: Move skip_host_instruction to adjust_pc.h
commit: 860a4c3d1e04a3c3e62bacbbba64417bf49768e2

Cheers,

M.
--
Without deviation from the norm, progress is not possible.


2020-12-22 16:08:50

by Marc Zyngier

[permalink] [raw]
Subject: Re: [PATCH 1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs

On Tue, 08 Dec 2020 17:26:28 +0000,
Mark Rutland <[email protected]> wrote:
>
> On Tue, Dec 08, 2020 at 03:56:39PM +0000, Marc Zyngier wrote:
> > On 2020-12-08 14:24, David Brazdil wrote:
> > > PSCI driver exposes a struct containing the PSCI v0.1 function IDs
> > > configured in the DT. However, the struct does not convey the
> > > information whether these were set from DT or contain the default value
> > > zero. This could be a problem for PSCI proxy in KVM protected mode.
> > >
> > > Extend config passed to KVM with a bit mask with individual bits set
> > > depending on whether the corresponding function pointer in psci_ops is
> > > set, eg. set bit for PSCI_CPU_SUSPEND if psci_ops.cpu_suspend != NULL.
> > >
> > > Previously config was split into multiple global variables. Put
> > > everything into a single struct for convenience.
> > >
> > > Reported-by: Mark Rutland <[email protected]>
> > > Signed-off-by: David Brazdil <[email protected]>
> > > ---
> > > arch/arm64/include/asm/kvm_host.h | 20 +++++++++++
> > > arch/arm64/kvm/arm.c | 14 +++++---
> > > arch/arm64/kvm/hyp/nvhe/psci-relay.c | 53 +++++++++++++++++++++-------
> > > 3 files changed, 70 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/arch/arm64/include/asm/kvm_host.h
> > > b/arch/arm64/include/asm/kvm_host.h
> > > index 11beda85ee7e..828d50d40dc2 100644
> > > --- a/arch/arm64/include/asm/kvm_host.h
> > > +++ b/arch/arm64/include/asm/kvm_host.h
> > > @@ -17,6 +17,7 @@
> > > #include <linux/jump_label.h>
> > > #include <linux/kvm_types.h>
> > > #include <linux/percpu.h>
> > > +#include <linux/psci.h>
> > > #include <asm/arch_gicv3.h>
> > > #include <asm/barrier.h>
> > > #include <asm/cpufeature.h>
> > > @@ -240,6 +241,25 @@ struct kvm_host_data {
> > > struct kvm_pmu_events pmu_events;
> > > };
> > >
> > > +#define KVM_HOST_PSCI_0_1_CPU_SUSPEND BIT(0)
> > > +#define KVM_HOST_PSCI_0_1_CPU_ON BIT(1)
> > > +#define KVM_HOST_PSCI_0_1_CPU_OFF BIT(2)
> > > +#define KVM_HOST_PSCI_0_1_MIGRATE BIT(3)
> > > +
> > > +struct kvm_host_psci_config {
> > > + /* PSCI version used by host. */
> > > + u32 version;
> > > +
> > > + /* Function IDs used by host if version is v0.1. */
> > > + struct psci_0_1_function_ids function_ids_0_1;
> > > +
> > > + /* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*. */
> > > + unsigned int enabled_functions_0_1;
> >
> > Nit: the conventional type for bitmaps is 'unsigned long'.
> > Also, "enabled" seems odd. Isn't it actually "available"?
>
> Sure, that or "implemented" works here.
>
> Since there are only 4 functions here, it might make sense to use
> independent bools rather than a bitmap, which might make this a bit
> simpler...
>
> > > get_psci_0_1_function_ids();
> > > + kvm_host_psci_config.version = psci_ops.get_version();
> > > +
> > > + if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
> > > + kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
> > > + kvm_host_psci_config.enabled_functions_0_1 =
> > > + (psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
> > > + (psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
> > > + (psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
> > > + (psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);
>
> ... since e.g. this could be roughly:
>
> kvm_host_psci_config.cpu_suspend_implemented = psci_ops.cpu_suspend;
> kvm_host_psci_config.cpu_off_implemented = psci_ops.cpu_off;
> kvm_host_psci_config.cpu_on_implemented = psci_ops.cpu_on;
> kvm_host_psci_config.migrate_implemented = psci_ops.migrate;
>
> > > +static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
> > > +{
> > > + return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
> > > + (func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend);
> > > +}
>
> ...and similarly:
>
> return kvm_host_psci_config.cpu_suspend_implemented &&
> func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend)
>
> > Otherwise looks OK. Don't bother respinning the series for my
> > comments, I can tidy things up as I apply it if there are no other
> > issues.
>
> FWIW, I'm happy with whatever you choose to do here, so don't feel like
> you have to follow my suggestions above.

FWIW, I've queued the following patch on top of the series, using the
above suggestions and some creative use of macros, allowing some
cleanup.

M.

From 767c973f2e4a9264a4f159c9fad5ca8acdb9915e Mon Sep 17 00:00:00 2001
From: Marc Zyngier <[email protected]>
Date: Tue, 22 Dec 2020 12:46:41 +0000
Subject: [PATCH] KVM: arm64: Declutter host PSCI 0.1 handling

Although there is nothing wrong with the current host PSCI relay
implementation, we can clean it up and remove some of the helpers
that do not improve the overall readability of the legacy PSCI 0.1
handling.

Opportunity is taken to turn the bitmap into a set of booleans,
and creative use of preprocessor macros make init and check
more concise/readable.

Suggested-by: Mark Rutland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
---
arch/arm64/include/asm/kvm_host.h | 11 ++--
arch/arm64/kvm/arm.c | 12 +++--
arch/arm64/kvm/hyp/nvhe/psci-relay.c | 77 +++++++---------------------
3 files changed, 30 insertions(+), 70 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bce2452b305c..8fcfab0c2567 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -241,11 +241,6 @@ struct kvm_host_data {
struct kvm_pmu_events pmu_events;
};

-#define KVM_HOST_PSCI_0_1_CPU_SUSPEND BIT(0)
-#define KVM_HOST_PSCI_0_1_CPU_ON BIT(1)
-#define KVM_HOST_PSCI_0_1_CPU_OFF BIT(2)
-#define KVM_HOST_PSCI_0_1_MIGRATE BIT(3)
-
struct kvm_host_psci_config {
/* PSCI version used by host. */
u32 version;
@@ -253,8 +248,10 @@ struct kvm_host_psci_config {
/* Function IDs used by host if version is v0.1. */
struct psci_0_1_function_ids function_ids_0_1;

- /* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*. */
- unsigned int enabled_functions_0_1;
+ bool psci_0_1_cpu_suspend_implemented;
+ bool psci_0_1_cpu_on_implemented;
+ bool psci_0_1_cpu_off_implemented;
+ bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 836ca763b91d..e207e4541f55 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1603,6 +1603,9 @@ static void init_cpu_logical_map(void)
hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}

+#define init_psci_0_1_impl_state(config, what) \
+ config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
static bool init_psci_relay(void)
{
/*
@@ -1618,11 +1621,10 @@ static bool init_psci_relay(void)

if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
- kvm_host_psci_config.enabled_functions_0_1 =
- (psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
- (psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
- (psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
- (psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+ init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
}
return true;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 1f7237e45148..e3947846ffcb 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -43,48 +43,16 @@ struct psci_boot_args {
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
-
- return func_id;
-}
-
-static inline bool is_psci_0_1_function_enabled(unsigned int fn_bit)
-{
- return kvm_host_psci_config.enabled_functions_0_1 & fn_bit;
-}
-
-static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
-{
- return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
- (func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend);
-}
-
-static inline bool is_psci_0_1_cpu_on(u64 func_id)
-{
- return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_ON) &&
- (func_id == kvm_host_psci_config.function_ids_0_1.cpu_on);
-}
-
-static inline bool is_psci_0_1_cpu_off(u64 func_id)
-{
- return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_OFF) &&
- (func_id == kvm_host_psci_config.function_ids_0_1.cpu_off);
-}
-
-static inline bool is_psci_0_1_migrate(u64 func_id)
-{
- return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_MIGRATE) &&
- (func_id == kvm_host_psci_config.function_ids_0_1.migrate);
-}
+#define is_psci_0_1(what, func_id) \
+ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+ (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
- return is_psci_0_1_cpu_suspend(func_id) ||
- is_psci_0_1_cpu_on(func_id) ||
- is_psci_0_1_cpu_off(func_id) ||
- is_psci_0_1_migrate(func_id);
+ return (is_psci_0_1(cpu_suspend, func_id) ||
+ is_psci_0_1(cpu_on, func_id) ||
+ is_psci_0_1(cpu_off, func_id) ||
+ is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
@@ -94,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
(PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

-static bool is_psci_call(u64 func_id)
-{
- switch (kvm_host_psci_config.version) {
- case PSCI_VERSION(0, 1):
- return is_psci_0_1_call(func_id);
- default:
- return is_psci_0_2_call(func_id);
- }
-}
-
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
@@ -273,14 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if (is_psci_0_1_cpu_off(func_id) || is_psci_0_1_migrate(func_id))
+ if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
return psci_forward(host_ctxt);
- else if (is_psci_0_1_cpu_on(func_id))
+ if (is_psci_0_1(cpu_on, func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (is_psci_0_1_cpu_suspend(func_id))
+ if (is_psci_0_1(cpu_suspend, func_id))
return psci_cpu_suspend(func_id, host_ctxt);
- else
- return PSCI_RET_NOT_SUPPORTED;
+
+ return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -322,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
- u64 func_id = get_psci_func_id(host_ctxt);
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;

- if (!is_psci_call(func_id))
- return false;
-
switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
+ if (!is_psci_0_1_call(func_id))
+ return false;
ret = psci_0_1_handler(func_id, host_ctxt);
break;
case PSCI_VERSION(0, 2):
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_0_2_handler(func_id, host_ctxt);
break;
default:
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_1_0_handler(func_id, host_ctxt);
break;
}
--
2.29.2


--
Without deviation from the norm, progress is not possible.