2021-11-06 05:40:45

by Atish Patra

Subject: [PATCH v4 0/5] Add SBI v0.2 support for KVM

The Supervisor Binary Interface (SBI) specification [1] now defines a
base extension that provides extensibility for adding future extensions
while maintaining backward compatibility with previous versions.
The new version is defined as v0.2 and the older version is marked as v0.1.
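
For context, under v0.2 a guest discovers the spec version and the
availability of individual extensions at runtime through the base
extension. A minimal guest-side sketch (illustrative only, assuming the
sbi_ecall() helper and SBI_EXT_* constants from the kernel's asm/sbi.h):

#include <linux/printk.h>
#include <asm/sbi.h>

/* Illustrative only: probe the SBI spec version and the HSM extension. */
static void sbi_probe_example(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_SPEC_VERSION,
			0, 0, 0, 0, 0, 0);
	if (!ret.error)
		pr_info("SBI spec version: 0x%lx\n", ret.value);

	/* A non-zero value means the queried extension is implemented. */
	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT,
			SBI_EXT_HSM, 0, 0, 0, 0, 0);
	if (!ret.error && ret.value)
		pr_info("SBI HSM extension available\n");
}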

This series adds the following features to RISC-V Linux KVM:
1. SBI v0.2 support in KVM
2. SBI Hart State Management (HSM) extension support in KVM
3. Ordered booting of guest vcpus in guest Linux

This series is based on the base KVM series, which is already part of kvm-next [2].

The guest kernel also needs to support SBI v0.2 and the HSM extension
in order to boot multiple vcpus. The Linux kernel supports both starting
from v5.7. In the absence of that, the guest can only boot one vcpu.
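
Ordered booting works by having the boot vcpu bring up each secondary
vcpu through the HSM hart_start call, which this series makes KVM
handle. A guest-side sketch (illustrative only, not taken from this
series; the constants and sbi_ecall() helper come from asm/sbi.h):

#include <linux/errno.h>
#include <asm/sbi.h>

/* Illustrative only: ask the SBI implementation (here KVM) to start a hart. */
static int hsm_hart_start_example(unsigned long hartid,
				  unsigned long start_addr,
				  unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
			hartid, start_addr, opaque, 0, 0, 0);

	/* A real caller would translate the SBI error code properly. */
	return ret.error ? -EINVAL : 0;
}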

Changes from v3->v4:
1. Fixed the commit text title.
2. Removed a redundant memory barrier from patch 4.
3. Replaced preempt_enable/disable with get_cpu/put_cpu.
4. Renamed the existing implementation to v01 instead of legacy.

Changes from v2->v3:
1. Rebased on the latest merged kvm series.
2. Dropped the reset extension patch because the reset extension is not merged in the kernel yet.
However, my tree [3] still contains it in case anybody wants to test it.

Changes from v1->v2:
1. Sent patch 1 separately as it can be merged independently.
2. Added Reset extension functionality.

Tested on QEMU and a Rocket core on FPGA.

[1] https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
[3] https://github.com/atishp04/linux/tree/kvm_sbi_v03_reset
[4] https://github.com/atishp04/linux/tree/kvm_sbi_v03

Atish Patra (5):
RISC-V: KVM: Mark the existing SBI implementation as v01
RISC-V: KVM: Reorganize SBI code by moving SBI v0.1 to its own file
RISC-V: KVM: Add SBI v0.2 base extension
RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v02
RISC-V: KVM: Add SBI HSM extension in KVM

arch/riscv/include/asm/kvm_vcpu_sbi.h | 33 +++++
arch/riscv/include/asm/sbi.h | 9 ++
arch/riscv/kvm/Makefile | 4 +
arch/riscv/kvm/vcpu.c | 23 +++
arch/riscv/kvm/vcpu_sbi.c | 206 ++++++++++++--------------
arch/riscv/kvm/vcpu_sbi_base.c | 73 +++++++++
arch/riscv/kvm/vcpu_sbi_hsm.c | 107 +++++++++++++
arch/riscv/kvm/vcpu_sbi_replace.c | 136 +++++++++++++++++
arch/riscv/kvm/vcpu_sbi_v01.c | 129 ++++++++++++++++
9 files changed, 609 insertions(+), 111 deletions(-)
create mode 100644 arch/riscv/include/asm/kvm_vcpu_sbi.h
create mode 100644 arch/riscv/kvm/vcpu_sbi_base.c
create mode 100644 arch/riscv/kvm/vcpu_sbi_hsm.c
create mode 100644 arch/riscv/kvm/vcpu_sbi_replace.c
create mode 100644 arch/riscv/kvm/vcpu_sbi_v01.c

--
2.31.1


2021-11-06 06:42:22

by Atish Patra

Subject: [PATCH v4 4/5] RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v02

SBI v0.2 contains improved versions of the required v0.1 extensions
such as remote fence, timer, and IPI.

This patch implements those extensions.

Signed-off-by: Atish Patra <[email protected]>
---
arch/riscv/kvm/Makefile | 1 +
arch/riscv/kvm/vcpu_sbi.c | 7 ++
arch/riscv/kvm/vcpu_sbi_replace.c | 136 ++++++++++++++++++++++++++++++
3 files changed, 144 insertions(+)
create mode 100644 arch/riscv/kvm/vcpu_sbi_replace.c

diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 84c02922a329..4757ae158bf3 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -25,4 +25,5 @@ kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_sbi_base.o
+kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_timer.o
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 92b682f4f29e..3502c6166eba 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -40,9 +40,16 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
};
#endif
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
&vcpu_sbi_ext_v01,
&vcpu_sbi_ext_base,
+ &vcpu_sbi_ext_time,
+ &vcpu_sbi_ext_ipi,
+ &vcpu_sbi_ext_rfence,
};

void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
new file mode 100644
index 000000000000..a80fa7691b14
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <[email protected]>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ u64 next_cycle;
+
+ if (!cp || (cp->a6 != SBI_EXT_TIME_SET_TIMER))
+ return -EINVAL;
+
+#if __riscv_xlen == 32
+ next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+ next_cycle = (u64)cp->a0;
+#endif
+ kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
+ .extid_start = SBI_EXT_TIME,
+ .extid_end = SBI_EXT_TIME,
+ .handler = kvm_sbi_ext_time_handler,
+};
+
+static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int i, ret = 0;
+ struct kvm_vcpu *tmp;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long hmask = cp->a0;
+ unsigned long hbase = cp->a1;
+
+ if (!cp || (cp->a6 != SBI_EXT_IPI_SEND_IPI))
+ return -EINVAL;
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (hbase != -1UL) {
+ if (tmp->vcpu_id < hbase)
+ continue;
+ if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ continue;
+ }
+ ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
+ .extid_start = SBI_EXT_IPI,
+ .extid_end = SBI_EXT_IPI,
+ .handler = kvm_sbi_ext_ipi_handler,
+};
+
+static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int i, ret = 0;
+ struct cpumask cm, hm;
+ struct kvm_vcpu *tmp;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long hmask = cp->a0;
+ unsigned long hbase = cp->a1;
+ unsigned long funcid = cp->a6;
+
+ if (!cp)
+ return -EINVAL;
+
+ cpumask_clear(&cm);
+ cpumask_clear(&hm);
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (hbase != -1UL) {
+ if (tmp->vcpu_id < hbase)
+ continue;
+ if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ continue;
+ }
+ if (tmp->cpu < 0)
+ continue;
+ cpumask_set_cpu(tmp->cpu, &cm);
+ }
+
+ riscv_cpuid_to_hartid_mask(&cm, &hm);
+
+ switch (funcid) {
+ case SBI_EXT_RFENCE_REMOTE_FENCE_I:
+ ret = sbi_remote_fence_i(cpumask_bits(&hm));
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), cp->a2, cp->a3);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), cp->a2,
+ cp->a3, cp->a4);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
+ /* TODO: implement for nested hypervisor case */
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
+ .extid_start = SBI_EXT_RFENCE,
+ .extid_end = SBI_EXT_RFENCE,
+ .handler = kvm_sbi_ext_rfence_handler,
+};
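
For reference only (not part of the patch): the handlers above service
guest calls of the following shape. hmask is a hart bitmap relative to
hbase, and an hbase of -1UL means "all harts", matching the way the
loops above walk the vcpus. Illustrative sketch using the guest-side
constants from asm/sbi.h:

#include <asm/sbi.h>

/* Illustrative only: guest-side calls serviced by the handlers above. */
static void guest_sbi_calls_example(void)
{
	/* IPI to harts 1 and 2: hmask = 0x6, hbase = 0. */
	sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI, 0x6, 0, 0, 0, 0, 0);

	/* Remote FENCE.I on all harts: hbase = -1UL, hmask ignored. */
	sbi_ecall(SBI_EXT_RFENCE, SBI_EXT_RFENCE_REMOTE_FENCE_I,
		  0, -1UL, 0, 0, 0, 0);
}
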
--
2.31.1

2021-11-06 11:55:27

by Anup Patel

Subject: Re: [PATCH v4 4/5] RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v02

On Sat, Nov 6, 2021 at 5:29 AM Atish Patra <[email protected]> wrote:
>
> SBI v0.2 contains improved versions of the required v0.1 extensions
> such as remote fence, timer, and IPI.
>
> This patch implements those extensions.
>
> Signed-off-by: Atish Patra <[email protected]>
> ---
> arch/riscv/kvm/Makefile | 1 +
> arch/riscv/kvm/vcpu_sbi.c | 7 ++
> arch/riscv/kvm/vcpu_sbi_replace.c | 136 ++++++++++++++++++++++++++++++
> 3 files changed, 144 insertions(+)
> create mode 100644 arch/riscv/kvm/vcpu_sbi_replace.c
>
> diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
> index 84c02922a329..4757ae158bf3 100644
> --- a/arch/riscv/kvm/Makefile
> +++ b/arch/riscv/kvm/Makefile
> @@ -25,4 +25,5 @@ kvm-y += vcpu_switch.o
> kvm-y += vcpu_sbi.o
> kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
> kvm-y += vcpu_sbi_base.o
> +kvm-y += vcpu_sbi_replace.o
> kvm-y += vcpu_timer.o
> diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
> index 92b682f4f29e..3502c6166eba 100644
> --- a/arch/riscv/kvm/vcpu_sbi.c
> +++ b/arch/riscv/kvm/vcpu_sbi.c
> @@ -40,9 +40,16 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
> };
> #endif
> extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
> +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
> +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
> +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
> +
> static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
> &vcpu_sbi_ext_v01,
> &vcpu_sbi_ext_base,
> + &vcpu_sbi_ext_time,
> + &vcpu_sbi_ext_ipi,
> + &vcpu_sbi_ext_rfence,
> };
>
> void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
> diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
> new file mode 100644
> index 000000000000..a80fa7691b14
> --- /dev/null
> +++ b/arch/riscv/kvm/vcpu_sbi_replace.c
> @@ -0,0 +1,136 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2021 Western Digital Corporation or its affiliates.
> + *
> + * Authors:
> + * Atish Patra <[email protected]>
> + */
> +
> +#include <linux/errno.h>
> +#include <linux/err.h>
> +#include <linux/kvm_host.h>
> +#include <asm/csr.h>
> +#include <asm/sbi.h>
> +#include <asm/kvm_vcpu_timer.h>
> +#include <asm/kvm_vcpu_sbi.h>
> +
> +static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
> + unsigned long *out_val,
> + struct kvm_cpu_trap *utrap, bool *exit)
> +{
> + int ret = 0;
> + struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> + u64 next_cycle;
> +
> + if (!cp || (cp->a6 != SBI_EXT_TIME_SET_TIMER))
> + return -EINVAL;
> +
> +#if __riscv_xlen == 32
> + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
> +#else
> + next_cycle = (u64)cp->a0;
> +#endif
> + kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
> +
> + return ret;
> +}
> +
> +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
> + .extid_start = SBI_EXT_TIME,
> + .extid_end = SBI_EXT_TIME,
> + .handler = kvm_sbi_ext_time_handler,
> +};
> +
> +static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
> + unsigned long *out_val,
> + struct kvm_cpu_trap *utrap, bool *exit)
> +{
> + int i, ret = 0;
> + struct kvm_vcpu *tmp;
> + struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> + unsigned long hmask = cp->a0;
> + unsigned long hbase = cp->a1;
> +
> + if (!cp || (cp->a6 != SBI_EXT_IPI_SEND_IPI))
> + return -EINVAL;
> +
> + kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
> + if (hbase != -1UL) {
> + if (tmp->vcpu_id < hbase)
> + continue;
> + if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
> + continue;
> + }
> + ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
> + if (ret < 0)
> + break;
> + }
> +
> + return ret;
> +}
> +
> +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
> + .extid_start = SBI_EXT_IPI,
> + .extid_end = SBI_EXT_IPI,
> + .handler = kvm_sbi_ext_ipi_handler,
> +};
> +
> +static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
> + unsigned long *out_val,
> + struct kvm_cpu_trap *utrap, bool *exit)
> +{
> + int i, ret = 0;
> + struct cpumask cm, hm;
> + struct kvm_vcpu *tmp;
> + struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> + unsigned long hmask = cp->a0;
> + unsigned long hbase = cp->a1;
> + unsigned long funcid = cp->a6;
> +
> + if (!cp)
> + return -EINVAL;
> +
> + cpumask_clear(&cm);
> + cpumask_clear(&hm);
> + kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
> + if (hbase != -1UL) {
> + if (tmp->vcpu_id < hbase)
> + continue;
> + if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
> + continue;
> + }
> + if (tmp->cpu < 0)
> + continue;
> + cpumask_set_cpu(tmp->cpu, &cm);
> + }
> +
> + riscv_cpuid_to_hartid_mask(&cm, &hm);
> +
> + switch (funcid) {
> + case SBI_EXT_RFENCE_REMOTE_FENCE_I:
> + ret = sbi_remote_fence_i(cpumask_bits(&hm));
> + break;
> + case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
> + ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), cp->a2, cp->a3);
> + break;
> + case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
> + ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), cp->a2,
> + cp->a3, cp->a4);
> + break;
> + case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
> + case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
> + case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
> + case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
> + /* TODO: implement for nested hypervisor case */
> + default:
> + ret = -EOPNOTSUPP;
> + }
> +
> + return ret;
> +}
> +
> +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
> + .extid_start = SBI_EXT_RFENCE,
> + .extid_end = SBI_EXT_RFENCE,
> + .handler = kvm_sbi_ext_rfence_handler,
> +};
> --
> 2.31.1
>

Drop checks on "cp" pointer in various functions above.
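
The "cp" pointer is &vcpu->arch.guest_context, so it can never be NULL.
For example, the check in the time handler would simply become:

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
		return -EINVAL;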

Otherwise it looks good to me.

Reviewed-by: Anup Patel <[email protected]>

Regards,
Anup