2023-11-15 09:32:28

by zhaotianrui

[permalink] [raw]
Subject: [PATCH v1 0/2] LoongArch: KVM: Add LSX,LASX support

This patch series adds LSX and LASX support for LoongArch KVM.
LSX: LoongArch 128-bit vector instructions
LASX: LoongArch 256-bit vector instructions

An LSX or LASX exception will occur in KVM when the guest uses
LSX or LASX instructions. KVM will enable LSX/LASX and restore
the vector registers for the guest, then return to the guest to
continue running.

Changes for v1:
(1) Add LSX support for LoongArch KVM.
(2) Add LASX support for LoongArch KVM.

Tianrui Zhao (1):
LoongArch: KVM: Add lsx support

zhaotianrui (1):
LoongArch: KVM: Add lasx support

arch/loongarch/include/asm/kvm_host.h | 12 ++++
arch/loongarch/include/asm/kvm_vcpu.h | 22 +++++++
arch/loongarch/kernel/fpu.S | 1 +
arch/loongarch/kvm/exit.c | 36 +++++++++++
arch/loongarch/kvm/switch.S | 38 ++++++++++++
arch/loongarch/kvm/trace.h | 6 +-
arch/loongarch/kvm/vcpu.c | 88 ++++++++++++++++++++++++++-
7 files changed, 199 insertions(+), 4 deletions(-)

--
2.39.1


2023-11-15 09:32:32

by zhaotianrui

[permalink] [raw]
Subject: [PATCH v1 2/2] LoongArch: KVM: Add lasx support

This patch adds LASX support for LoongArch KVM. LASX is the
LoongArch 256-bit vector instruction set.
A LASX exception will occur in KVM when the guest uses a LASX
instruction. KVM will enable LASX and restore the vector
registers for the guest, then return to the guest to continue running.

Signed-off-by: Tianrui Zhao <[email protected]>
---
arch/loongarch/include/asm/kvm_host.h | 6 ++++
arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
arch/loongarch/kernel/fpu.S | 1 +
arch/loongarch/kvm/exit.c | 18 +++++++++++
arch/loongarch/kvm/switch.S | 16 ++++++++++
arch/loongarch/kvm/trace.h | 4 ++-
arch/loongarch/kvm/vcpu.c | 43 ++++++++++++++++++++++++++-
7 files changed, 96 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 6c65c25169..4c05b5eca0 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -95,6 +95,7 @@ enum emulation_result {
#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
#define KVM_LARCH_LSX (0x1 << 3)
+#define KVM_LARCH_LASX (0x1 << 4)

struct kvm_vcpu_arch {
/*
@@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LSX;
}

+static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & CPUCFG2_LASX;
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index c629771e12..4f87f16018 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
#endif

+#ifdef CONFIG_CPU_HAS_LASX
+void kvm_own_lasx(struct kvm_vcpu *vcpu);
+void kvm_save_lasx(struct loongarch_fpu *fpu);
+void kvm_restore_lasx(struct loongarch_fpu *fpu);
+#else
+static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
+static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
+#endif
+
void kvm_acquire_timer(struct kvm_vcpu *vcpu);
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index d53ab10f46..f4524fe866 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
lasx_restore_all_upper a0 t0 t1
jr ra
SYM_FUNC_END(_restore_lasx_upper)
+EXPORT_SYMBOL(_restore_lasx_upper)

SYM_FUNC_START(_init_lasx_upper)
lasx_init_all_upper t1
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 1b1c58ccc8..57bd5bf562 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -676,6 +676,23 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}

+/*
+ * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use LASX when it is disabled in the root
+ * context.
+ */
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lasx(&vcpu->arch))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+ else
+ kvm_own_lasx(vcpu);
+
+ return RESUME_GUEST;
+}
+
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
@@ -705,6 +722,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_TLBM] = kvm_handle_write_fault,
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
+ [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
};

diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 32ba092a44..a129f8e82c 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -267,6 +267,22 @@ SYM_FUNC_START(kvm_restore_lsx_upper)
SYM_FUNC_END(kvm_restore_lsx_upper)
#endif

+#ifdef CONFIG_CPU_HAS_LASX
+SYM_FUNC_START(kvm_save_lasx)
+ fpu_save_csr a0 t1
+ fpu_save_cc a0 t1 t2
+ lasx_save_data a0 t1
+
+ jirl zero, ra, 0
+SYM_FUNC_END(kvm_save_lasx)
+
+SYM_FUNC_START(kvm_restore_lasx)
+ lasx_restore_data a0 t1
+ fpu_restore_cc a0 t1 t2
+ fpu_restore_csr a0 t1
+ jirl zero, ra, 0
+SYM_FUNC_END(kvm_restore_lasx)
+#endif
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index 7da4e230e8..c2484ad4cf 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -103,6 +103,7 @@ TRACE_EVENT(kvm_exit_gspr,

#define KVM_TRACE_AUX_FPU 1
#define KVM_TRACE_AUX_LSX 2
+#define KVM_TRACE_AUX_LASX 3

#define kvm_trace_symbol_aux_op \
{ KVM_TRACE_AUX_SAVE, "save" }, \
@@ -113,7 +114,8 @@ TRACE_EVENT(kvm_exit_gspr,

#define kvm_trace_symbol_aux_state \
{ KVM_TRACE_AUX_FPU, "FPU" }, \
- { KVM_TRACE_AUX_LSX, "LSX" }
+ { KVM_TRACE_AUX_LSX, "LSX" }, \
+ { KVM_TRACE_AUX_LASX, "LASX" }

TRACE_EVENT(kvm_aux,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index f0bb583353..204654a359 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -384,6 +384,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
ret = -EINVAL;
}
+ if (id == 2 && v & CPUCFG2_LASX && !cpu_has_lasx) {
+ vcpu->arch.cpucfg[id] &= ~CPUCFG2_LASX;
+ ret = -EINVAL;
+ }
} else
ret = -EINVAL;
break;
@@ -595,12 +599,49 @@ void kvm_own_lsx(struct kvm_vcpu *vcpu)
}
#endif

+#ifdef CONFIG_CPU_HAS_LASX
+/* Enable LASX for guest and restore context */
+void kvm_own_lasx(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+
+ set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+ switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
+ case KVM_LARCH_LSX | KVM_LARCH_FPU:
+ case KVM_LARCH_LSX:
+ /* Guest LSX state already loaded, only restore upper LASX state */
+ _restore_lasx_upper(&vcpu->arch.fpu);
+ break;
+ case KVM_LARCH_FPU:
+ /* Guest FP state already loaded, only restore 64~256 LASX state */
+ kvm_restore_lsx_upper(&vcpu->arch.fpu);
+ _restore_lasx_upper(&vcpu->arch.fpu);
+ break;
+ default:
+ /* Neither FP or LSX already active, restore full LASX state */
+ kvm_restore_lasx(&vcpu->arch.fpu);
+ break;
+ }
+
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
+ preempt_enable();
+}
+#endif
+
/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();

- if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
+ kvm_save_lasx(&vcpu->arch.fpu);
+ vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
+
+ /* Disable LASX & LSX & FPU */
+ clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+ } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
kvm_save_lsx(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
--
2.39.1

2023-11-15 09:32:40

by zhaotianrui

[permalink] [raw]
Subject: [PATCH v1 1/2] LoongArch: KVM: Add lsx support

This patch adds LSX support for LoongArch KVM. LSX is the
LoongArch 128-bit vector instruction set.
An LSX exception will occur in KVM when the guest uses an LSX
instruction. KVM will enable LSX and restore the vector
registers for the guest, then return to the guest to continue running.


Signed-off-by: Tianrui Zhao <[email protected]>
---
arch/loongarch/include/asm/kvm_host.h | 6 ++++
arch/loongarch/include/asm/kvm_vcpu.h | 12 +++++++
arch/loongarch/kvm/exit.c | 18 ++++++++++
arch/loongarch/kvm/switch.S | 22 +++++++++++++
arch/loongarch/kvm/trace.h | 4 ++-
arch/loongarch/kvm/vcpu.c | 47 +++++++++++++++++++++++++--
6 files changed, 105 insertions(+), 4 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 11328700d4..6c65c25169 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -94,6 +94,7 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
+#define KVM_LARCH_LSX (0x1 << 3)

struct kvm_vcpu_arch {
/*
@@ -175,6 +176,11 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned
csr->csrs[reg] = val;
}

+static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & CPUCFG2_LSX;
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 553cfa2b2b..c629771e12 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -55,6 +55,18 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fcsr(struct loongarch_fpu *fpu);

+#ifdef CONFIG_CPU_HAS_LSX
+void kvm_own_lsx(struct kvm_vcpu *vcpu);
+void kvm_save_lsx(struct loongarch_fpu *fpu);
+void kvm_restore_lsx(struct loongarch_fpu *fpu);
+void kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
+#else
+static inline void kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
+static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
+static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
+#endif
+
void kvm_acquire_timer(struct kvm_vcpu *vcpu);
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ce8de3fa47..1b1c58ccc8 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -659,6 +659,23 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}

+/*
+ * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use LSX when it is disabled in the root
+ * context.
+ */
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lsx(&vcpu->arch))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+ else
+ kvm_own_lsx(vcpu);
+
+ return RESUME_GUEST;
+}
+
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
@@ -687,6 +704,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_TLBS] = kvm_handle_write_fault,
[EXCCODE_TLBM] = kvm_handle_write_fault,
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
+ [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
};

diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 0ed9040307..32ba092a44 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -245,6 +245,28 @@ SYM_FUNC_START(kvm_restore_fpu)
jr ra
SYM_FUNC_END(kvm_restore_fpu)

+#ifdef CONFIG_CPU_HAS_LSX
+SYM_FUNC_START(kvm_save_lsx)
+ fpu_save_csr a0 t1
+ fpu_save_cc a0 t1 t2
+ lsx_save_data a0 t1
+ jirl zero, ra, 0
+SYM_FUNC_END(kvm_save_lsx)
+
+SYM_FUNC_START(kvm_restore_lsx)
+ lsx_restore_data a0 t1
+ fpu_restore_cc a0 t1 t2
+ fpu_restore_csr a0 t1
+ jirl zero, ra, 0
+SYM_FUNC_END(kvm_restore_lsx)
+
+SYM_FUNC_START(kvm_restore_lsx_upper)
+ lsx_restore_all_upper a0 t0 t1
+
+ jirl zero, ra, 0
+SYM_FUNC_END(kvm_restore_lsx_upper)
+#endif
+
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
index a1e35d6554..7da4e230e8 100644
--- a/arch/loongarch/kvm/trace.h
+++ b/arch/loongarch/kvm/trace.h
@@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr,
#define KVM_TRACE_AUX_DISCARD 4

#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_LSX 2

#define kvm_trace_symbol_aux_op \
{ KVM_TRACE_AUX_SAVE, "save" }, \
@@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr,
{ KVM_TRACE_AUX_DISCARD, "discard" }

#define kvm_trace_symbol_aux_state \
- { KVM_TRACE_AUX_FPU, "FPU" }
+ { KVM_TRACE_AUX_FPU, "FPU" }, \
+ { KVM_TRACE_AUX_LSX, "LSX" }

TRACE_EVENT(kvm_aux,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 73d0c2b9c1..f0bb583353 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -378,9 +378,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
break;
case KVM_REG_LOONGARCH_CPUCFG:
id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
- if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
+ if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) {
vcpu->arch.cpucfg[id] = (u32)v;
- else
+ if (id == 2 && v & CPUCFG2_LSX && !cpu_has_lsx) {
+ vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
+ ret = -EINVAL;
+ }
+ } else
ret = -EINVAL;
break;
case KVM_REG_LOONGARCH_KVM:
@@ -561,12 +565,49 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
preempt_enable();
}

+#ifdef CONFIG_CPU_HAS_LSX
+/* Enable LSX for guest and restore context */
+void kvm_own_lsx(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+
+ /* Enable LSX for guest */
+ set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
+ switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+ case KVM_LARCH_FPU:
+ /*
+ * Guest FPU state already loaded,
+ * only restore upper LSX state
+ */
+ kvm_restore_lsx_upper(&vcpu->arch.fpu);
+ break;
+ default:
+ /* Neither FP or LSX already active,
+ * restore full LSX state
+ */
+ kvm_restore_lsx(&vcpu->arch.fpu);
+ break;
+ }
+
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
+ preempt_enable();
+}
+#endif
+
/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();

- if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
+ kvm_save_lsx(&vcpu->arch.fpu);
+ vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
+
+ /* Disable LSX & FPU */
+ clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
+ } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
kvm_save_fpu(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
--
2.39.1

2023-11-15 19:36:29

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH v1 1/2] LoongArch: KVM: Add lsx support

Hi Tianrui,

kernel test robot noticed the following build errors:

[auto build test ERROR on kvm/queue]
[also build test ERROR on linus/master v6.7-rc1 next-20231115]
[cannot apply to mst-vhost/linux-next kvm/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Tianrui-Zhao/LoongArch-KVM-Add-lsx-support/20231115-173658
base: https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
patch link: https://lore.kernel.org/r/20231115091921.85516-2-zhaotianrui%40loongson.cn
patch subject: [PATCH v1 1/2] LoongArch: KVM: Add lsx support
config: loongarch-allmodconfig (https://download.01.org/0day-ci/archive/20231116/[email protected]/config)
compiler: loongarch64-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231116/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All errors (new ones prefixed by >>):

arch/loongarch/kvm/switch.S: Assembler messages:
>> arch/loongarch/kvm/switch.S:68: Error: no match insn: la.pcrel ,1f
arch/loongarch/kvm/switch.S:259: Info: macro invoked from here
>> arch/loongarch/kvm/switch.S:69: Error: no match insn: alsl.d ,$r13,,3
arch/loongarch/kvm/switch.S:259: Info: macro invoked from here
>> arch/loongarch/kvm/switch.S:70: Error: no match insn: jr
arch/loongarch/kvm/switch.S:259: Info: macro invoked from here


vim +68 arch/loongarch/kvm/switch.S

39fdf4be72f2b8 Tianrui Zhao 2023-10-02 43
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 44 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 45 * Prepare switch to guest, save host regs and restore guest regs.
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 46 * a2: kvm_vcpu_arch, don't touch it until 'ertn'
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 47 * t0, t1: temp register
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 48 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 49 .macro kvm_switch_to_guest
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 50 /* Set host ECFG.VS=0, all exceptions share one exception entry */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 51 csrrd t0, LOONGARCH_CSR_ECFG
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 52 bstrins.w t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 53 csrwr t0, LOONGARCH_CSR_ECFG
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 54
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 55 /* Load up the new EENTRY */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 56 ld.d t0, a2, KVM_ARCH_GEENTRY
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 57 csrwr t0, LOONGARCH_CSR_EENTRY
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 58
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 59 /* Set Guest ERA */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 60 ld.d t0, a2, KVM_ARCH_GPC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 61 csrwr t0, LOONGARCH_CSR_ERA
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 62
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 63 /* Save host PGDL */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 64 csrrd t0, LOONGARCH_CSR_PGDL
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 65 st.d t0, a2, KVM_ARCH_HPGD
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 66
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 67 /* Switch to kvm */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 @68 ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 @69
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 @70 /* Load guest PGDL */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 71 li.w t0, KVM_GPGD
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 72 ldx.d t0, t1, t0
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 73 csrwr t0, LOONGARCH_CSR_PGDL
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 74
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 75 /* Mix GID and RID */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 76 csrrd t1, LOONGARCH_CSR_GSTAT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 77 bstrpick.w t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 78 csrrd t0, LOONGARCH_CSR_GTLBC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 79 bstrins.w t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 80 csrwr t0, LOONGARCH_CSR_GTLBC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 81
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 82 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 83 * Enable intr in root mode with future ertn so that host interrupt
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 84 * can be responsed during VM runs
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 85 * Guest CRMD comes from separate GCSR_CRMD register
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 86 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 87 ori t0, zero, CSR_PRMD_PIE
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 88 csrxchg t0, t0, LOONGARCH_CSR_PRMD
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 89
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 90 /* Set PVM bit to setup ertn to guest context */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 91 ori t0, zero, CSR_GSTAT_PVM
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 92 csrxchg t0, t0, LOONGARCH_CSR_GSTAT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 93
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 94 /* Load Guest GPRs */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 95 kvm_restore_guest_gprs a2
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 96 /* Load KVM_ARCH register */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 97 ld.d a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 98
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 99 ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 100 .endm
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 101
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 102 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 103 * Exception entry for general exception from guest mode
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 104 * - IRQ is disabled
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 105 * - kernel privilege in root mode
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 106 * - page mode keep unchanged from previous PRMD in root mode
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 107 * - Fixme: tlb exception cannot happen since registers relative with TLB
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 108 * - is still in guest mode, such as pgd table/vmid registers etc,
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 109 * - will fix with hw page walk enabled in future
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 110 * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 111 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 112 .text
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 113 .cfi_sections .debug_frame
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 114 SYM_CODE_START(kvm_exc_entry)
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 115 csrwr a2, KVM_TEMP_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 116 csrrd a2, KVM_VCPU_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 117 addi.d a2, a2, KVM_VCPU_ARCH
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 118
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 119 /* After save GPRs, free to use any GPR */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 120 kvm_save_guest_gprs a2
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 121 /* Save guest A2 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 122 csrrd t0, KVM_TEMP_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 123 st.d t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 124
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 125 /* A2 is kvm_vcpu_arch, A1 is free to use */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 126 csrrd s1, KVM_VCPU_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 127 ld.d s0, s1, KVM_VCPU_RUN
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 128
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 129 csrrd t0, LOONGARCH_CSR_ESTAT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 130 st.d t0, a2, KVM_ARCH_HESTAT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 131 csrrd t0, LOONGARCH_CSR_ERA
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 132 st.d t0, a2, KVM_ARCH_GPC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 133 csrrd t0, LOONGARCH_CSR_BADV
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 134 st.d t0, a2, KVM_ARCH_HBADV
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 135 csrrd t0, LOONGARCH_CSR_BADI
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 136 st.d t0, a2, KVM_ARCH_HBADI
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 137
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 138 /* Restore host ECFG.VS */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 139 csrrd t0, LOONGARCH_CSR_ECFG
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 140 ld.d t1, a2, KVM_ARCH_HECFG
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 141 or t0, t0, t1
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 142 csrwr t0, LOONGARCH_CSR_ECFG
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 143
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 144 /* Restore host EENTRY */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 145 ld.d t0, a2, KVM_ARCH_HEENTRY
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 146 csrwr t0, LOONGARCH_CSR_EENTRY
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 147
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 148 /* Restore host pgd table */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 149 ld.d t0, a2, KVM_ARCH_HPGD
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 150 csrwr t0, LOONGARCH_CSR_PGDL
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 151
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 152 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 153 * Disable PGM bit to enter root mode by default with next ertn
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 154 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 155 ori t0, zero, CSR_GSTAT_PVM
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 156 csrxchg zero, t0, LOONGARCH_CSR_GSTAT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 157
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 158 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 159 * Clear GTLBC.TGID field
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 160 * 0: for root tlb update in future tlb instr
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 161 * others: for guest tlb update like gpa to hpa in future tlb instr
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 162 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 163 csrrd t0, LOONGARCH_CSR_GTLBC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 164 bstrins.w t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 165 csrwr t0, LOONGARCH_CSR_GTLBC
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 166 ld.d tp, a2, KVM_ARCH_HTP
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 167 ld.d sp, a2, KVM_ARCH_HSP
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 168 /* restore per cpu register */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 169 ld.d u0, a2, KVM_ARCH_HPERCPU
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 170 addi.d sp, sp, -PT_SIZE
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 171
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 172 /* Prepare handle exception */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 173 or a0, s0, zero
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 174 or a1, s1, zero
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 175 ld.d t8, a2, KVM_ARCH_HANDLE_EXIT
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 176 jirl ra, t8, 0
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 177
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 178 or a2, s1, zero
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 179 addi.d a2, a2, KVM_VCPU_ARCH
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 180
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 181 /* Resume host when ret <= 0 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 182 blez a0, ret_to_host
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 183
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 184 /*
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 185 * Return to guest
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 186 * Save per cpu register again, maybe switched to another cpu
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 187 */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 188 st.d u0, a2, KVM_ARCH_HPERCPU
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 189
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 190 /* Save kvm_vcpu to kscratch */
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 191 csrwr s1, KVM_VCPU_KS
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 192 kvm_switch_to_guest
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 193
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 194 ret_to_host:
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 195 ld.d a2, a2, KVM_ARCH_HSP
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 196 addi.d a2, a2, -PT_SIZE
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 197 kvm_restore_host_gpr a2
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 198 jr ra
39fdf4be72f2b8 Tianrui Zhao 2023-10-02 199

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-11-16 07:15:52

by WANG Xuerui

[permalink] [raw]
Subject: Re: [PATCH v1 1/2] LoongArch: KVM: Add lsx support


On 11/15/23 17:19, Tianrui Zhao wrote:
> This patch adds LSX support for LoongArch KVM. The LSX means
> LoongArch 128-bits vector instruction.
Maybe we don't need to explain what "LSX" is; people working on
LoongArch kernel should already know this through the kernel docs and
the various occurrences in the code.
> There will be LSX exception in KVM when guest use the LSX
> instruction. KVM will enable LSX and restore the vector
> registers for guest then return to guest to continue running.
>
One more extra line that should get removed.
> Signed-off-by: Tianrui Zhao <[email protected]>
> ---
> arch/loongarch/include/asm/kvm_host.h | 6 ++++
> arch/loongarch/include/asm/kvm_vcpu.h | 12 +++++++
> arch/loongarch/kvm/exit.c | 18 ++++++++++
> arch/loongarch/kvm/switch.S | 22 +++++++++++++
> arch/loongarch/kvm/trace.h | 4 ++-
> arch/loongarch/kvm/vcpu.c | 47 +++++++++++++++++++++++++--
> 6 files changed, 105 insertions(+), 4 deletions(-)
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index 11328700d4..6c65c25169 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -94,6 +94,7 @@ enum emulation_result {
> #define KVM_LARCH_FPU (0x1 << 0)
> #define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
> #define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> +#define KVM_LARCH_LSX (0x1 << 3)
>
> struct kvm_vcpu_arch {
> /*
> @@ -175,6 +176,11 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned
> csr->csrs[reg] = val;
> }
>
> +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
> +{
> + return arch->cpucfg[2] & CPUCFG2_LSX;
> +}
> +
> /* Debug: dump vcpu state */
> int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> index 553cfa2b2b..c629771e12 100644
> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -55,6 +55,18 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
> void kvm_restore_fpu(struct loongarch_fpu *fpu);
> void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +void kvm_own_lsx(struct kvm_vcpu *vcpu);
> +void kvm_save_lsx(struct loongarch_fpu *fpu);
> +void kvm_restore_lsx(struct loongarch_fpu *fpu);
> +void kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
> +#else
> +static inline void kvm_own_lsx(struct kvm_vcpu *vcpu) { }
> +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
> +#endif
> +
> void kvm_acquire_timer(struct kvm_vcpu *vcpu);
> void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
> void kvm_reset_timer(struct kvm_vcpu *vcpu);
> diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
> index ce8de3fa47..1b1c58ccc8 100644
> --- a/arch/loongarch/kvm/exit.c
> +++ b/arch/loongarch/kvm/exit.c
> @@ -659,6 +659,23 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
> return RESUME_GUEST;
> }
>
> +/*
> + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
> + * @vcpu: Virtual CPU context.
> + *
> + * Handle when the guest attempts to use LSX when it is disabled in the root
> + * context.
> + */
> +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
> +{
> + if (!kvm_guest_has_lsx(&vcpu->arch))
> + kvm_queue_exception(vcpu, EXCCODE_INE, 0);
> + else
> + kvm_own_lsx(vcpu);
> +
> + return RESUME_GUEST;
> +}
> +
> /*
> * LoongArch KVM callback handling for unimplemented guest exiting
> */
> @@ -687,6 +704,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
> [EXCCODE_TLBS] = kvm_handle_write_fault,
> [EXCCODE_TLBM] = kvm_handle_write_fault,
> [EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
> + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
> [EXCCODE_GSPR] = kvm_handle_gspr,
> };
>
> diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
> index 0ed9040307..32ba092a44 100644
> --- a/arch/loongarch/kvm/switch.S
> +++ b/arch/loongarch/kvm/switch.S
> @@ -245,6 +245,28 @@ SYM_FUNC_START(kvm_restore_fpu)
> jr ra
> SYM_FUNC_END(kvm_restore_fpu)
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +SYM_FUNC_START(kvm_save_lsx)
> + fpu_save_csr a0 t1
> + fpu_save_cc a0 t1 t2
> + lsx_save_data a0 t1
> + jirl zero, ra, 0
"jr ra" for consistency (e.g. with the function immediately above);
similarly for other such usages.
> +SYM_FUNC_END(kvm_save_lsx)
> +
> +SYM_FUNC_START(kvm_restore_lsx)
> + lsx_restore_data a0 t1
> + fpu_restore_cc a0 t1 t2
> + fpu_restore_csr a0 t1
> + jirl zero, ra, 0
> +SYM_FUNC_END(kvm_restore_lsx)
> +
> +SYM_FUNC_START(kvm_restore_lsx_upper)
> + lsx_restore_all_upper a0 t0 t1
> +
> + jirl zero, ra, 0
> +SYM_FUNC_END(kvm_restore_lsx_upper)
> +#endif
> +
> .section ".rodata"
> SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
> SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
> index a1e35d6554..7da4e230e8 100644
> --- a/arch/loongarch/kvm/trace.h
> +++ b/arch/loongarch/kvm/trace.h
> @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr,
> #define KVM_TRACE_AUX_DISCARD 4
>
> #define KVM_TRACE_AUX_FPU 1
> +#define KVM_TRACE_AUX_LSX 2
>
> #define kvm_trace_symbol_aux_op \
> { KVM_TRACE_AUX_SAVE, "save" }, \
> @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr,
> { KVM_TRACE_AUX_DISCARD, "discard" }
>
> #define kvm_trace_symbol_aux_state \
> - { KVM_TRACE_AUX_FPU, "FPU" }
> + { KVM_TRACE_AUX_FPU, "FPU" }, \
> + { KVM_TRACE_AUX_LSX, "LSX" }
>
> TRACE_EVENT(kvm_aux,
> TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 73d0c2b9c1..f0bb583353 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -378,9 +378,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
> break;
> case KVM_REG_LOONGARCH_CPUCFG:
> id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
> - if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
> + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) {
> vcpu->arch.cpucfg[id] = (u32)v;
> - else
> + if (id == 2 && v & CPUCFG2_LSX && !cpu_has_lsx) {
> + vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
> + ret = -EINVAL;
> + }
> + } else
> ret = -EINVAL;
> break;
> case KVM_REG_LOONGARCH_KVM:
> @@ -561,12 +565,49 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
> preempt_enable();
> }
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +/* Enable LSX for guest and restore context */
> +void kvm_own_lsx(struct kvm_vcpu *vcpu)
> +{
> + preempt_disable();
> +
> + /* Enable LSX for guest */
> + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
> + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> + case KVM_LARCH_FPU:
> + /*
> + * Guest FPU state already loaded,
> + * only restore upper LSX state
> + */
> + kvm_restore_lsx_upper(&vcpu->arch.fpu);
> + break;
> + default:
> + /* Neither FP or LSX already active,
> + * restore full LSX state
> + */
> + kvm_restore_lsx(&vcpu->arch.fpu);
> + break;
> + }
> +
> + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
> + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
> + preempt_enable();
> +}
> +#endif
> +
> /* Save context and disable FPU */
> void kvm_lose_fpu(struct kvm_vcpu *vcpu)
> {
> preempt_disable();
>
> - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> + if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
> + kvm_save_lsx(&vcpu->arch.fpu);
> + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
> + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
> +
> + /* Disable LSX & FPU */
> + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
> + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> kvm_save_fpu(&vcpu->arch.fpu);
> vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
> trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

--
WANG "xen0n" Xuerui

Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/

2023-11-16 07:19:45

by WANG Xuerui

[permalink] [raw]
Subject: Re: [PATCH v1 2/2] LoongArch: KVM: Add lasx support

On 11/15/23 17:19, Tianrui Zhao wrote:
> This patch adds LASX support for LoongArch KVM. The LASX means
> LoongArch 256-bits vector instruction.
> There will be LASX exception in KVM when guest use the LASX
> instruction. KVM will enable LASX and restore the vector
> registers for guest then return to guest to continue running.
>
> Signed-off-by: Tianrui Zhao <[email protected]>
> ---
> arch/loongarch/include/asm/kvm_host.h | 6 ++++
> arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
> arch/loongarch/kernel/fpu.S | 1 +
> arch/loongarch/kvm/exit.c | 18 +++++++++++
> arch/loongarch/kvm/switch.S | 16 ++++++++++
> arch/loongarch/kvm/trace.h | 4 ++-
> arch/loongarch/kvm/vcpu.c | 43 ++++++++++++++++++++++++++-
> 7 files changed, 96 insertions(+), 2 deletions(-)
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index 6c65c25169..4c05b5eca0 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -95,6 +95,7 @@ enum emulation_result {
> #define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
> #define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> #define KVM_LARCH_LSX (0x1 << 3)
> +#define KVM_LARCH_LASX (0x1 << 4)
>
> struct kvm_vcpu_arch {
> /*
> @@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
> return arch->cpucfg[2] & CPUCFG2_LSX;
> }
>
> +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
> +{
> + return arch->cpucfg[2] & CPUCFG2_LASX;
> +}
> +
> /* Debug: dump vcpu state */
> int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> index c629771e12..4f87f16018 100644
> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
> static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
> #endif
>
> +#ifdef CONFIG_CPU_HAS_LASX
> +void kvm_own_lasx(struct kvm_vcpu *vcpu);
> +void kvm_save_lasx(struct loongarch_fpu *fpu);
> +void kvm_restore_lasx(struct loongarch_fpu *fpu);
> +#else
> +static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
> +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
> +#endif
> +
> void kvm_acquire_timer(struct kvm_vcpu *vcpu);
> void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
> void kvm_reset_timer(struct kvm_vcpu *vcpu);
> diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
> index d53ab10f46..f4524fe866 100644
> --- a/arch/loongarch/kernel/fpu.S
> +++ b/arch/loongarch/kernel/fpu.S
> @@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
> lasx_restore_all_upper a0 t0 t1
> jr ra
> SYM_FUNC_END(_restore_lasx_upper)
> +EXPORT_SYMBOL(_restore_lasx_upper)

Why the added export? It doesn't seem necessary, given the previous
patch doesn't have a similar export added for _restore_lsx_upper. (Or if
it's truly needed it should probably become EXPORT_SYMBOL_GPL.)

--
WANG "xen0n" Xuerui

Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/

2023-11-17 08:24:41

by Bibo Mao

[permalink] [raw]
Subject: Re: [PATCH v1 1/2] LoongArch: KVM: Add lsx support



On 2023/11/15 下午5:19, Tianrui Zhao wrote:
> This patch adds LSX support for LoongArch KVM. The LSX means
> LoongArch 128-bits vector instruction.
> There will be LSX exception in KVM when guest use the LSX
> instruction. KVM will enable LSX and restore the vector
> registers for guest then return to guest to continue running.
>
>
> Signed-off-by: Tianrui Zhao <[email protected]>
> ---
> arch/loongarch/include/asm/kvm_host.h | 6 ++++
> arch/loongarch/include/asm/kvm_vcpu.h | 12 +++++++
> arch/loongarch/kvm/exit.c | 18 ++++++++++
> arch/loongarch/kvm/switch.S | 22 +++++++++++++
> arch/loongarch/kvm/trace.h | 4 ++-
> arch/loongarch/kvm/vcpu.c | 47 +++++++++++++++++++++++++--
> 6 files changed, 105 insertions(+), 4 deletions(-)
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index 11328700d4..6c65c25169 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -94,6 +94,7 @@ enum emulation_result {
> #define KVM_LARCH_FPU (0x1 << 0)
> #define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
> #define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> +#define KVM_LARCH_LSX (0x1 << 3)
>
> struct kvm_vcpu_arch {
> /*
> @@ -175,6 +176,11 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned
> csr->csrs[reg] = val;
> }
>
> +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
> +{
> + return arch->cpucfg[2] & CPUCFG2_LSX;
> +}
> +
> /* Debug: dump vcpu state */
> int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> index 553cfa2b2b..c629771e12 100644
> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -55,6 +55,18 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
> void kvm_restore_fpu(struct loongarch_fpu *fpu);
> void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +void kvm_own_lsx(struct kvm_vcpu *vcpu);
> +void kvm_save_lsx(struct loongarch_fpu *fpu);
> +void kvm_restore_lsx(struct loongarch_fpu *fpu);
> +void kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
> +#else
> +static inline void kvm_own_lsx(struct kvm_vcpu *vcpu) { }
> +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
> +static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
> +#endif
> +
> void kvm_acquire_timer(struct kvm_vcpu *vcpu);
> void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
> void kvm_reset_timer(struct kvm_vcpu *vcpu);
> diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
> index ce8de3fa47..1b1c58ccc8 100644
> --- a/arch/loongarch/kvm/exit.c
> +++ b/arch/loongarch/kvm/exit.c
> @@ -659,6 +659,23 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
> return RESUME_GUEST;
> }
>
> +/*
> + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
> + * @vcpu: Virtual CPU context.
> + *
> + * Handle when the guest attempts to use LSX when it is disabled in the root
> + * context.
> + */
> +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
> +{
> + if (!kvm_guest_has_lsx(&vcpu->arch))
> + kvm_queue_exception(vcpu, EXCCODE_INE, 0);
> + else
> + kvm_own_lsx(vcpu);
> +
> + return RESUME_GUEST;
> +}
> +
> /*
> * LoongArch KVM callback handling for unimplemented guest exiting
> */
> @@ -687,6 +704,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
> [EXCCODE_TLBS] = kvm_handle_write_fault,
> [EXCCODE_TLBM] = kvm_handle_write_fault,
> [EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
> + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
> [EXCCODE_GSPR] = kvm_handle_gspr,
> };
>
> diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
> index 0ed9040307..32ba092a44 100644
> --- a/arch/loongarch/kvm/switch.S
> +++ b/arch/loongarch/kvm/switch.S
> @@ -245,6 +245,28 @@ SYM_FUNC_START(kvm_restore_fpu)
> jr ra
> SYM_FUNC_END(kvm_restore_fpu)
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +SYM_FUNC_START(kvm_save_lsx)
> + fpu_save_csr a0 t1
> + fpu_save_cc a0 t1 t2
> + lsx_save_data a0 t1
> + jirl zero, ra, 0
> +SYM_FUNC_END(kvm_save_lsx)
> +
> +SYM_FUNC_START(kvm_restore_lsx)
> + lsx_restore_data a0 t1
> + fpu_restore_cc a0 t1 t2
> + fpu_restore_csr a0 t1
> + jirl zero, ra, 0
> +SYM_FUNC_END(kvm_restore_lsx)
> +
> +SYM_FUNC_START(kvm_restore_lsx_upper)
> + lsx_restore_all_upper a0 t0 t1
> +
> + jirl zero, ra, 0
> +SYM_FUNC_END(kvm_restore_lsx_upper)
> +#endif
> +
> .section ".rodata"
> SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
> SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
> index a1e35d6554..7da4e230e8 100644
> --- a/arch/loongarch/kvm/trace.h
> +++ b/arch/loongarch/kvm/trace.h
> @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr,
> #define KVM_TRACE_AUX_DISCARD 4
>
> #define KVM_TRACE_AUX_FPU 1
> +#define KVM_TRACE_AUX_LSX 2
>
> #define kvm_trace_symbol_aux_op \
> { KVM_TRACE_AUX_SAVE, "save" }, \
> @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr,
> { KVM_TRACE_AUX_DISCARD, "discard" }
>
> #define kvm_trace_symbol_aux_state \
> - { KVM_TRACE_AUX_FPU, "FPU" }
> + { KVM_TRACE_AUX_FPU, "FPU" }, \
> + { KVM_TRACE_AUX_LSX, "LSX" }
>
> TRACE_EVENT(kvm_aux,
> TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 73d0c2b9c1..f0bb583353 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -378,9 +378,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
> break;
> case KVM_REG_LOONGARCH_CPUCFG:
> id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
> - if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
> + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) {
> vcpu->arch.cpucfg[id] = (u32)v;
> - else
> + if (id == 2 && v & CPUCFG2_LSX && !cpu_has_lsx) {
Hi Tianrui,

Can you add some annotations to this piece of code, so that
people can understand it easily?

And do we need an interface to expose host capabilities to user
applications? For example, QEMU could first get the supported
capabilities from KVM and then set the required ones.

Regards
Bibo Mao
> + vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
> + ret = -EINVAL;
> + }
> + } else
> ret = -EINVAL;
> break;
> case KVM_REG_LOONGARCH_KVM:
> @@ -561,12 +565,49 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
> preempt_enable();
> }
>
> +#ifdef CONFIG_CPU_HAS_LSX
> +/* Enable LSX for guest and restore context */
> +void kvm_own_lsx(struct kvm_vcpu *vcpu)
> +{
> + preempt_disable();
> +
> + /* Enable LSX for guest */
> + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
> + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> + case KVM_LARCH_FPU:
> + /*
> + * Guest FPU state already loaded,
> + * only restore upper LSX state
> + */
> + kvm_restore_lsx_upper(&vcpu->arch.fpu);
> + break;
> + default:
> + /* Neither FP or LSX already active,
> + * restore full LSX state
> + */
> + kvm_restore_lsx(&vcpu->arch.fpu);
> + break;
> + }
> +
> + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
> + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
> + preempt_enable();
> +}
> +#endif
> +
> /* Save context and disable FPU */
> void kvm_lose_fpu(struct kvm_vcpu *vcpu)
> {
> preempt_disable();
>
> - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> + if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
> + kvm_save_lsx(&vcpu->arch.fpu);
> + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
> + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
> +
> + /* Disable LSX & FPU */
> + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
> + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
> kvm_save_fpu(&vcpu->arch.fpu);
> vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
> trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
>

2023-11-20 01:29:22

by zhaotianrui

[permalink] [raw]
Subject: Re: [PATCH v1 1/2] LoongArch: KVM: Add lsx support


在 2023/11/17 下午4:24, maobibo 写道:
>
>
> On 2023/11/15 下午5:19, Tianrui Zhao wrote:
>> This patch adds LSX support for LoongArch KVM. The LSX means
>> LoongArch 128-bits vector instruction.
>> There will be LSX exception in KVM when guest use the LSX
>> instruction. KVM will enable LSX and restore the vector
>> registers for guest then return to guest to continue running.
>>
>>
>> Signed-off-by: Tianrui Zhao <[email protected]>
>> ---
>>   arch/loongarch/include/asm/kvm_host.h |  6 ++++
>>   arch/loongarch/include/asm/kvm_vcpu.h | 12 +++++++
>>   arch/loongarch/kvm/exit.c             | 18 ++++++++++
>>   arch/loongarch/kvm/switch.S           | 22 +++++++++++++
>>   arch/loongarch/kvm/trace.h            |  4 ++-
>>   arch/loongarch/kvm/vcpu.c             | 47 +++++++++++++++++++++++++--
>>   6 files changed, 105 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/loongarch/include/asm/kvm_host.h
>> b/arch/loongarch/include/asm/kvm_host.h
>> index 11328700d4..6c65c25169 100644
>> --- a/arch/loongarch/include/asm/kvm_host.h
>> +++ b/arch/loongarch/include/asm/kvm_host.h
>> @@ -94,6 +94,7 @@ enum emulation_result {
>>   #define KVM_LARCH_FPU        (0x1 << 0)
>>   #define KVM_LARCH_SWCSR_LATEST    (0x1 << 1)
>>   #define KVM_LARCH_HWCSR_USABLE    (0x1 << 2)
>> +#define KVM_LARCH_LSX        (0x1 << 3)
>>     struct kvm_vcpu_arch {
>>       /*
>> @@ -175,6 +176,11 @@ static inline void writel_sw_gcsr(struct
>> loongarch_csrs *csr, int reg, unsigned
>>       csr->csrs[reg] = val;
>>   }
>>   +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
>> +{
>> +    return arch->cpucfg[2] & CPUCFG2_LSX;
>> +}
>> +
>>   /* Debug: dump vcpu state */
>>   int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>   diff --git a/arch/loongarch/include/asm/kvm_vcpu.h
>> b/arch/loongarch/include/asm/kvm_vcpu.h
>> index 553cfa2b2b..c629771e12 100644
>> --- a/arch/loongarch/include/asm/kvm_vcpu.h
>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>> @@ -55,6 +55,18 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
>>   void kvm_restore_fpu(struct loongarch_fpu *fpu);
>>   void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +void kvm_own_lsx(struct kvm_vcpu *vcpu);
>> +void kvm_save_lsx(struct loongarch_fpu *fpu);
>> +void kvm_restore_lsx(struct loongarch_fpu *fpu);
>> +void kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
>> +#else
>> +static inline void kvm_own_lsx(struct kvm_vcpu *vcpu) { }
>> +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
>> +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
>> +static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
>> +#endif
>> +
>>   void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>   void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>   void kvm_reset_timer(struct kvm_vcpu *vcpu);
>> diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
>> index ce8de3fa47..1b1c58ccc8 100644
>> --- a/arch/loongarch/kvm/exit.c
>> +++ b/arch/loongarch/kvm/exit.c
>> @@ -659,6 +659,23 @@ static int kvm_handle_fpu_disabled(struct
>> kvm_vcpu *vcpu)
>>       return RESUME_GUEST;
>>   }
>>   +/*
>> + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
>> + * @vcpu:      Virtual CPU context.
>> + *
>> + * Handle when the guest attempts to use LSX when it is disabled in
>> the root
>> + * context.
>> + */
>> +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
>> +{
>> +    if (!kvm_guest_has_lsx(&vcpu->arch))
>> +        kvm_queue_exception(vcpu, EXCCODE_INE, 0);
>> +    else
>> +        kvm_own_lsx(vcpu);
>> +
>> +    return RESUME_GUEST;
>> +}
>> +
>>   /*
>>    * LoongArch KVM callback handling for unimplemented guest exiting
>>    */
>> @@ -687,6 +704,7 @@ static exit_handle_fn
>> kvm_fault_tables[EXCCODE_INT_START] = {
>>       [EXCCODE_TLBS]            = kvm_handle_write_fault,
>>       [EXCCODE_TLBM]            = kvm_handle_write_fault,
>>       [EXCCODE_FPDIS]            = kvm_handle_fpu_disabled,
>> +    [EXCCODE_LSXDIS]                = kvm_handle_lsx_disabled,
>>       [EXCCODE_GSPR]            = kvm_handle_gspr,
>>   };
>>   diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
>> index 0ed9040307..32ba092a44 100644
>> --- a/arch/loongarch/kvm/switch.S
>> +++ b/arch/loongarch/kvm/switch.S
>> @@ -245,6 +245,28 @@ SYM_FUNC_START(kvm_restore_fpu)
>>       jr                 ra
>>   SYM_FUNC_END(kvm_restore_fpu)
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +SYM_FUNC_START(kvm_save_lsx)
>> +    fpu_save_csr    a0 t1
>> +    fpu_save_cc     a0 t1 t2
>> +    lsx_save_data   a0 t1
>> +    jirl            zero, ra, 0
>> +SYM_FUNC_END(kvm_save_lsx)
>> +
>> +SYM_FUNC_START(kvm_restore_lsx)
>> +    lsx_restore_data a0 t1
>> +    fpu_restore_cc   a0 t1 t2
>> +    fpu_restore_csr  a0 t1
>> +    jirl             zero, ra, 0
>> +SYM_FUNC_END(kvm_restore_lsx)
>> +
>> +SYM_FUNC_START(kvm_restore_lsx_upper)
>> +    lsx_restore_all_upper a0 t0 t1
>> +
>> +    jirl                  zero, ra, 0
>> +SYM_FUNC_END(kvm_restore_lsx_upper)
>> +#endif
>> +
>>       .section ".rodata"
>>   SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
>>   SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end -
>> kvm_enter_guest)
>> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
>> index a1e35d6554..7da4e230e8 100644
>> --- a/arch/loongarch/kvm/trace.h
>> +++ b/arch/loongarch/kvm/trace.h
>> @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr,
>>   #define KVM_TRACE_AUX_DISCARD        4
>>     #define KVM_TRACE_AUX_FPU        1
>> +#define KVM_TRACE_AUX_LSX        2
>>     #define kvm_trace_symbol_aux_op                \
>>       { KVM_TRACE_AUX_SAVE,        "save" },    \
>> @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr,
>>       { KVM_TRACE_AUX_DISCARD,    "discard" }
>>     #define kvm_trace_symbol_aux_state            \
>> -    { KVM_TRACE_AUX_FPU,     "FPU" }
>> +    { KVM_TRACE_AUX_FPU,     "FPU" },        \
>> +    { KVM_TRACE_AUX_LSX,     "LSX" }
>>     TRACE_EVENT(kvm_aux,
>>           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
>> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
>> index 73d0c2b9c1..f0bb583353 100644
>> --- a/arch/loongarch/kvm/vcpu.c
>> +++ b/arch/loongarch/kvm/vcpu.c
>> @@ -378,9 +378,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
>>           break;
>>       case KVM_REG_LOONGARCH_CPUCFG:
>>           id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
>> -        if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
>> +        if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) {
>>               vcpu->arch.cpucfg[id] = (u32)v;
>> -        else
>> +            if (id == 2 && v & CPUCFG2_LSX && !cpu_has_lsx) {
> Hi Tianrui,
>
> Can you add some annotations to this piece of code, so that
> people can understand it easily?
>
> And do we need an interface to expose host capabilities to user
> applications? For example, QEMU could first get the supported
> capabilities from KVM and then set the required ones.
>
> Regards
> Bibo Mao
Thanks, I will add annotations for this. I also think it is better to
add interfaces for checking the LSX/LASX capabilities from user space;
I will supplement that later.

Thanks
Tianrui Zhao
>> +                vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
>> +                ret = -EINVAL;
>> +            }
>> +        } else
>>               ret = -EINVAL;
>>           break;
>>       case KVM_REG_LOONGARCH_KVM:
>> @@ -561,12 +565,49 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
>>       preempt_enable();
>>   }
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +/* Enable LSX for guest and restore context */
>> +void kvm_own_lsx(struct kvm_vcpu *vcpu)
>> +{
>> +    preempt_disable();
>> +
>> +    /* Enable LSX for guest */
>> +    set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
>> +    switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>> +    case KVM_LARCH_FPU:
>> +        /*
>> +         * Guest FPU state already loaded,
>> +         * only restore upper LSX state
>> +         */
>> +        kvm_restore_lsx_upper(&vcpu->arch.fpu);
>> +        break;
>> +    default:
>> +        /* Neither FP or LSX already active,
>> +         * restore full LSX state
>> +         */
>> +        kvm_restore_lsx(&vcpu->arch.fpu);
>> +    break;
>> +    }
>> +
>> +    trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
>> +    vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
>> +    preempt_enable();
>> +}
>> +#endif
>> +
>>   /* Save context and disable FPU */
>>   void kvm_lose_fpu(struct kvm_vcpu *vcpu)
>>   {
>>       preempt_disable();
>>   -    if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>> +    if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
>> +        kvm_save_lsx(&vcpu->arch.fpu);
>> +        vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
>> +        trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
>> +
>> +        /* Disable LSX & FPU */
>> +        clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
>> +    } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>>           kvm_save_fpu(&vcpu->arch.fpu);
>>           vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
>>           trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
>>

2023-11-21 09:54:48

by zhaotianrui

[permalink] [raw]
Subject: Re: [PATCH v1 1/2] LoongArch: KVM: Add lsx support


在 2023/11/16 下午3:15, WANG Xuerui 写道:
>
> On 11/15/23 17:19, Tianrui Zhao wrote:
>> This patch adds LSX support for LoongArch KVM. The LSX means
>> LoongArch 128-bits vector instruction.
> Maybe we don't need to explain what "LSX" is; people working on the
> LoongArch kernel should already know this through the kernel docs and
> the various occurrences in the code.
Thanks, I will remove the explanation of "LSX, LASX".
>> There will be LSX exception in KVM when guest use the LSX
>> instruction. KVM will enable LSX and restore the vector
>> registers for guest then return to guest to continue running.
>>
> One more extra line that should get removed.
Thanks, I will remove the extra line.
>> Signed-off-by: Tianrui Zhao <[email protected]>
>> ---
>>   arch/loongarch/include/asm/kvm_host.h |  6 ++++
>>   arch/loongarch/include/asm/kvm_vcpu.h | 12 +++++++
>>   arch/loongarch/kvm/exit.c             | 18 ++++++++++
>>   arch/loongarch/kvm/switch.S           | 22 +++++++++++++
>>   arch/loongarch/kvm/trace.h            |  4 ++-
>>   arch/loongarch/kvm/vcpu.c             | 47 +++++++++++++++++++++++++--
>>   6 files changed, 105 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/loongarch/include/asm/kvm_host.h
>> b/arch/loongarch/include/asm/kvm_host.h
>> index 11328700d4..6c65c25169 100644
>> --- a/arch/loongarch/include/asm/kvm_host.h
>> +++ b/arch/loongarch/include/asm/kvm_host.h
>> @@ -94,6 +94,7 @@ enum emulation_result {
>>   #define KVM_LARCH_FPU        (0x1 << 0)
>>   #define KVM_LARCH_SWCSR_LATEST    (0x1 << 1)
>>   #define KVM_LARCH_HWCSR_USABLE    (0x1 << 2)
>> +#define KVM_LARCH_LSX        (0x1 << 3)
>>     struct kvm_vcpu_arch {
>>       /*
>> @@ -175,6 +176,11 @@ static inline void writel_sw_gcsr(struct
>> loongarch_csrs *csr, int reg, unsigned
>>       csr->csrs[reg] = val;
>>   }
>>   +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
>> +{
>> +    return arch->cpucfg[2] & CPUCFG2_LSX;
>> +}
>> +
>>   /* Debug: dump vcpu state */
>>   int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>   diff --git a/arch/loongarch/include/asm/kvm_vcpu.h
>> b/arch/loongarch/include/asm/kvm_vcpu.h
>> index 553cfa2b2b..c629771e12 100644
>> --- a/arch/loongarch/include/asm/kvm_vcpu.h
>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>> @@ -55,6 +55,18 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
>>   void kvm_restore_fpu(struct loongarch_fpu *fpu);
>>   void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +void kvm_own_lsx(struct kvm_vcpu *vcpu);
>> +void kvm_save_lsx(struct loongarch_fpu *fpu);
>> +void kvm_restore_lsx(struct loongarch_fpu *fpu);
>> +void kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
>> +#else
>> +static inline void kvm_own_lsx(struct kvm_vcpu *vcpu) { }
>> +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
>> +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
>> +static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu) { }
>> +#endif
>> +
>>   void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>   void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>   void kvm_reset_timer(struct kvm_vcpu *vcpu);
>> diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
>> index ce8de3fa47..1b1c58ccc8 100644
>> --- a/arch/loongarch/kvm/exit.c
>> +++ b/arch/loongarch/kvm/exit.c
>> @@ -659,6 +659,23 @@ static int kvm_handle_fpu_disabled(struct
>> kvm_vcpu *vcpu)
>>       return RESUME_GUEST;
>>   }
>>   +/*
>> + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
>> + * @vcpu:      Virtual CPU context.
>> + *
>> + * Handle when the guest attempts to use LSX when it is disabled in
>> the root
>> + * context.
>> + */
>> +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
>> +{
>> +    if (!kvm_guest_has_lsx(&vcpu->arch))
>> +        kvm_queue_exception(vcpu, EXCCODE_INE, 0);
>> +    else
>> +        kvm_own_lsx(vcpu);
>> +
>> +    return RESUME_GUEST;
>> +}
>> +
>>   /*
>>    * LoongArch KVM callback handling for unimplemented guest exiting
>>    */
>> @@ -687,6 +704,7 @@ static exit_handle_fn
>> kvm_fault_tables[EXCCODE_INT_START] = {
>>       [EXCCODE_TLBS]            = kvm_handle_write_fault,
>>       [EXCCODE_TLBM]            = kvm_handle_write_fault,
>>       [EXCCODE_FPDIS]            = kvm_handle_fpu_disabled,
>> +    [EXCCODE_LSXDIS]                = kvm_handle_lsx_disabled,
>>       [EXCCODE_GSPR]            = kvm_handle_gspr,
>>   };
>>   diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
>> index 0ed9040307..32ba092a44 100644
>> --- a/arch/loongarch/kvm/switch.S
>> +++ b/arch/loongarch/kvm/switch.S
>> @@ -245,6 +245,28 @@ SYM_FUNC_START(kvm_restore_fpu)
>>       jr                 ra
>>   SYM_FUNC_END(kvm_restore_fpu)
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +SYM_FUNC_START(kvm_save_lsx)
>> +    fpu_save_csr    a0 t1
>> +    fpu_save_cc     a0 t1 t2
>> +    lsx_save_data   a0 t1
>> +    jirl            zero, ra, 0
> "jr ra" for consistency (e.g. with the function immediately above);
> similarly for other such usages.
Thanks, I will use "jr ra" there.

Tianrui Zhao
>> +SYM_FUNC_END(kvm_save_lsx)
>> +
>> +SYM_FUNC_START(kvm_restore_lsx)
>> +    lsx_restore_data a0 t1
>> +    fpu_restore_cc   a0 t1 t2
>> +    fpu_restore_csr  a0 t1
>> +    jirl             zero, ra, 0
>> +SYM_FUNC_END(kvm_restore_lsx)
>> +
>> +SYM_FUNC_START(kvm_restore_lsx_upper)
>> +    lsx_restore_all_upper a0 t0 t1
>> +
>> +    jirl                  zero, ra, 0
>> +SYM_FUNC_END(kvm_restore_lsx_upper)
>> +#endif
>> +
>>       .section ".rodata"
>>   SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
>>   SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end -
>> kvm_enter_guest)
>> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
>> index a1e35d6554..7da4e230e8 100644
>> --- a/arch/loongarch/kvm/trace.h
>> +++ b/arch/loongarch/kvm/trace.h
>> @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr,
>>   #define KVM_TRACE_AUX_DISCARD        4
>>     #define KVM_TRACE_AUX_FPU        1
>> +#define KVM_TRACE_AUX_LSX        2
>>     #define kvm_trace_symbol_aux_op                \
>>       { KVM_TRACE_AUX_SAVE,        "save" },    \
>> @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr,
>>       { KVM_TRACE_AUX_DISCARD,    "discard" }
>>     #define kvm_trace_symbol_aux_state            \
>> -    { KVM_TRACE_AUX_FPU,     "FPU" }
>> +    { KVM_TRACE_AUX_FPU,     "FPU" },        \
>> +    { KVM_TRACE_AUX_LSX,     "LSX" }
>>     TRACE_EVENT(kvm_aux,
>>           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
>> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
>> index 73d0c2b9c1..f0bb583353 100644
>> --- a/arch/loongarch/kvm/vcpu.c
>> +++ b/arch/loongarch/kvm/vcpu.c
>> @@ -378,9 +378,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
>>           break;
>>       case KVM_REG_LOONGARCH_CPUCFG:
>>           id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
>> -        if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
>> +        if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) {
>>               vcpu->arch.cpucfg[id] = (u32)v;
>> -        else
>> +            if (id == 2 && v & CPUCFG2_LSX && !cpu_has_lsx) {
>> +                vcpu->arch.cpucfg[id] &= ~CPUCFG2_LSX;
>> +                ret = -EINVAL;
>> +            }
>> +        } else
>>               ret = -EINVAL;
>>           break;
>>       case KVM_REG_LOONGARCH_KVM:
>> @@ -561,12 +565,49 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
>>       preempt_enable();
>>   }
>>   +#ifdef CONFIG_CPU_HAS_LSX
>> +/* Enable LSX for guest and restore context */
>> +void kvm_own_lsx(struct kvm_vcpu *vcpu)
>> +{
>> +    preempt_disable();
>> +
>> +    /* Enable LSX for guest */
>> +    set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
>> +    switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>> +    case KVM_LARCH_FPU:
>> +        /*
>> +         * Guest FPU state already loaded,
>> +         * only restore upper LSX state
>> +         */
>> +        kvm_restore_lsx_upper(&vcpu->arch.fpu);
>> +        break;
>> +    default:
>> +        /* Neither FP or LSX already active,
>> +         * restore full LSX state
>> +         */
>> +        kvm_restore_lsx(&vcpu->arch.fpu);
>> +    break;
>> +    }
>> +
>> +    trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
>> +    vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
>> +    preempt_enable();
>> +}
>> +#endif
>> +
>>   /* Save context and disable FPU */
>>   void kvm_lose_fpu(struct kvm_vcpu *vcpu)
>>   {
>>       preempt_disable();
>>   -    if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>> +    if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
>> +        kvm_save_lsx(&vcpu->arch.fpu);
>> +        vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
>> +        trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
>> +
>> +        /* Disable LSX & FPU */
>> +        clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
>> +    } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
>>           kvm_save_fpu(&vcpu->arch.fpu);
>>           vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
>>           trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
>

2023-11-21 10:01:34

by zhaotianrui

[permalink] [raw]
Subject: Re: [PATCH v1 2/2] LoongArch: KVM: Add lasx support


在 2023/11/16 下午3:19, WANG Xuerui 写道:
> On 11/15/23 17:19, Tianrui Zhao wrote:
>> This patch adds LASX support for LoongArch KVM. The LASX means
>> LoongArch 256-bits vector instruction.
>> There will be LASX exception in KVM when guest use the LASX
>> instruction. KVM will enable LASX and restore the vector
>> registers for guest then return to guest to continue running.
>>
>> Signed-off-by: Tianrui Zhao <[email protected]>
>> ---
>>   arch/loongarch/include/asm/kvm_host.h |  6 ++++
>>   arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
>>   arch/loongarch/kernel/fpu.S           |  1 +
>>   arch/loongarch/kvm/exit.c             | 18 +++++++++++
>>   arch/loongarch/kvm/switch.S           | 16 ++++++++++
>>   arch/loongarch/kvm/trace.h            |  4 ++-
>>   arch/loongarch/kvm/vcpu.c             | 43 ++++++++++++++++++++++++++-
>>   7 files changed, 96 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/loongarch/include/asm/kvm_host.h
>> b/arch/loongarch/include/asm/kvm_host.h
>> index 6c65c25169..4c05b5eca0 100644
>> --- a/arch/loongarch/include/asm/kvm_host.h
>> +++ b/arch/loongarch/include/asm/kvm_host.h
>> @@ -95,6 +95,7 @@ enum emulation_result {
>>   #define KVM_LARCH_SWCSR_LATEST    (0x1 << 1)
>>   #define KVM_LARCH_HWCSR_USABLE    (0x1 << 2)
>>   #define KVM_LARCH_LSX        (0x1 << 3)
>> +#define KVM_LARCH_LASX        (0x1 << 4)
>>     struct kvm_vcpu_arch {
>>       /*
>> @@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct
>> kvm_vcpu_arch *arch)
>>       return arch->cpucfg[2] & CPUCFG2_LSX;
>>   }
>>   +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
>> +{
>> +    return arch->cpucfg[2] & CPUCFG2_LASX;
>> +}
>> +
>>   /* Debug: dump vcpu state */
>>   int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>   diff --git a/arch/loongarch/include/asm/kvm_vcpu.h
>> b/arch/loongarch/include/asm/kvm_vcpu.h
>> index c629771e12..4f87f16018 100644
>> --- a/arch/loongarch/include/asm/kvm_vcpu.h
>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>> @@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct
>> loongarch_fpu *fpu) { }
>>   static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu)
>> { }
>>   #endif
>>   +#ifdef CONFIG_CPU_HAS_LASX
>> +void kvm_own_lasx(struct kvm_vcpu *vcpu);
>> +void kvm_save_lasx(struct loongarch_fpu *fpu);
>> +void kvm_restore_lasx(struct loongarch_fpu *fpu);
>> +#else
>> +static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
>> +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
>> +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
>> +#endif
>> +
>>   void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>   void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>   void kvm_reset_timer(struct kvm_vcpu *vcpu);
>> diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
>> index d53ab10f46..f4524fe866 100644
>> --- a/arch/loongarch/kernel/fpu.S
>> +++ b/arch/loongarch/kernel/fpu.S
>> @@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
>>       lasx_restore_all_upper a0 t0 t1
>>       jr    ra
>>   SYM_FUNC_END(_restore_lasx_upper)
>> +EXPORT_SYMBOL(_restore_lasx_upper)
>
> Why the added export? It doesn't seem necessary, given the previous
> patch doesn't have a similar export added for _restore_lsx_upper. (Or
> if it's truly needed it should probably become EXPORT_SYMBOL_GPL.)
It needs to be exported, as it is called by kvm_own_lasx. However,
"_restore_lsx_upper" is not used in KVM.

Thanks
Tianrui Zhao

2023-11-21 11:57:46

by Huacai Chen

[permalink] [raw]
Subject: Re: [PATCH v1 2/2] LoongArch: KVM: Add lasx support

On Tue, Nov 21, 2023 at 5:59 PM zhaotianrui <[email protected]> wrote:
>
>
> 在 2023/11/16 下午3:19, WANG Xuerui 写道:
> > On 11/15/23 17:19, Tianrui Zhao wrote:
> >> This patch adds LASX support for LoongArch KVM. The LASX means
> >> LoongArch 256-bits vector instruction.
> >> There will be LASX exception in KVM when guest use the LASX
> >> instruction. KVM will enable LASX and restore the vector
> >> registers for guest then return to guest to continue running.
> >>
> >> Signed-off-by: Tianrui Zhao <[email protected]>
> >> ---
> >> arch/loongarch/include/asm/kvm_host.h | 6 ++++
> >> arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
> >> arch/loongarch/kernel/fpu.S | 1 +
> >> arch/loongarch/kvm/exit.c | 18 +++++++++++
> >> arch/loongarch/kvm/switch.S | 16 ++++++++++
> >> arch/loongarch/kvm/trace.h | 4 ++-
> >> arch/loongarch/kvm/vcpu.c | 43 ++++++++++++++++++++++++++-
> >> 7 files changed, 96 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/arch/loongarch/include/asm/kvm_host.h
> >> b/arch/loongarch/include/asm/kvm_host.h
> >> index 6c65c25169..4c05b5eca0 100644
> >> --- a/arch/loongarch/include/asm/kvm_host.h
> >> +++ b/arch/loongarch/include/asm/kvm_host.h
> >> @@ -95,6 +95,7 @@ enum emulation_result {
> >> #define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
> >> #define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
> >> #define KVM_LARCH_LSX (0x1 << 3)
> >> +#define KVM_LARCH_LASX (0x1 << 4)
> >> struct kvm_vcpu_arch {
> >> /*
> >> @@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct
> >> kvm_vcpu_arch *arch)
> >> return arch->cpucfg[2] & CPUCFG2_LSX;
> >> }
> >> +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
> >> +{
> >> + return arch->cpucfg[2] & CPUCFG2_LASX;
> >> +}
> >> +
> >> /* Debug: dump vcpu state */
> >> int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
> >> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h
> >> b/arch/loongarch/include/asm/kvm_vcpu.h
> >> index c629771e12..4f87f16018 100644
> >> --- a/arch/loongarch/include/asm/kvm_vcpu.h
> >> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> >> @@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct
> >> loongarch_fpu *fpu) { }
> >> static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu)
> >> { }
> >> #endif
> >> +#ifdef CONFIG_CPU_HAS_LASX
> >> +void kvm_own_lasx(struct kvm_vcpu *vcpu);
> >> +void kvm_save_lasx(struct loongarch_fpu *fpu);
> >> +void kvm_restore_lasx(struct loongarch_fpu *fpu);
> >> +#else
> >> +static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
> >> +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
> >> +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
> >> +#endif
> >> +
> >> void kvm_acquire_timer(struct kvm_vcpu *vcpu);
> >> void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
> >> void kvm_reset_timer(struct kvm_vcpu *vcpu);
> >> diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
> >> index d53ab10f46..f4524fe866 100644
> >> --- a/arch/loongarch/kernel/fpu.S
> >> +++ b/arch/loongarch/kernel/fpu.S
> >> @@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
> >> lasx_restore_all_upper a0 t0 t1
> >> jr ra
> >> SYM_FUNC_END(_restore_lasx_upper)
> >> +EXPORT_SYMBOL(_restore_lasx_upper)
> >
> > Why the added export? It doesn't seem necessary, given the previous
> > patch doesn't have a similar export added for _restore_lsx_upper. (Or
> > if it's truly needed it should probably become EXPORT_SYMBOL_GPL.)
> It is needed to be exported, as it is called by kvm_own_lasx. However
> the "_restore_lsx_upper" is not used in kvm.
To keep consistency, it is better to export both.

Huacai

>
> Thanks
> Tianrui Zhao
>

2023-11-21 12:15:59

by zhaotianrui

[permalink] [raw]
Subject: Re: [PATCH v1 2/2] LoongArch: KVM: Add lasx support


在 2023/11/21 下午7:55, Huacai Chen 写道:
> On Tue, Nov 21, 2023 at 5:59 PM zhaotianrui <[email protected]> wrote:
>>
>> 在 2023/11/16 下午3:19, WANG Xuerui 写道:
>>> On 11/15/23 17:19, Tianrui Zhao wrote:
>>>> This patch adds LASX support for LoongArch KVM. The LASX means
>>>> LoongArch 256-bits vector instruction.
>>>> There will be LASX exception in KVM when guest use the LASX
>>>> instruction. KVM will enable LASX and restore the vector
>>>> registers for guest then return to guest to continue running.
>>>>
>>>> Signed-off-by: Tianrui Zhao <[email protected]>
>>>> ---
>>>> arch/loongarch/include/asm/kvm_host.h | 6 ++++
>>>> arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++++
>>>> arch/loongarch/kernel/fpu.S | 1 +
>>>> arch/loongarch/kvm/exit.c | 18 +++++++++++
>>>> arch/loongarch/kvm/switch.S | 16 ++++++++++
>>>> arch/loongarch/kvm/trace.h | 4 ++-
>>>> arch/loongarch/kvm/vcpu.c | 43 ++++++++++++++++++++++++++-
>>>> 7 files changed, 96 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/arch/loongarch/include/asm/kvm_host.h
>>>> b/arch/loongarch/include/asm/kvm_host.h
>>>> index 6c65c25169..4c05b5eca0 100644
>>>> --- a/arch/loongarch/include/asm/kvm_host.h
>>>> +++ b/arch/loongarch/include/asm/kvm_host.h
>>>> @@ -95,6 +95,7 @@ enum emulation_result {
>>>> #define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
>>>> #define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
>>>> #define KVM_LARCH_LSX (0x1 << 3)
>>>> +#define KVM_LARCH_LASX (0x1 << 4)
>>>> struct kvm_vcpu_arch {
>>>> /*
>>>> @@ -181,6 +182,11 @@ static inline bool kvm_guest_has_lsx(struct
>>>> kvm_vcpu_arch *arch)
>>>> return arch->cpucfg[2] & CPUCFG2_LSX;
>>>> }
>>>> +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
>>>> +{
>>>> + return arch->cpucfg[2] & CPUCFG2_LASX;
>>>> +}
>>>> +
>>>> /* Debug: dump vcpu state */
>>>> int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>>>> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h
>>>> b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> index c629771e12..4f87f16018 100644
>>>> --- a/arch/loongarch/include/asm/kvm_vcpu.h
>>>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> @@ -67,6 +67,16 @@ static inline void kvm_restore_lsx(struct
>>>> loongarch_fpu *fpu) { }
>>>> static inline void kvm_restore_lsx_upper(struct loongarch_fpu *fpu)
>>>> { }
>>>> #endif
>>>> +#ifdef CONFIG_CPU_HAS_LASX
>>>> +void kvm_own_lasx(struct kvm_vcpu *vcpu);
>>>> +void kvm_save_lasx(struct loongarch_fpu *fpu);
>>>> +void kvm_restore_lasx(struct loongarch_fpu *fpu);
>>>> +#else
>>>> +static inline void kvm_own_lasx(struct kvm_vcpu *vcpu) { }
>>>> +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
>>>> +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
>>>> +#endif
>>>> +
>>>> void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>>> void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>>> void kvm_reset_timer(struct kvm_vcpu *vcpu);
>>>> diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
>>>> index d53ab10f46..f4524fe866 100644
>>>> --- a/arch/loongarch/kernel/fpu.S
>>>> +++ b/arch/loongarch/kernel/fpu.S
>>>> @@ -384,6 +384,7 @@ SYM_FUNC_START(_restore_lasx_upper)
>>>> lasx_restore_all_upper a0 t0 t1
>>>> jr ra
>>>> SYM_FUNC_END(_restore_lasx_upper)
>>>> +EXPORT_SYMBOL(_restore_lasx_upper)
>>> Why the added export? It doesn't seem necessary, given the previous
>>> patch doesn't have a similar export added for _restore_lsx_upper. (Or
>>> if it's truly needed it should probably become EXPORT_SYMBOL_GPL.)
>> It is needed to be exported, as it is called by kvm_own_lasx. However
>> the "_restore_lsx_upper" is not used in kvm.
> To keep consistency it is better to export both.
>
> Huacai
Thanks, I will export both functions.

Tianrui Zhao
>
>> Thanks
>> Tianrui Zhao
>>