Loongson Binary Translation (LBT) is used to accelerate binary
translation, which contains 4 scratch registers (scr0 to scr3), x86/ARM
eflags (eflags) and x87 fpu stack pointer (ftop).
Like the FPU extension, a late enabling method is used for LBT: the LBT context
is saved/restored on the vcpu context switch path.
This patch set also adds BT capability detection and a BT register get/set
interface for the userspace VMM, so that a VM supports migration with the BT
extension.
Bibo Mao (3):
LoongArch: KVM: Add HW Binary Translation extension support
LoongArch: KVM: Add LBT feature detection with cpucfg
LoongArch: KVM: Add vm migration support for LBT feature
arch/loongarch/include/asm/kvm_host.h | 8 ++
arch/loongarch/include/asm/kvm_vcpu.h | 10 ++
arch/loongarch/include/uapi/asm/kvm.h | 7 ++
arch/loongarch/kvm/exit.c | 9 ++
arch/loongarch/kvm/vcpu.c | 168 +++++++++++++++++++++++++-
5 files changed, 201 insertions(+), 1 deletion(-)
base-commit: f4345f05c0dfc73c617e66f3b809edb8ddd41075
--
2.39.3
Every vcpu has separate LBT registers: the LBT extension comprises four scr
registers, one eflags register and one ftop register. When a VM migrates, the
VMM needs to get the LBT registers of every vcpu.
Here the macro KVM_LOONGARCH_VCPU_LBT is added for vcpu attr control info, and
the following macros are added to get/put the LBT registers.
KVM_LOONGARCH_VCPU_LBT_SCR0
KVM_LOONGARCH_VCPU_LBT_SCR1
KVM_LOONGARCH_VCPU_LBT_SCR2
KVM_LOONGARCH_VCPU_LBT_SCR3
KVM_LOONGARCH_VCPU_LBT_FLAGS
KVM_LOONGARCH_VCPU_LBT_FTOP
Signed-off-by: Bibo Mao <[email protected]>
---
arch/loongarch/include/uapi/asm/kvm.h | 7 ++
arch/loongarch/kvm/vcpu.c | 103 ++++++++++++++++++++++++++
2 files changed, 110 insertions(+)
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 286b5ce93a57..9c3de257fddf 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -85,6 +85,13 @@ struct kvm_fpu {
#define KVM_LOONGARCH_VCPU_CPUCFG 0
#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
+#define KVM_LOONGARCH_VCPU_LBT 2
+#define KVM_LOONGARCH_VCPU_LBT_SCR0 0
+#define KVM_LOONGARCH_VCPU_LBT_SCR1 1
+#define KVM_LOONGARCH_VCPU_LBT_SCR2 2
+#define KVM_LOONGARCH_VCPU_LBT_SCR3 3
+#define KVM_LOONGARCH_VCPU_LBT_FLAGS 4
+#define KVM_LOONGARCH_VCPU_LBT_FTOP 5
struct kvm_debug_exit_arch {
};
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index b2856539368a..a84c9d527d9d 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -765,6 +765,100 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+static int kvm_loongarch_lbt_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VCPU_LBT_SCR0:
+ case KVM_LOONGARCH_VCPU_LBT_SCR1:
+ case KVM_LOONGARCH_VCPU_LBT_SCR2:
+ case KVM_LOONGARCH_VCPU_LBT_SCR3:
+ case KVM_LOONGARCH_VCPU_LBT_FLAGS:
+ case KVM_LOONGARCH_VCPU_LBT_FTOP:
+ return 0;
+ default:
+ return -ENXIO;
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_loongarch_lbt_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ uint64_t val;
+
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VCPU_LBT_SCR0:
+ val = vcpu->arch.lbt.scr0;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR1:
+ val = vcpu->arch.lbt.scr1;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR2:
+ val = vcpu->arch.lbt.scr2;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR3:
+ val = vcpu->arch.lbt.scr3;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_FLAGS:
+ val = vcpu->arch.lbt.eflags;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_FTOP:
+ val = vcpu->arch.fpu.ftop;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ if (put_user(val, (uint64_t __user *)attr->addr))
+ return -EFAULT;
+ return 0;
+}
+
+static int kvm_loongarch_lbt_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 val;
+
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ if (get_user(val, (u64 __user *)attr->addr))
+ return -EFAULT;
+
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VCPU_LBT_SCR0:
+ vcpu->arch.lbt.scr0 = val;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR1:
+ vcpu->arch.lbt.scr1 = val;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR2:
+ vcpu->arch.lbt.scr2 = val;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_SCR3:
+ vcpu->arch.lbt.scr3 = val;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_FLAGS:
+ vcpu->arch.lbt.eflags = val;
+ break;
+ case KVM_LOONGARCH_VCPU_LBT_FTOP:
+ vcpu->arch.fpu.ftop = val;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -790,6 +884,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_LBT:
+ ret = kvm_loongarch_lbt_has_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -825,6 +922,9 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_LBT:
+ ret = kvm_loongarch_lbt_get_attr(vcpu, attr);
+ break;
default:
break;
}
@@ -850,6 +950,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
break;
+ case KVM_LOONGARCH_VCPU_LBT:
+ ret = kvm_loongarch_lbt_set_attr(vcpu, attr);
+ break;
default:
break;
}
--
2.39.3
Loongson Binary Translation (LBT) is used to accelerate binary translation,
which contains 4 scratch registers (scr0 to scr3), x86/ARM eflags (eflags)
and x87 fpu stack pointer (ftop).
Like the FPU extension, a late enabling method is used for LBT: the LBT context
is saved/restored on the vcpu context switch path.
Signed-off-by: Bibo Mao <[email protected]>
---
arch/loongarch/include/asm/kvm_host.h | 8 ++++
arch/loongarch/include/asm/kvm_vcpu.h | 10 +++++
arch/loongarch/kvm/exit.c | 9 ++++
arch/loongarch/kvm/vcpu.c | 59 ++++++++++++++++++++++++++-
4 files changed, 85 insertions(+), 1 deletion(-)
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 30bda553c54d..63052449dc6b 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -130,6 +130,7 @@ enum emulation_result {
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 5)
struct kvm_vcpu_arch {
/*
@@ -163,6 +164,7 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
@@ -232,6 +234,12 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT
+ | CPUCFG2_MIPSBT);
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index de6b17262d8e..e2961dd98413 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -75,6 +75,16 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu,
+ unsigned long fcsr) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu) { }
+#endif
+
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index af0f1c46e4eb..683c3e95f630 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -819,6 +819,14 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+{
+ if (kvm_own_lbt(vcpu))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+ return RESUME_GUEST;
+}
+
/*
* LoongArch KVM callback handling for unimplemented guest exiting
*/
@@ -851,6 +859,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
[EXCCODE_HVC] = kvm_handle_hypercall,
+ [EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
};
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 4289a0f545fe..d93ec21269da 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -947,12 +948,64 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return 0;
}
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -EINVAL;
+
+ preempt_disable();
+ set_csr_euen(CSR_EUEN_LBTEN);
+
+ _restore_lbt(&vcpu->arch.lbt);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ preempt_enable();
+ return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+ _save_lbt(&vcpu->arch.lbt);
+ clear_csr_euen(CSR_EUEN_LBTEN);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+ }
+ preempt_enable();
+}
+
+static void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+ /*
+ * if TM is enabled, top register save/restore will
+ * cause lbt exception, here enable lbt in advance
+ */
+ if (fcsr & FPU_CSR_TM)
+ kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu)
+{
+ unsigned long fcsr;
+
+ if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
+ fcsr = read_fcsr(LOONGARCH_FCSR0);
+ kvm_enable_lbt_fpu(vcpu, fcsr);
+ }
+}
+#endif
+
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- /* Enable FPU */
+ /*
+ * Enable FPU for guest
+ * We set FR and FRE according to guest context
+ */
+ kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN);
kvm_restore_fpu(&vcpu->arch.fpu);
@@ -972,6 +1025,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
preempt_disable();
/* Enable LSX for guest */
+ kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
case KVM_LARCH_FPU:
@@ -1006,6 +1060,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
preempt_disable();
+ kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
case KVM_LARCH_LSX:
@@ -1037,6 +1092,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
+ kvm_check_fcsr(vcpu);
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
kvm_save_lasx(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -1059,6 +1115,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable FPU */
clear_csr_euen(CSR_EUEN_FPEN);
}
+ kvm_lose_lbt(vcpu);
preempt_enable();
}
--
2.39.3