Add LoongArch vcpu related header files, including vcpu csr
information, irq number defines, and some vcpu interfaces.
Reviewed-by: Bibo Mao <[email protected]>
Signed-off-by: Tianrui Zhao <[email protected]>
---
arch/loongarch/include/asm/insn-def.h | 55 ++++++
arch/loongarch/include/asm/kvm_csr.h | 231 +++++++++++++++++++++++++
arch/loongarch/include/asm/kvm_vcpu.h | 97 +++++++++++
arch/loongarch/include/asm/loongarch.h | 20 ++-
arch/loongarch/kvm/trace.h | 168 ++++++++++++++++++
5 files changed, 566 insertions(+), 5 deletions(-)
create mode 100644 arch/loongarch/include/asm/insn-def.h
create mode 100644 arch/loongarch/include/asm/kvm_csr.h
create mode 100644 arch/loongarch/include/asm/kvm_vcpu.h
create mode 100644 arch/loongarch/kvm/trace.h
diff --git a/arch/loongarch/include/asm/insn-def.h b/arch/loongarch/include/asm/insn-def.h
new file mode 100644
index 000000000000..e285ee108fb0
--- /dev/null
+++ b/arch/loongarch/include/asm/insn-def.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+#include <asm/asm.h>
+
+#define INSN_STR(x) __stringify(x)
+#define CSR_RD_SHIFT 0
+#define CSR_RJ_SHIFT 5
+#define CSR_SIMM14_SHIFT 10
+#define CSR_OPCODE_SHIFT 24
+
+#define DEFINE_INSN_CSR \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_csr, opcode, rj, rd, simm14\n" \
+" .4byte ((\\opcode << " INSN_STR(CSR_OPCODE_SHIFT) ") |" \
+" (.L__gpr_num_\\rj << " INSN_STR(CSR_RJ_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " INSN_STR(CSR_RD_SHIFT) ") |" \
+" (\\simm14 << " INSN_STR(CSR_SIMM14_SHIFT) "))\n" \
+" .endm\n"
+
+#define UNDEFINE_INSN_CSR \
+" .purgem insn_csr\n"
+
+#define __INSN_CSR(opcode, rj, rd, simm14) \
+ DEFINE_INSN_CSR \
+ "insn_csr " opcode ", " rj ", " rd ", " simm14 "\n" \
+ UNDEFINE_INSN_CSR
+
+
+#define INSN_CSR(opcode, rj, rd, simm14) \
+ __INSN_CSR(LARCH_##opcode, LARCH_##rj, LARCH_##rd, \
+ LARCH_##simm14)
+
+#define __ASM_STR(x) #x
+#define LARCH_OPCODE(v) __ASM_STR(v)
+#define LARCH_SIMM14(v) __ASM_STR(v)
+#define __LARCH_REG(v) __ASM_STR(v)
+#define LARCH___RD(v) __LARCH_REG(v)
+#define LARCH___RJ(v) __LARCH_REG(v)
+#define LARCH_OPCODE_GCSR LARCH_OPCODE(5)
+
+#define GCSR_read(csr, rd) \
+ INSN_CSR(OPCODE_GCSR, __RJ(zero), __RD(rd), SIMM14(csr))
+
+#define GCSR_write(csr, rd) \
+ INSN_CSR(OPCODE_GCSR, __RJ($r1), __RD(rd), SIMM14(csr))
+
+#define GCSR_xchg(csr, rj, rd) \
+ INSN_CSR(OPCODE_GCSR, __RJ(rj), __RD(rd), SIMM14(csr))
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
new file mode 100644
index 000000000000..10dba5bc6df1
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_CSR_H__
+#define __ASM_LOONGARCH_KVM_CSR_H__
+#include <asm/loongarch.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+
+/*
+ * The gcsrrd instruction will be available in binutils later;
+ * it reads val from guest csr register %[csr]:
+ * gcsrrd %[val], %[csr]
+ */
+#define gcsr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ (GCSR_read(csr, %0) \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+/*
+ * The gcsrwr instruction will be available in binutils later;
+ * it writes val to guest csr register %[csr]:
+ * gcsrwr %[val], %[csr]
+ */
+#define gcsr_write(val, csr) \
+({ \
+ register unsigned long __v = val; \
+ __asm__ __volatile__ (GCSR_write(csr, %0) \
+ : "+r" (__v) : \
+ : "memory"); \
+})
+
+/*
+ * The gcsrxchg instruction will be available in binutils later;
+ * it replaces masked bits of guest csr register %[csr] with val:
+ * gcsrxchg %[val], %[mask], %[csr]
+ */
+#define gcsr_xchg(val, mask, csr) \
+({ \
+ register unsigned long __v = val; \
+ __asm__ __volatile__ (GCSR_xchg(csr, %1, %0) \
+ : "+r" (__v) \
+ : "r" (mask) \
+ : "memory"); \
+ __v; \
+})
+
+/* Guest CSRs read and write */
+#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD)
+#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD)
+#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD)
+#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD)
+#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN)
+#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN)
+#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC)
+#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC)
+#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG)
+#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG)
+#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT)
+#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT)
+#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA)
+#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA)
+#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV)
+#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV)
+#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI)
+#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI)
+#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY)
+#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY)
+
+#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX)
+#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX)
+#define read_gcsr_tlbhi() gcsr_read(LOONGARCH_CSR_TLBEHI)
+#define write_gcsr_tlbhi(val) gcsr_write(val, LOONGARCH_CSR_TLBEHI)
+#define read_gcsr_tlblo0() gcsr_read(LOONGARCH_CSR_TLBELO0)
+#define write_gcsr_tlblo0(val) gcsr_write(val, LOONGARCH_CSR_TLBELO0)
+#define read_gcsr_tlblo1() gcsr_read(LOONGARCH_CSR_TLBELO1)
+#define write_gcsr_tlblo1(val) gcsr_write(val, LOONGARCH_CSR_TLBELO1)
+
+#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID)
+#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID)
+#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL)
+#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL)
+#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD)
+#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD)
+#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0)
+#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0)
+#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1)
+#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1)
+#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE)
+#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG)
+#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG)
+
+#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID)
+#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID)
+#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1)
+#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1)
+#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2)
+#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2)
+#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3)
+#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3)
+
+#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0)
+#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0)
+#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1)
+#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1)
+#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2)
+#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2)
+#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3)
+#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3)
+#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4)
+#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4)
+#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5)
+#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5)
+#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6)
+#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6)
+#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7)
+#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7)
+
+#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID)
+#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID)
+#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG)
+#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG)
+#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL)
+#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL)
+#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC)
+#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC)
+
+#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL)
+#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL)
+
+#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY)
+#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY)
+#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV)
+#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV)
+#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA)
+#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA)
+#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE)
+#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE)
+#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0)
+#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0)
+#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
+#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
+#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
+#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
+#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
+#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
+
+#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
+#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
+#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
+#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
+#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
+#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
+#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
+#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
+
+/* Guest related CSRs */
+#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
+#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
+#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
+#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
+#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
+#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
+#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
+#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
+#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
+#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
+#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
+
+#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
+
+__BUILD_GCSR_OP(llbctl)
+__BUILD_GCSR_OP(tlbidx)
+__BUILD_CSR_OP(gcfg)
+__BUILD_CSR_OP(gstat)
+__BUILD_CSR_OP(gtlbc)
+__BUILD_CSR_OP(gintc)
+
+#define set_gcsr_estat(val) \
+ gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_gcsr_estat(val) \
+ gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#define kvm_read_hw_gcsr(id) gcsr_read(id)
+#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
+
+int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v);
+int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 v);
+
+int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
+#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+
+static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ return csr->csrs[gid];
+}
+
+static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] = val;
+}
+
+static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] |= val;
+}
+
+static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long mask,
+ unsigned long val)
+{
+ unsigned long _mask = mask;
+
+ csr->csrs[gid] &= ~_mask;
+ csr->csrs[gid] |= val & _mask;
+}
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
new file mode 100644
index 000000000000..74deaf55d22c
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
+#define __ASM_LOONGARCH_KVM_VCPU_H__
+
+#include <linux/kvm_host.h>
+#include <asm/loongarch.h>
+
+/* Controlled by 0x5 guest estat */
+#define CPU_SIP0 (_ULCAST_(1))
+#define CPU_SIP1 (_ULCAST_(1) << 1)
+#define CPU_PMU (_ULCAST_(1) << 10)
+#define CPU_TIMER (_ULCAST_(1) << 11)
+#define CPU_IPI (_ULCAST_(1) << 12)
+
+/* Controlled by 0x52 guest exception VIP,
+ * aligned to estat bits 5~12
+ */
+#define CPU_IP0 (_ULCAST_(1))
+#define CPU_IP1 (_ULCAST_(1) << 1)
+#define CPU_IP2 (_ULCAST_(1) << 2)
+#define CPU_IP3 (_ULCAST_(1) << 3)
+#define CPU_IP4 (_ULCAST_(1) << 4)
+#define CPU_IP5 (_ULCAST_(1) << 5)
+#define CPU_IP6 (_ULCAST_(1) << 6)
+#define CPU_IP7 (_ULCAST_(1) << 7)
+
+#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
+#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
+#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
+#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
+#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
+#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
+
+/* Irq_type field */
+#define KVM_LOONGSON_IRQ_TYPE_CPU_IP 0
+#define KVM_LOONGSON_IRQ_TYPE_CPU_IO 1
+#define KVM_LOONGSON_IRQ_TYPE_HT 2
+#define KVM_LOONGSON_IRQ_TYPE_MSI 3
+#define KVM_LOONGSON_IRQ_TYPE_IOAPIC 4
+#define KVM_LOONGSON_IRQ_TYPE_ROUTE 5
+
+/* Out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_LOONGSON_IRQ_CPU_IRQ 0
+#define KVM_LOONGSON_IRQ_CPU_FIQ 1
+#define KVM_LOONGSON_CPU_IP_NUM 8
+
+typedef union loongarch_instruction larch_inst;
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+
+int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
+int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
+int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int _kvm_emu_idle(struct kvm_vcpu *vcpu);
+int _kvm_handle_pv_hcall(struct kvm_vcpu *vcpu);
+int _kvm_pending_timer(struct kvm_vcpu *vcpu);
+int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
+void _kvm_deliver_intr(struct kvm_vcpu *vcpu);
+
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+void kvm_save_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fcsr(struct loongarch_fpu *fpu);
+
+void kvm_acquire_timer(struct kvm_vcpu *vcpu);
+void kvm_reset_timer(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
+void kvm_restore_timer(struct kvm_vcpu *vcpu);
+void kvm_save_timer(struct kvm_vcpu *vcpu);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_loongarch_interrupt *irq);
+/*
+ * LoongArch KVM guest interrupt handling
+ */
+static inline void _kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ set_bit(irq, &vcpu->arch.irq_pending);
+ clear_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline void _kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ clear_bit(irq, &vcpu->arch.irq_pending);
+ set_bit(irq, &vcpu->arch.irq_clear);
+}
+
+#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 35e8a52fea11..cb1c66c5a92f 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -11,6 +11,7 @@
#ifndef __ASSEMBLY__
#include <larchintrin.h>
+#include <asm/insn-def.h>
/*
* parse_r var, r - Helper assembler macro for parsing register names.
@@ -309,6 +310,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
#define CSR_ECFG_VS_SHIFT 16
#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
#define CSR_ECFG_IM_SHIFT 0
#define CSR_ECFG_IM_WIDTH 14
@@ -397,13 +399,14 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
-#define CSR_GTLBC_RID_SHIFT 16
-#define CSR_GTLBC_RID_WIDTH 8
-#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TGID_SHIFT 16
+#define CSR_GTLBC_TGID_WIDTH 8
+#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
+#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
#define CSR_GTLBC_TOTI_SHIFT 13
#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
-#define CSR_GTLBC_USERID_SHIFT 12
-#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_USETGID_SHIFT 12
+#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
#define CSR_GTLBC_GMTLBSZ_SHIFT 0
#define CSR_GTLBC_GMTLBSZ_WIDTH 6
#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
@@ -555,6 +558,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
#define CSR_GSTAT_GID_SHIFT 16
#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
#define CSR_GSTAT_GIDBIT_SHIFT 4
#define CSR_GSTAT_GIDBIT_WIDTH 6
@@ -605,6 +609,12 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATP_NEST_SHIFT 2
+#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
+#define CSR_GCFG_MATP_ROOT_SHIFT 1
+#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
+#define CSR_GCFG_MATP_GUEST_SHIFT 0
+#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
#define CSR_GINTC_HC_SHIFT 16
diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
new file mode 100644
index 000000000000..17b28d94d569
--- /dev/null
+++ b/arch/loongarch/kvm/trace.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+#include <asm/kvm_csr.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for VM enters
+ */
+DECLARE_EVENT_CLASS(kvm_transition,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ ),
+
+ TP_printk("PC: 0x%08lx",
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_transition, kvm_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_reenter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_out,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+/* Further exit reasons */
+#define KVM_TRACE_EXIT_IDLE 64
+#define KVM_TRACE_EXIT_CACHE 65
+#define KVM_TRACE_EXIT_SIGNAL 66
+
+/* Tracepoints for VM exits */
+#define kvm_trace_symbol_exit_types \
+ { KVM_TRACE_EXIT_IDLE, "IDLE" }, \
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }
+
+TRACE_EVENT(kvm_exit_gspr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
+ TP_ARGS(vcpu, inst_word),
+ TP_STRUCT__entry(
+ __field(unsigned int, inst_word)
+ ),
+
+ TP_fast_assign(
+ __entry->inst_word = inst_word;
+ ),
+
+ TP_printk("inst word: 0x%08x",
+ __entry->inst_word)
+);
+
+
+DECLARE_EVENT_CLASS(kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(unsigned int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("[%s]PC: 0x%08lx",
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_exit, kvm_exit_idle,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+DEFINE_EVENT(kvm_exit, kvm_exit_cache,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+DEFINE_EVENT(kvm_exit, kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason));
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+
+#define kvm_trace_symbol_aux_op \
+ { KVM_TRACE_AUX_RESTORE, "restore" }, \
+ { KVM_TRACE_AUX_SAVE, "save" }, \
+ { KVM_TRACE_AUX_ENABLE, "enable" }, \
+ { KVM_TRACE_AUX_DISABLE, "disable" }, \
+ { KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state \
+ { KVM_TRACE_AUX_FPU, "FPU" }
+
+TRACE_EVENT(kvm_aux,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+ unsigned int state),
+ TP_ARGS(vcpu, op, state),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, op)
+ __field(u8, state)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->op = op;
+ __entry->state = state;
+ ),
+
+ TP_printk("%s %s PC: 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_aux_op),
+ __print_symbolic(__entry->state,
+ kvm_trace_symbol_aux_state),
+ __entry->pc)
+);
+
+TRACE_EVENT(kvm_vpid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
+ TP_ARGS(vcpu, vpid),
+ TP_STRUCT__entry(
+ __field(unsigned long, vpid)
+ ),
+
+ TP_fast_assign(
+ __entry->vpid = vpid;
+ ),
+
+ TP_printk("vpid: 0x%08lx",
+ __entry->vpid)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--
2.39.1
On 2023/7/14 15:11, Huacai Chen wrote:
> Hi, Tianrui,
>
> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>
>> Add LoongArch vcpu related header files, including vcpu csr
>> information, irq number defines, and some vcpu interfaces.
>>
>> Reviewed-by: Bibo Mao <[email protected]>
>> Signed-off-by: Tianrui Zhao <[email protected]>
>> ---
>> arch/loongarch/include/asm/insn-def.h | 55 ++++++
>> arch/loongarch/include/asm/kvm_csr.h | 231 +++++++++++++++++++++++++
>> arch/loongarch/include/asm/kvm_vcpu.h | 97 +++++++++++
>> arch/loongarch/include/asm/loongarch.h | 20 ++-
>> arch/loongarch/kvm/trace.h | 168 ++++++++++++++++++
>> 5 files changed, 566 insertions(+), 5 deletions(-)
>> create mode 100644 arch/loongarch/include/asm/insn-def.h
>> create mode 100644 arch/loongarch/include/asm/kvm_csr.h
>> create mode 100644 arch/loongarch/include/asm/kvm_vcpu.h
>> create mode 100644 arch/loongarch/kvm/trace.h
>>
>> diff --git a/arch/loongarch/include/asm/insn-def.h b/arch/loongarch/include/asm/insn-def.h
>> new file mode 100644
>> index 000000000000..e285ee108fb0
>> --- /dev/null
>> +++ b/arch/loongarch/include/asm/insn-def.h
>> @@ -0,0 +1,55 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +
>> +#ifndef __ASM_INSN_DEF_H
>> +#define __ASM_INSN_DEF_H
>> +
>> +#include <linux/stringify.h>
>> +#include <asm/gpr-num.h>
>> +#include <asm/asm.h>
>> +
>> +#define INSN_STR(x) __stringify(x)
>> +#define CSR_RD_SHIFT 0
>> +#define CSR_RJ_SHIFT 5
>> +#define CSR_SIMM14_SHIFT 10
>> +#define CSR_OPCODE_SHIFT 24
> As all needed instructions have already been upstreamed in binutils
> and binutils 2.41 will be released soon, I suggest again to introduce
> AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
It is good news that binutils 2.41 supports the LVZ assembly extension.
We will add AS_HAS_LVZ_EXTENSION support; however, KVM need not depend
on AS_HAS_LVZ_EXTENSION, since binutils 2.41 is not yet widely deployed.
Yes, we need to write beautiful code, but we should also write code with
practical usage in mind. Paolo, what is your suggestion?
x86 has similar hand-encoded binary code, for example:

static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx, %edx, %eax;" */
	#ifdef CONFIG_AS_TPAUSE
	asm volatile("tpause %%ecx\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#else
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#endif
}
RISC-V has similar code as well:

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}
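
For comparison, a LoongArch variant gated the same way might look like
the sketch below. This is illustrative only: the Kconfig symbol
CONFIG_AS_HAS_LVZ_EXTENSION (probed with something like $(as-instr ...)
in Kconfig) and the "i" operand constraint for the CSR number are
assumptions, not part of this series.

/*
 * Sketch only, mirroring the x86 __tpause() pattern above.
 * CONFIG_AS_HAS_LVZ_EXTENSION is a hypothetical Kconfig symbol;
 * the gcsrrd operand constraints are likewise assumptions.
 */
#ifdef CONFIG_AS_HAS_LVZ_EXTENSION
#define gcsr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__("gcsrrd %0, %1\n"			\
			     : "=r" (__v)			\
			     : "i" (csr)			\
			     : "memory");			\
	__v;							\
})
#else
/* Fall back to the .4byte encoding from insn-def.h */
#define gcsr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__(GCSR_read(csr, %0)			\
			     : "=r" (__v) : : "memory");	\
	__v;							\
})
#endif

Either way the callers stay unchanged; only how the instruction word is
produced differs between the two branches.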
Regards
Bibo Mao
>
> And this is easier than I suggested before (introduce
> CC_HAS_LVZ_EXTENSION), because CC_HAS_XXX depends on both gcc and
> binutils, but AS_HAS_XXX only depends on binutils.
>
> Huacai
>
> +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
> +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
> +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
> +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
> +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
> +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
> +
> +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
> +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
> +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
> +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
> +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
> +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
> +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
> +#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
> +
> +/* Guest related CSRs */
> +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
> +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
> +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
> +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
> +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
> +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
> +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
> +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
> +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
> +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
> +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
> +
> +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
> +
> +__BUILD_GCSR_OP(llbctl)
> +__BUILD_GCSR_OP(tlbidx)
> +__BUILD_CSR_OP(gcfg)
> +__BUILD_CSR_OP(gstat)
> +__BUILD_CSR_OP(gtlbc)
> +__BUILD_CSR_OP(gintc)
> +
> +#define set_gcsr_estat(val) \
> + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
> +#define clear_gcsr_estat(val) \
> + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
> +
> +#define kvm_read_hw_gcsr(id) gcsr_read(id)
> +#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
> +
> +int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v);
> +int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 v);
> +
> +int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
> +
> +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
> +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
> +
> +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
> +{
> + return csr->csrs[gid];
> +}
> +
> +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr,
> + int gid, unsigned long val)
> +{
> + csr->csrs[gid] = val;
> +}
> +
> +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
> + int gid, unsigned long val)
> +{
> + csr->csrs[gid] |= val;
> +}
> +
> +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
> + int gid, unsigned long mask,
> + unsigned long val)
> +{
> + unsigned long _mask = mask;
> +
> + csr->csrs[gid] &= ~_mask;
> + csr->csrs[gid] |= val & _mask;
> +}
> +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
> new file mode 100644
> index 000000000000..74deaf55d22c
> --- /dev/null
> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
> @@ -0,0 +1,97 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> + */
> +
> +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
> +#define __ASM_LOONGARCH_KVM_VCPU_H__
> +
> +#include <linux/kvm_host.h>
> +#include <asm/loongarch.h>
> +
> +/* Controlled by 0x5 guest exst */
> +#define CPU_SIP0 (_ULCAST_(1))
> +#define CPU_SIP1 (_ULCAST_(1) << 1)
> +#define CPU_PMU (_ULCAST_(1) << 10)
> +#define CPU_TIMER (_ULCAST_(1) << 11)
> +#define CPU_IPI (_ULCAST_(1) << 12)
> +
> +/* Controlled by 0x52 guest exception VIP
> + * aligned to exst bit 5~12
> + */
> +#define CPU_IP0 (_ULCAST_(1))
> +#define CPU_IP1 (_ULCAST_(1) << 1)
> +#define CPU_IP2 (_ULCAST_(1) << 2)
> +#define CPU_IP3 (_ULCAST_(1) << 3)
> +#define CPU_IP4 (_ULCAST_(1) << 4)
> +#define CPU_IP5 (_ULCAST_(1) << 5)
> +#define CPU_IP6 (_ULCAST_(1) << 6)
> +#define CPU_IP7 (_ULCAST_(1) << 7)
> +
> +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
> +
> +/* KVM_IRQ_LINE irq field index values */
> +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
> +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
> +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
> +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
> +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
> +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
> +
> +/* Irq_type field */
> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IP 0
> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IO 1
> +#define KVM_LOONGSON_IRQ_TYPE_HT 2
> +#define KVM_LOONGSON_IRQ_TYPE_MSI 3
> +#define KVM_LOONGSON_IRQ_TYPE_IOAPIC 4
> +#define KVM_LOONGSON_IRQ_TYPE_ROUTE 5
> +
> +/* Out-of-kernel GIC cpu interrupt injection irq_number field */
> +#define KVM_LOONGSON_IRQ_CPU_IRQ 0
> +#define KVM_LOONGSON_IRQ_CPU_FIQ 1
> +#define KVM_LOONGSON_CPU_IP_NUM 8
> +
> +typedef union loongarch_instruction larch_inst;
> +typedef int (*exit_handle_fn)(struct kvm_vcpu *);
> +
> +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
> +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
> +int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int _kvm_emu_idle(struct kvm_vcpu *vcpu);
> +int _kvm_handle_pv_hcall(struct kvm_vcpu *vcpu);
> +int _kvm_pending_timer(struct kvm_vcpu *vcpu);
> +int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
> +void _kvm_deliver_intr(struct kvm_vcpu *vcpu);
> +
> +void kvm_own_fpu(struct kvm_vcpu *vcpu);
> +void kvm_lose_fpu(struct kvm_vcpu *vcpu);
> +void kvm_save_fpu(struct loongarch_fpu *fpu);
> +void kvm_restore_fpu(struct loongarch_fpu *fpu);
> +void kvm_restore_fcsr(struct loongarch_fpu *fpu);
> +
> +void kvm_acquire_timer(struct kvm_vcpu *vcpu);
> +void kvm_reset_timer(struct kvm_vcpu *vcpu);
> +enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
> +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
> +void kvm_restore_timer(struct kvm_vcpu *vcpu);
> +void kvm_save_timer(struct kvm_vcpu *vcpu);
> +
> +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
> + struct kvm_loongarch_interrupt *irq);
> +/*
> + * Loongarch KVM guest interrupt handling
> + */
> +static inline void _kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
> +{
> + set_bit(irq, &vcpu->arch.irq_pending);
> + clear_bit(irq, &vcpu->arch.irq_clear);
> +}
> +
> +static inline void _kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
> +{
> + clear_bit(irq, &vcpu->arch.irq_pending);
> + set_bit(irq, &vcpu->arch.irq_clear);
> +}
> +
> +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
> diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
> index 35e8a52fea11..cb1c66c5a92f 100644
> --- a/arch/loongarch/include/asm/loongarch.h
> +++ b/arch/loongarch/include/asm/loongarch.h
> @@ -11,6 +11,7 @@
>
> #ifndef __ASSEMBLY__
> #include <larchintrin.h>
> +#include <asm/insn-def.h>
>
> /*
> * parse_r var, r - Helper assembler macro for parsing register names.
> @@ -309,6 +310,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
> #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
> #define CSR_ECFG_VS_SHIFT 16
> #define CSR_ECFG_VS_WIDTH 3
> +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
> #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
> #define CSR_ECFG_IM_SHIFT 0
> #define CSR_ECFG_IM_WIDTH 14
> @@ -397,13 +399,14 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
> #define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
>
> #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
> -#define CSR_GTLBC_RID_SHIFT 16
> -#define CSR_GTLBC_RID_WIDTH 8
> -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
> +#define CSR_GTLBC_TGID_SHIFT 16
> +#define CSR_GTLBC_TGID_WIDTH 8
> +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
> +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
> #define CSR_GTLBC_TOTI_SHIFT 13
> #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
> -#define CSR_GTLBC_USERID_SHIFT 12
> -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
> +#define CSR_GTLBC_USETGID_SHIFT 12
> +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
> #define CSR_GTLBC_GMTLBSZ_SHIFT 0
> #define CSR_GTLBC_GMTLBSZ_WIDTH 6
> #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
> @@ -555,6 +558,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
> #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
> #define CSR_GSTAT_GID_SHIFT 16
> #define CSR_GSTAT_GID_WIDTH 8
> +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
> #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
> #define CSR_GSTAT_GIDBIT_SHIFT 4
> #define CSR_GSTAT_GIDBIT_WIDTH 6
> @@ -605,6 +609,12 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
> #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
> #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
> #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
> +#define CSR_GCFG_MATP_NEST_SHIFT 2
> +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
> +#define CSR_GCFG_MATP_ROOT_SHIFT 1
> +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
> +#define CSR_GCFG_MATP_GUEST_SHIFT 0
> +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
>
> #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
> #define CSR_GINTC_HC_SHIFT 16
> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
> new file mode 100644
> index 000000000000..17b28d94d569
> --- /dev/null
> +++ b/arch/loongarch/kvm/trace.h
> @@ -0,0 +1,168 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
> + */
> +
> +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _TRACE_KVM_H
> +
> +#include <linux/tracepoint.h>
> +#include <asm/kvm_csr.h>
> +
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM kvm
> +
> +/*
> + * Tracepoints for VM enters
> + */
> +DECLARE_EVENT_CLASS(kvm_transition,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu),
> + TP_STRUCT__entry(
> + __field(unsigned long, pc)
> + ),
> +
> + TP_fast_assign(
> + __entry->pc = vcpu->arch.pc;
> + ),
> +
> + TP_printk("PC: 0x%08lx",
> + __entry->pc)
> +);
> +
> +DEFINE_EVENT(kvm_transition, kvm_enter,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu));
> +
> +DEFINE_EVENT(kvm_transition, kvm_reenter,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu));
> +
> +DEFINE_EVENT(kvm_transition, kvm_out,
> + TP_PROTO(struct kvm_vcpu *vcpu),
> + TP_ARGS(vcpu));
> +
> +/* Further exit reasons */
> +#define KVM_TRACE_EXIT_IDLE 64
> +#define KVM_TRACE_EXIT_CACHE 65
> +#define KVM_TRACE_EXIT_SIGNAL 66
> +
> +/* Tracepoints for VM exits */
> +#define kvm_trace_symbol_exit_types \
> + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \
> + { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
> + { KVM_TRACE_EXIT_SIGNAL, "Signal" }
> +
> +TRACE_EVENT(kvm_exit_gspr,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
> + TP_ARGS(vcpu, inst_word),
> + TP_STRUCT__entry(
> + __field(unsigned int, inst_word)
> + ),
> +
> + TP_fast_assign(
> + __entry->inst_word = inst_word;
> + ),
> +
> + TP_printk("inst word: 0x%08x",
> + __entry->inst_word)
> +);
> +
> +
> +DECLARE_EVENT_CLASS(kvm_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
> + TP_ARGS(vcpu, reason),
> + TP_STRUCT__entry(
> + __field(unsigned long, pc)
> + __field(unsigned int, reason)
> + ),
> +
> + TP_fast_assign(
> + __entry->pc = vcpu->arch.pc;
> + __entry->reason = reason;
> + ),
> +
> + TP_printk("[%s]PC: 0x%08lx",
> + __print_symbolic(__entry->reason,
> + kvm_trace_symbol_exit_types),
> + __entry->pc)
> +);
> +
> +DEFINE_EVENT(kvm_exit, kvm_exit_idle,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
> + TP_ARGS(vcpu, reason));
> +
> +DEFINE_EVENT(kvm_exit, kvm_exit_cache,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
> + TP_ARGS(vcpu, reason));
> +
> +DEFINE_EVENT(kvm_exit, kvm_exit,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
> + TP_ARGS(vcpu, reason));
> +
> +#define KVM_TRACE_AUX_RESTORE 0
> +#define KVM_TRACE_AUX_SAVE 1
> +#define KVM_TRACE_AUX_ENABLE 2
> +#define KVM_TRACE_AUX_DISABLE 3
> +#define KVM_TRACE_AUX_DISCARD 4
> +
> +#define KVM_TRACE_AUX_FPU 1
> +
> +#define kvm_trace_symbol_aux_op \
> + { KVM_TRACE_AUX_RESTORE, "restore" }, \
> + { KVM_TRACE_AUX_SAVE, "save" }, \
> + { KVM_TRACE_AUX_ENABLE, "enable" }, \
> + { KVM_TRACE_AUX_DISABLE, "disable" }, \
> + { KVM_TRACE_AUX_DISCARD, "discard" }
> +
> +#define kvm_trace_symbol_aux_state \
> + { KVM_TRACE_AUX_FPU, "FPU" }
> +
> +TRACE_EVENT(kvm_aux,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
> + unsigned int state),
> + TP_ARGS(vcpu, op, state),
> + TP_STRUCT__entry(
> + __field(unsigned long, pc)
> + __field(u8, op)
> + __field(u8, state)
> + ),
> +
> + TP_fast_assign(
> + __entry->pc = vcpu->arch.pc;
> + __entry->op = op;
> + __entry->state = state;
> + ),
> +
> + TP_printk("%s %s PC: 0x%08lx",
> + __print_symbolic(__entry->op,
> + kvm_trace_symbol_aux_op),
> + __print_symbolic(__entry->state,
> + kvm_trace_symbol_aux_state),
> + __entry->pc)
> +);
> +
> +TRACE_EVENT(kvm_vpid_change,
> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
> + TP_ARGS(vcpu, vpid),
> + TP_STRUCT__entry(
> + __field(unsigned long, vpid)
> + ),
> +
> + TP_fast_assign(
> + __entry->vpid = vpid;
> + ),
> +
> + TP_printk("vpid: 0x%08lx",
> + __entry->vpid)
> +);
> +
> +#endif /* _TRACE_LOONGARCH64_KVM_H */
> +
> +#undef TRACE_INCLUDE_PATH
> +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm
> +#undef TRACE_INCLUDE_FILE
> +#define TRACE_INCLUDE_FILE trace
> +
> +/* This part must be outside protection */
> +#include <trace/define_trace.h>
> --
> 2.39.1
>
>
Hi, Bibo,
On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>
>
>
> > On 2023/7/14 15:11, Huacai Chen wrote:
> > Hi, Tianrui,
> >
> > On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
> >> [...]
> >> +#define INSN_STR(x) __stringify(x)
> >> +#define CSR_RD_SHIFT 0
> >> +#define CSR_RJ_SHIFT 5
> >> +#define CSR_SIMM14_SHIFT 10
> >> +#define CSR_OPCODE_SHIFT 24
> > As all needed instructions have already been upstreamed in binutils and
> > binutils 2.41 will be released soon, I suggest again to introduce
> > AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
> It is good news that binutils 2.41 supports the LVZ assembly language.
> We will add AS_HAS_LVZ_EXTENSION support; however, KVM need not depend
> on AS_HAS_LVZ_EXTENSION since binutils 2.41 is not yet widely used. Yes,
> we need to write beautiful code, but we should also write code with
> practical usage in mind.
1, For pure upstream: the CI toolchain comes from
https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
2.41 will be released soon, the CI toolchain will also be updated soon.

2, For community distributions, such as Fedora rawhide, Debian
unstable and Arch: they usually choose the latest released version, so
binutils 2.41 will be used quickly.

3, For downstream distributions, such as UOS and Kylin: if they choose
a kernel as new as 6.6, they will probably choose binutils as new as
2.41; if they choose an LTS kernel (e.g., 6.1), they should backport
KVM support to the kernel, and then they have no reason not to
backport LVZ instruction support to binutils.

Huacai
>
> Paolo, what is your suggestion?
>
> x86 has similar binary assembly code, like this:
>
> static inline void __tpause(u32 ecx, u32 edx, u32 eax)
> {
> 	/* "tpause %ecx, %edx, %eax;" */
> #ifdef CONFIG_AS_TPAUSE
> 	asm volatile("tpause %%ecx\n"
> 		     :
> 		     : "c"(ecx), "d"(edx), "a"(eax));
> #else
> 	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
> 		     :
> 		     : "c"(ecx), "d"(edx), "a"(eax));
> #endif
> }
>
> Also, riscv has similar code:
>
> void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
> {
> 	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
> }
>
> Regards
> Bibo Mao
> >
> > And this is easier than I suggested before (introduce
> > CC_HAS_LVZ_EXTENSION), because CC_HAS_XXX depends on both gcc and
> > binutils, but AS_HAS_XXX only depends on binutils.
> >
> > Huacai
> >
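For concreteness, the two positions in this thread can be combined the
same way the x86 CONFIG_AS_TPAUSE example above does it. What follows is
a minimal editorial sketch, not part of the patch: it assumes a
hypothetical CONFIG_AS_HAS_LVZ_EXTENSION Kconfig symbol probed from the
assembler, as Huacai suggests, and reuses the patch's existing
.4byte-based GCSR_read() encoding as the fallback branch:

#ifdef CONFIG_AS_HAS_LVZ_EXTENSION
/* New binutils: let the assembler encode the gcsrrd mnemonic. */
#define gcsr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__(					\
		" gcsrrd %[val], %[reg]\n\t"			\
		: [val] "=r" (__v)				\
		: [reg] "i" (csr)				\
		: "memory");					\
	__v;							\
})
#else
/* Old binutils: fall back to the hand-encoded .4byte form. */
#define gcsr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__ (GCSR_read(csr, %0)		\
		: "=r" (__v) : : "memory");			\
	__v;							\
})
#endif

With such a dual path, KVM would build with any binutils while newer
toolchains get readable disassembly; making KVM hard-depend on
AS_HAS_LVZ_EXTENSION instead would simply drop the #else branch. The
mechanism is the same either way; only the policy differs.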
On 2023/7/14 17:22, Huacai Chen wrote:
> Hi, Bibo,
>
> On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>>
>>
>>
>> On 2023/7/14 15:11, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>>> [...]
>>>> +#define INSN_STR(x) __stringify(x)
>>>> +#define CSR_RD_SHIFT 0
>>>> +#define CSR_RJ_SHIFT 5
>>>> +#define CSR_SIMM14_SHIFT 10
>>>> +#define CSR_OPCODE_SHIFT 24
>>> As all needed instructions have already been upstreamed in binutils and
>>> binutils 2.41 will be released soon, I suggest again to introduce
>>> AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
>> It is good news that binutils 2.41 supports the LVZ assembly language.
>> We will add AS_HAS_LVZ_EXTENSION support; however, KVM need not depend
>> on AS_HAS_LVZ_EXTENSION since binutils 2.41 is not yet widely used. Yes,
>> we need to write beautiful code, but we should also write code with
>> practical usage in mind.
> 1, For pure upstream: the CI toolchain comes from
> https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
> 2.41 will be released soon, the CI toolchain will also be updated soon.
>
> 2, For community distributions, such as Fedora rawhide, Debian
> unstable and Arch: they usually choose the latest released version, so
> binutils 2.41 will be used quickly.
>
> 3, For downstream distributions, such as UOS and Kylin: if they choose
> a kernel as new as 6.6, they will probably choose binutils as new as
> 2.41; if they choose an LTS kernel (e.g., 6.1), they should backport
> KVM support to the kernel, and then they have no reason not to
> backport LVZ instruction support to binutils.
commit 616500232e632dba8b03981eeccadacf2fbf1c30
Author: Huacai Chen <[email protected]>
Date:   Thu Jun 29 20:58:43 2023

    LoongArch: Add vector extensions support

    Introduce AS_HAS_LSX_EXTENSION and AS_HAS_LASX_EXTENSION to avoid
    non-vector toolchains complaining about unsupported instructions.

    Signed-off-by: Huacai Chen <[email protected]>

The vector patch 616500232e63 ("Add vector extensions support") was
merged without any Reviewed-by or Acked-by; vector support depends on
AS_HAS_LSX, so it requires all subsequent patches to obey this rule. I
just think this leads things in the wrong direction.

If you have more energy, you can improve mips-kvm since you are the
maintainer of mips-kvm.
Regards
Bibo Mao
>
> Huacai
>
>> [...]
>>>> +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0)
>>>> +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0)
>>>> +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
>>>> +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
>>>> +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
>>>> +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
>>>> +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
>>>> +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
>>>> +
>>>> +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
>>>> +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
>>>> +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
>>>> +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
>>>> +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
>>>> +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
>>>> +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
>>>> +#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
>>>> +
>>>> +/* Guest related CSRs */
>>>> +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
>>>> +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
>>>> +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
>>>> +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
>>>> +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
>>>> +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
>>>> +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
>>>> +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
>>>> +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
>>>> +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
>>>> +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
>>>> +
>>>> +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
>>>> +
>>>> +__BUILD_GCSR_OP(llbctl)
>>>> +__BUILD_GCSR_OP(tlbidx)
>>>> +__BUILD_CSR_OP(gcfg)
>>>> +__BUILD_CSR_OP(gstat)
>>>> +__BUILD_CSR_OP(gtlbc)
>>>> +__BUILD_CSR_OP(gintc)
>>>> +
>>>> +#define set_gcsr_estat(val) \
>>>> + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
>>>> +#define clear_gcsr_estat(val) \
>>>> + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
>>>> +
>>>> +#define kvm_read_hw_gcsr(id) gcsr_read(id)
>>>> +#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
>>>> +
>>>> +int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v);
>>>> +int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 v);
>>>> +
>>>> +int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +
>>>> +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
>>>> +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
>>>> +
>>>> +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
>>>> +{
>>>> + return csr->csrs[gid];
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long val)
>>>> +{
>>>> + csr->csrs[gid] = val;
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long val)
>>>> +{
>>>> + csr->csrs[gid] |= val;
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long mask,
>>>> + unsigned long val)
>>>> +{
>>>> + unsigned long _mask = mask;
>>>> +
>>>> + csr->csrs[gid] &= ~_mask;
>>>> + csr->csrs[gid] |= val & _mask;
>>>> +}
>>>> +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
>>>> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> new file mode 100644
>>>> index 000000000000..74deaf55d22c
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> @@ -0,0 +1,97 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
>>>> +#define __ASM_LOONGARCH_KVM_VCPU_H__
>>>> +
>>>> +#include <linux/kvm_host.h>
>>>> +#include <asm/loongarch.h>
>>>> +
>>>> +/* Controlled by 0x5 guest exst */
>>>> +#define CPU_SIP0 (_ULCAST_(1))
>>>> +#define CPU_SIP1 (_ULCAST_(1) << 1)
>>>> +#define CPU_PMU (_ULCAST_(1) << 10)
>>>> +#define CPU_TIMER (_ULCAST_(1) << 11)
>>>> +#define CPU_IPI (_ULCAST_(1) << 12)
>>>> +
>>>> +/*
>>>> + * Controlled by 0x52 guest exception VIP,
>>>> + * aligned to exst bits 5~12
>>>> + */
>>>> +#define CPU_IP0 (_ULCAST_(1))
>>>> +#define CPU_IP1 (_ULCAST_(1) << 1)
>>>> +#define CPU_IP2 (_ULCAST_(1) << 2)
>>>> +#define CPU_IP3 (_ULCAST_(1) << 3)
>>>> +#define CPU_IP4 (_ULCAST_(1) << 4)
>>>> +#define CPU_IP5 (_ULCAST_(1) << 5)
>>>> +#define CPU_IP6 (_ULCAST_(1) << 6)
>>>> +#define CPU_IP7 (_ULCAST_(1) << 7)
>>>> +
>>>> +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
>>>> +
>>>> +/* KVM_IRQ_LINE irq field index values */
>>>> +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
>>>> +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
>>>> +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
>>>> +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
>>>> +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
>>>> +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
>>>> +
>>>> +/* Irq_type field */
>>>> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IP 0
>>>> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IO 1
>>>> +#define KVM_LOONGSON_IRQ_TYPE_HT 2
>>>> +#define KVM_LOONGSON_IRQ_TYPE_MSI 3
>>>> +#define KVM_LOONGSON_IRQ_TYPE_IOAPIC 4
>>>> +#define KVM_LOONGSON_IRQ_TYPE_ROUTE 5
>>>> +
>>>> +/* Out-of-kernel GIC cpu interrupt injection irq_number field */
>>>> +#define KVM_LOONGSON_IRQ_CPU_IRQ 0
>>>> +#define KVM_LOONGSON_IRQ_CPU_FIQ 1
>>>> +#define KVM_LOONGSON_CPU_IP_NUM 8
>>>> +
>>>> +typedef union loongarch_instruction larch_inst;
>>>> +typedef int (*exit_handle_fn)(struct kvm_vcpu *);
>>>> +
>>>> +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
>>>> +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
>>>> +int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>>> +int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>>> +int _kvm_emu_idle(struct kvm_vcpu *vcpu);
>>>> +int _kvm_handle_pv_hcall(struct kvm_vcpu *vcpu);
>>>> +int _kvm_pending_timer(struct kvm_vcpu *vcpu);
>>>> +int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
>>>> +void _kvm_deliver_intr(struct kvm_vcpu *vcpu);
>>>> +
>>>> +void kvm_own_fpu(struct kvm_vcpu *vcpu);
>>>> +void kvm_lose_fpu(struct kvm_vcpu *vcpu);
>>>> +void kvm_save_fpu(struct loongarch_fpu *fpu);
>>>> +void kvm_restore_fpu(struct loongarch_fpu *fpu);
>>>> +void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>>>> +
>>>> +void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>>> +void kvm_reset_timer(struct kvm_vcpu *vcpu);
>>>> +enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
>>>> +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>>> +void kvm_restore_timer(struct kvm_vcpu *vcpu);
>>>> +void kvm_save_timer(struct kvm_vcpu *vcpu);
>>>> +
>>>> +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
>>>> + struct kvm_loongarch_interrupt *irq);
>>>> +/*
>>>> + * LoongArch KVM guest interrupt handling
>>>> + */
>>>> +static inline void _kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
>>>> +{
>>>> + set_bit(irq, &vcpu->arch.irq_pending);
>>>> + clear_bit(irq, &vcpu->arch.irq_clear);
>>>> +}
>>>> +
>>>> +static inline void _kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
>>>> +{
>>>> + clear_bit(irq, &vcpu->arch.irq_pending);
>>>> + set_bit(irq, &vcpu->arch.irq_clear);
>>>> +}
>>>> +
>>>> +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
>>>> diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
>>>> index 35e8a52fea11..cb1c66c5a92f 100644
>>>> --- a/arch/loongarch/include/asm/loongarch.h
>>>> +++ b/arch/loongarch/include/asm/loongarch.h
>>>> @@ -11,6 +11,7 @@
>>>>
>>>> #ifndef __ASSEMBLY__
>>>> #include <larchintrin.h>
>>>> +#include <asm/insn-def.h>
>>>>
>>>> /*
>>>> * parse_r var, r - Helper assembler macro for parsing register names.
>>>> @@ -309,6 +310,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
>>>> #define CSR_ECFG_VS_SHIFT 16
>>>> #define CSR_ECFG_VS_WIDTH 3
>>>> +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
>>>> #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
>>>> #define CSR_ECFG_IM_SHIFT 0
>>>> #define CSR_ECFG_IM_WIDTH 14
>>>> @@ -397,13 +399,14 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
>>>>
>>>> #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
>>>> -#define CSR_GTLBC_RID_SHIFT 16
>>>> -#define CSR_GTLBC_RID_WIDTH 8
>>>> -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
>>>> +#define CSR_GTLBC_TGID_SHIFT 16
>>>> +#define CSR_GTLBC_TGID_WIDTH 8
>>>> +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
>>>> +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
>>>> #define CSR_GTLBC_TOTI_SHIFT 13
>>>> #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
>>>> -#define CSR_GTLBC_USERID_SHIFT 12
>>>> -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
>>>> +#define CSR_GTLBC_USETGID_SHIFT 12
>>>> +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
>>>> #define CSR_GTLBC_GMTLBSZ_SHIFT 0
>>>> #define CSR_GTLBC_GMTLBSZ_WIDTH 6
>>>> #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
>>>> @@ -555,6 +558,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
>>>> #define CSR_GSTAT_GID_SHIFT 16
>>>> #define CSR_GSTAT_GID_WIDTH 8
>>>> +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
>>>> #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
>>>> #define CSR_GSTAT_GIDBIT_SHIFT 4
>>>> #define CSR_GSTAT_GIDBIT_WIDTH 6
>>>> @@ -605,6 +609,12 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
>>>> #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
>>>> #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
>>>> +#define CSR_GCFG_MATP_NEST_SHIFT 2
>>>> +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
>>>> +#define CSR_GCFG_MATP_ROOT_SHIFT 1
>>>> +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
>>>> +#define CSR_GCFG_MATP_GUEST_SHIFT 0
>>>> +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
>>>>
>>>> #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
>>>> #define CSR_GINTC_HC_SHIFT 16
>>>> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
>>>> new file mode 100644
>>>> index 000000000000..17b28d94d569
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/kvm/trace.h
>>>> @@ -0,0 +1,168 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
>>>> +#define _TRACE_KVM_H
>>>> +
>>>> +#include <linux/tracepoint.h>
>>>> +#include <asm/kvm_csr.h>
>>>> +
>>>> +#undef TRACE_SYSTEM
>>>> +#define TRACE_SYSTEM kvm
>>>> +
>>>> +/*
>>>> + * Tracepoints for VM enters
>>>> + */
>>>> +DECLARE_EVENT_CLASS(kvm_transition,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + ),
>>>> +
>>>> + TP_printk("PC: 0x%08lx",
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_enter,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_reenter,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_out,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +/* Further exit reasons */
>>>> +#define KVM_TRACE_EXIT_IDLE 64
>>>> +#define KVM_TRACE_EXIT_CACHE 65
>>>> +#define KVM_TRACE_EXIT_SIGNAL 66
>>>> +
>>>> +/* Tracepoints for VM exits */
>>>> +#define kvm_trace_symbol_exit_types \
>>>> + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \
>>>> + { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
>>>> + { KVM_TRACE_EXIT_SIGNAL, "Signal" }
>>>> +
>>>> +TRACE_EVENT(kvm_exit_gspr,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
>>>> + TP_ARGS(vcpu, inst_word),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned int, inst_word)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->inst_word = inst_word;
>>>> + ),
>>>> +
>>>> + TP_printk("inst word: 0x%08x",
>>>> + __entry->inst_word)
>>>> +);
>>>> +
>>>> +
>>>> +DECLARE_EVENT_CLASS(kvm_exit,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + __field(unsigned int, reason)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + __entry->reason = reason;
>>>> + ),
>>>> +
>>>> + TP_printk("[%s]PC: 0x%08lx",
>>>> + __print_symbolic(__entry->reason,
>>>> + kvm_trace_symbol_exit_types),
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit_idle,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit_cache,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +#define KVM_TRACE_AUX_RESTORE 0
>>>> +#define KVM_TRACE_AUX_SAVE 1
>>>> +#define KVM_TRACE_AUX_ENABLE 2
>>>> +#define KVM_TRACE_AUX_DISABLE 3
>>>> +#define KVM_TRACE_AUX_DISCARD 4
>>>> +
>>>> +#define KVM_TRACE_AUX_FPU 1
>>>> +
>>>> +#define kvm_trace_symbol_aux_op \
>>>> + { KVM_TRACE_AUX_RESTORE, "restore" }, \
>>>> + { KVM_TRACE_AUX_SAVE, "save" }, \
>>>> + { KVM_TRACE_AUX_ENABLE, "enable" }, \
>>>> + { KVM_TRACE_AUX_DISABLE, "disable" }, \
>>>> + { KVM_TRACE_AUX_DISCARD, "discard" }
>>>> +
>>>> +#define kvm_trace_symbol_aux_state \
>>>> + { KVM_TRACE_AUX_FPU, "FPU" }
>>>> +
>>>> +TRACE_EVENT(kvm_aux,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
>>>> + unsigned int state),
>>>> + TP_ARGS(vcpu, op, state),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + __field(u8, op)
>>>> + __field(u8, state)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + __entry->op = op;
>>>> + __entry->state = state;
>>>> + ),
>>>> +
>>>> + TP_printk("%s %s PC: 0x%08lx",
>>>> + __print_symbolic(__entry->op,
>>>> + kvm_trace_symbol_aux_op),
>>>> + __print_symbolic(__entry->state,
>>>> + kvm_trace_symbol_aux_state),
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +TRACE_EVENT(kvm_vpid_change,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
>>>> + TP_ARGS(vcpu, vpid),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, vpid)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->vpid = vpid;
>>>> + ),
>>>> +
>>>> + TP_printk("vpid: 0x%08lx",
>>>> + __entry->vpid)
>>>> +);
>>>> +
>>>> +#endif /* _TRACE_KVM_H */
>>>> +
>>>> +#undef TRACE_INCLUDE_PATH
>>>> +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm
>>>> +#undef TRACE_INCLUDE_FILE
>>>> +#define TRACE_INCLUDE_FILE trace
>>>> +
>>>> +/* This part must be outside protection */
>>>> +#include <trace/define_trace.h>
>>>> --
>>>> 2.39.1
>>>>
>>>>
>>
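The GCSR accessors in the patch quoted above hand-assemble the instruction
words with .4byte because the toolchains available when this series was
written did not know the LVZ mnemonics; that choice is what the replies
below argue about. As a side note, here is a minimal user-space sketch of
the field packing. The field layout comes from insn-def.h in this patch;
the concrete operands in the example are illustrative assumptions, not
taken from the series:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack one instruction word the way the insn_csr asm macro does:
 * opcode << 24 | simm14 << 10 | rj << 5 | rd.
 */
static uint32_t insn_csr_word(uint32_t opcode, uint32_t simm14,
			      uint32_t rj, uint32_t rd)
{
	return (opcode << 24) | (simm14 << 10) | (rj << 5) | rd;
}

int main(void)
{
	/*
	 * A gcsrrd-style read of CSR 0x1 into $a0: opcode 5 (GCSR),
	 * rj = $zero (r0), rd = $a0 (r4).
	 */
	printf("0x%08x\n", insn_csr_word(5, 0x1, 0, 4)); /* prints 0x05000404 */
	return 0;
}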
On 2023/7/14 18:10, bibo mao wrote:
>
>
> On 2023/7/14 17:22, Huacai Chen wrote:
>> Hi, Bibo,
>>
>> On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>>>
>>>
>>>
>>> On 2023/7/14 15:11, Huacai Chen wrote:
>>>> Hi, Tianrui,
>>>>
>>>> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>>>>
>>>>> Add LoongArch vcpu related header files, including vcpu csr
>>>>> information, irq number defines, and some vcpu interfaces.
>>>>>
>>>>> Reviewed-by: Bibo Mao <[email protected]>
>>>>> Signed-off-by: Tianrui Zhao <[email protected]>
>>>>> ---
>>>>> [... diffstat and insn-def.h preamble trimmed; identical to the patch at the top of the thread ...]
>>>>> +#define INSN_STR(x) __stringify(x)
>>>>> +#define CSR_RD_SHIFT 0
>>>>> +#define CSR_RJ_SHIFT 5
>>>>> +#define CSR_SIMM14_SHIFT 10
>>>>> +#define CSR_OPCODE_SHIFT 24
>>>> As all needed instructions have already upstream in binutils now and
>>>> binutils 2.41 will be released soon, I suggest again to introduce
>>>> AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
>>> It is good news that binutils 2.41 supports the LVZ assembly
>>> instructions. We will add AS_HAS_LVZ_EXTENSION support, but KVM need
>>> not depend on AS_HAS_LVZ_EXTENSION, since binutils 2.41 is not yet
>>> widely deployed. Yes, we should write beautiful code, but we should
>>> also write code with practical usage in mind.
>> 1, For pure upstream: the CI toolchain comes from
>> https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
>> 2.41 will be released soon, the CI toolchain will also be updated soon.
>>
>> 2, For community distributions, such as Fedora rawhide, Debian
>> unstable and Arch: they usually choose the latest released version, so
>> binutils 2.41 will be used quickly.
>>
>> 3, For downstream distributions, such as UOS and Kylin: if they choose
>> a kernel as new as 6.6, they will probably choose binutils as new as
>> 2.41; if they choose an LTS kernel (e.g., 6.1), they should backport
>> KVM support to the kernel, and then they have no reason not to
>> backport LVZ instruction support to binutils.
>
> commit 616500232e632dba8b03981eeccadacf2fbf1c30
> Author: Huacai Chen <[email protected]>
> Date: Thu Jun 29 20:58:43 2023
>
> LoongArch: Add vector extensions support
>
> Introduce AS_HAS_LSX_EXTENSION and AS_HAS_LASX_EXTENSION to avoid non-
> vector toolchains complains unsupported instructions.
>
> Signed-off-by: Huacai Chen <[email protected]>
>
> The vector patch 616500232e63 ("Add vector extensions support") was
> merged without any Reviewed-by or Acked-by. Vector support depends on
> AS_HAS_LSX, so this rule would require all patches to follow suit. I
> just think it takes things in the wrong direction.
For the record: I went through earlier versions of the patch, but simply
didn't have time to review that last revision closely enough to
confidently give my R-b. As can be seen from the Git history and mailing
list archives, my LoongArch time was fully allocated to ClangBuiltLinux
(and by extension, LLVM/Clang) at that time. By the time I was able to
properly look at this LSX/LASX patch, it was already in loongarch-next,
so even if I had replied it would have been too late.
And, to make it clear: I'm not opposing your upstreaming effort or
"conspiring against you" in any way. With all these replies I'm only
trying to explain why some code can be dropped, so that every upstream
developer has fewer code paths to maintain and an easier life.
--
WANG "xen0n" Xuerui
Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
On 2023/7/14 17:58, bibo mao wrote:
>
>
> On 2023/7/14 17:22, Huacai Chen wrote:
>> Hi, Bibo,
>>
>> On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>>>
>>>
>>>
>>> On 2023/7/14 15:11, Huacai Chen wrote:
>>>> Hi, Tianrui,
>>>>
>>>> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>>>>
>>>>> Add LoongArch vcpu related header files, including vcpu csr
>>>>> information, irq number defines, and some vcpu interfaces.
>>>>>
>>>>> Reviewed-by: Bibo Mao <[email protected]>
>>>>> Signed-off-by: Tianrui Zhao <[email protected]>
>>>>> ---
>>>>> [... diffstat and insn-def.h preamble trimmed; identical to the patch at the top of the thread ...]
>>>>> +#define INSN_STR(x) __stringify(x)
>>>>> +#define CSR_RD_SHIFT 0
>>>>> +#define CSR_RJ_SHIFT 5
>>>>> +#define CSR_SIMM14_SHIFT 10
>>>>> +#define CSR_OPCODE_SHIFT 24
>>>> As all needed instructions have already upstream in binutils now and
>>>> binutils 2.41 will be released soon, I suggest again to introduce
>>>> AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
>>> It is good news that binutils 2.41 supports the LVZ assembly
>>> instructions. We will add AS_HAS_LVZ_EXTENSION support, but KVM need
>>> not depend on AS_HAS_LVZ_EXTENSION, since binutils 2.41 is not yet
>>> widely deployed. Yes, we should write beautiful code, but we should
>>> also write code with practical usage in mind.
>> 1, For pure upstream: the CI toolchain comes from
>> https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
>> 2.41 will be released soon, the CI toolchain will also be updated soon.
>>
>> 2, For community distributions, such as Fedora rawhide, Debian
>> unstable and Arch: they usually choose the latest released version, so
>> binutils 2.41 will be used quickly.
>>
>> 3, For downstream distributions, such as UOS and Kylin: if they choose
>> a kernel as new as 6.6, they will probably choose binutils as new as
>> 2.41; if they choose an LTS kernel (e.g., 6.1), they should backport
>> KVM support to the kernel, and then they have no reason not to
>> backport LVZ instruction support to binutils.
>>
>> Huacai
> If so, could you post a patch to x86 or riscv to remove their
> hard-coded binary assembly? If the x86 or riscv maintainers agree, I do
> not object.
Don't panic, my friend, and generally please don't (figuratively) point
fingers at people :)
As a newcomer architecture, I'd argue we're in a different position than
they are. The other arches already have a sizable group of developers
running systems installed some time ago, so the cumulative migration
cost may be too high for them to drop the compatibility code path.
But AFAIK the LoongArch ecosystem simply doesn't have a comparable
number of infra developers (to the point that most of the prominent
maintainers and coders can be reached in fewer than 5 WeChat groups),
and they invariably update their dev rigs frequently, so the migration
cost for us should be significantly lower. (While the total number of
LoongArch end users and app developers may be higher, perhaps some
5000~10000 of them, most of them don't compile their own kernels. So
it's really just the kernel and distro devs.)
--
WANG "xen0n" Xuerui
Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
On 2023/7/14 17:22, Huacai Chen wrote:
> Hi, Bibo,
>
> On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>>
>>
>>
>> On 2023/7/14 15:11, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>>>
>>>> Add LoongArch vcpu related header files, including vcpu csr
>>>> information, irq number defines, and some vcpu interfaces.
>>>>
>>>> Reviewed-by: Bibo Mao <[email protected]>
>>>> Signed-off-by: Tianrui Zhao <[email protected]>
>>>> ---
>>>> [... diffstat and insn-def.h preamble trimmed; identical to the patch at the top of the thread ...]
>>>> +#define INSN_STR(x) __stringify(x)
>>>> +#define CSR_RD_SHIFT 0
>>>> +#define CSR_RJ_SHIFT 5
>>>> +#define CSR_SIMM14_SHIFT 10
>>>> +#define CSR_OPCODE_SHIFT 24
>>> As all needed instructions have already upstream in binutils now and
>>> binutils 2.41 will be released soon, I suggest again to introduce
>>> AS_HAS_LVZ_EXTENSION and make KVM depend on AS_HAS_LVZ_EXTENSION.
>> It is good news that binutils 2.41 supports the LVZ assembly
>> instructions. We will add AS_HAS_LVZ_EXTENSION support, but KVM need
>> not depend on AS_HAS_LVZ_EXTENSION, since binutils 2.41 is not yet
>> widely deployed. Yes, we should write beautiful code, but we should
>> also write code with practical usage in mind.
> 1, For pure upstream: the CI toolchain comes from
> https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
> 2.41 will be released soon, the CI toolchain will also be updated soon.
>
> 2, For community distributions, such as Fedora rawhide, Debian
> unstable and Arch: they usually choose the latest released version, so
> binutils 2.41 will be used quickly.
>
> 3, For downstream distributions, such as UOS and Kylin: if they choose
> a kernel as new as 6.6, they will probably choose binutils as new as
> 2.41; if they choose an LTS kernel (e.g., 6.1), they should backport
> KVM support to the kernel, and then they have no reason not to
> backport LVZ instruction support to binutils.
>
> Huacai
If so, could you post a patch to x86 or riscv to remove their hard-coded
binary assembly? If the x86 or riscv maintainers agree, I do not object.
Regards
Bibo Mao
>
>>
>> Paolo, what is your suggestion?
>>
>> x86 has similar binary assemble code like this:
>> static inline void __tpause(u32 ecx, u32 edx, u32 eax)
>> {
>> /* "tpause %ecx, %edx, %eax;" */
>> #ifdef CONFIG_AS_TPAUSE
>> asm volatile("tpause %%ecx\n"
>> :
>> : "c"(ecx), "d"(edx), "a"(eax));
>> #else
>> asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
>> :
>> : "c"(ecx), "d"(edx), "a"(eax));
>> #endif
>> }
>>
>> Also riscv has similiar code:
>> void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
>> {
>> asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
>> }
>>
>> Regards
>> Bibo Mao
>>>
>>> And this is easier than I suggested before (introduce
>>> CC_HAS_LVZ_EXTENSION), because CC_HAS_XXX depends on both gcc and
>>> binutils, but AS_HAS_XXX only depends on binutils.
>>>
>>> Huacai
>>>
>>>> +
>>>> +#define DEFINE_INSN_CSR \
>>>> + __DEFINE_ASM_GPR_NUMS \
>>>> +" .macro insn_csr, opcode, rj, rd, simm14\n" \
>>>> +" .4byte ((\\opcode << " INSN_STR(CSR_OPCODE_SHIFT) ") |" \
>>>> +" (.L__gpr_num_\\rj << " INSN_STR(CSR_RJ_SHIFT) ") |" \
>>>> +" (.L__gpr_num_\\rd << " INSN_STR(CSR_RD_SHIFT) ") |" \
>>>> +" (\\simm14 << " INSN_STR(CSR_SIMM14_SHIFT) "))\n" \
>>>> +" .endm\n"
>>>> +
>>>> +#define UNDEFINE_INSN_CSR \
>>>> +" .purgem insn_csr\n"
>>>> +
>>>> +#define __INSN_CSR(opcode, rj, rd, simm14) \
>>>> + DEFINE_INSN_CSR \
>>>> + "insn_csr " opcode ", " rj ", " rd ", " simm14 "\n" \
>>>> + UNDEFINE_INSN_CSR
>>>> +
>>>> +
>>>> +#define INSN_CSR(opcode, rj, rd, simm14) \
>>>> + __INSN_CSR(LARCH_##opcode, LARCH_##rj, LARCH_##rd, \
>>>> + LARCH_##simm14)
>>>> +
>>>> +#define __ASM_STR(x) #x
>>>> +#define LARCH_OPCODE(v) __ASM_STR(v)
>>>> +#define LARCH_SIMM14(v) __ASM_STR(v)
>>>> +#define __LARCH_REG(v) __ASM_STR(v)
>>>> +#define LARCH___RD(v) __LARCH_REG(v)
>>>> +#define LARCH___RJ(v) __LARCH_REG(v)
>>>> +#define LARCH_OPCODE_GCSR LARCH_OPCODE(5)
>>>> +
>>>> +#define GCSR_read(csr, rd) \
>>>> + INSN_CSR(OPCODE_GCSR, __RJ(zero), __RD(rd), SIMM14(csr))
>>>> +
>>>> +#define GCSR_write(csr, rd) \
>>>> + INSN_CSR(OPCODE_GCSR, __RJ($r1), __RD(rd), SIMM14(csr))
>>>> +
>>>> +#define GCSR_xchg(csr, rj, rd) \
>>>> + INSN_CSR(OPCODE_GCSR, __RJ(rj), __RD(rd), SIMM14(csr))
>>>> +
>>>> +#endif /* __ASM_INSN_DEF_H */
>>>> diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
>>>> new file mode 100644
>>>> index 000000000000..10dba5bc6df1
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_csr.h
>>>> @@ -0,0 +1,231 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __ASM_LOONGARCH_KVM_CSR_H__
>>>> +#define __ASM_LOONGARCH_KVM_CSR_H__
>>>> +#include <asm/loongarch.h>
>>>> +#include <asm/kvm_vcpu.h>
>>>> +#include <linux/uaccess.h>
>>>> +#include <linux/kvm_host.h>
>>>> +
>>>> +/*
>>>> + * Instructions will be available in binutils later
>>>> + * read val from guest csr register %[csr]
>>>> + * gcsrrd %[val], %[csr]
>>>> + */
>>>> +#define gcsr_read(csr) \
>>>> +({ \
>>>> + register unsigned long __v; \
>>>> + __asm__ __volatile__ (GCSR_read(csr, %0) \
>>>> + : "=r" (__v) : \
>>>> + : "memory"); \
>>>> + __v; \
>>>> +})
>>>> +
>>>> +/*
>>>> + * Instructions will be available in binutils later
>>>> + * write val to guest csr register %[csr]
>>>> + * gcsrwr %[val], %[csr]
>>>> + */
>>>> +#define gcsr_write(val, csr) \
>>>> +({ \
>>>> + register unsigned long __v = val; \
>>>> + __asm__ __volatile__ (GCSR_write(csr, %0) \
>>>> + : "+r" (__v) : \
>>>> + : "memory"); \
>>>> +})
>>>> +
>>>> +/*
>>>> + * Instructions will be available in binutils later
>>>> + * replace masked bits of guest csr register %[csr] with val
>>>> + * gcsrxchg %[val], %[mask], %[csr]
>>>> + */
>>>> +#define gcsr_xchg(val, mask, csr) \
>>>> +({ \
>>>> + register unsigned long __v = val; \
>>>> + __asm__ __volatile__ (GCSR_xchg(csr, %1, %0) \
>>>> + : "+r" (__v) \
>>>> + : "r" (mask) \
>>>> + : "memory"); \
>>>> + __v; \
>>>> +})
>>>> +
>>>> +/* Guest CSRS read and write */
>>>> +#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD)
>>>> +#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD)
>>>> +#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD)
>>>> +#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD)
>>>> +#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN)
>>>> +#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN)
>>>> +#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC)
>>>> +#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC)
>>>> +#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG)
>>>> +#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG)
>>>> +#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT)
>>>> +#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT)
>>>> +#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA)
>>>> +#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA)
>>>> +#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV)
>>>> +#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV)
>>>> +#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI)
>>>> +#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI)
>>>> +#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY)
>>>> +#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY)
>>>> +
>>>> +#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX)
>>>> +#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX)
>>>> +#define read_gcsr_tlbhi() gcsr_read(LOONGARCH_CSR_TLBEHI)
>>>> +#define write_gcsr_tlbhi(val) gcsr_write(val, LOONGARCH_CSR_TLBEHI)
>>>> +#define read_gcsr_tlblo0() gcsr_read(LOONGARCH_CSR_TLBELO0)
>>>> +#define write_gcsr_tlblo0(val) gcsr_write(val, LOONGARCH_CSR_TLBELO0)
>>>> +#define read_gcsr_tlblo1() gcsr_read(LOONGARCH_CSR_TLBELO1)
>>>> +#define write_gcsr_tlblo1(val) gcsr_write(val, LOONGARCH_CSR_TLBELO1)
>>>> +
>>>> +#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID)
>>>> +#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID)
>>>> +#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL)
>>>> +#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL)
>>>> +#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH)
>>>> +#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH)
>>>> +#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD)
>>>> +#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD)
>>>> +#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0)
>>>> +#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0)
>>>> +#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1)
>>>> +#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1)
>>>> +#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE)
>>>> +#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE)
>>>> +#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG)
>>>> +#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG)
>>>> +
>>>> +#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID)
>>>> +#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID)
>>>> +#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1)
>>>> +#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1)
>>>> +#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2)
>>>> +#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2)
>>>> +#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3)
>>>> +#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3)
>>>> +
>>>> +#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0)
>>>> +#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0)
>>>> +#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1)
>>>> +#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1)
>>>> +#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2)
>>>> +#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2)
>>>> +#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3)
>>>> +#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3)
>>>> +#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4)
>>>> +#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4)
>>>> +#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5)
>>>> +#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5)
>>>> +#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6)
>>>> +#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6)
>>>> +#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7)
>>>> +#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7)
>>>> +
>>>> +#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID)
>>>> +#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID)
>>>> +#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG)
>>>> +#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG)
>>>> +#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL)
>>>> +#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL)
>>>> +#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC)
>>>> +#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC)
>>>> +
>>>> +#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL)
>>>> +#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL)
>>>> +
>>>> +#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY)
>>>> +#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY)
>>>> +#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV)
>>>> +#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV)
>>>> +#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA)
>>>> +#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA)
>>>> +#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE)
>>>> +#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE)
>>>> +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0)
>>>> +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0)
>>>> +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
>>>> +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
>>>> +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
>>>> +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
>>>> +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
>>>> +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
>>>> +
>>>> +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
>>>> +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
>>>> +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
>>>> +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
>>>> +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
>>>> +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
>>>> +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
>>>> +#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
>>>> +
>>>> +/* Guest related CSRs */
>>>> +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
>>>> +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
>>>> +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
>>>> +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
>>>> +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
>>>> +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
>>>> +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
>>>> +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
>>>> +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
>>>> +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
>>>> +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
>>>> +
>>>> +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
>>>> +
>>>> +__BUILD_GCSR_OP(llbctl)
>>>> +__BUILD_GCSR_OP(tlbidx)
>>>> +__BUILD_CSR_OP(gcfg)
>>>> +__BUILD_CSR_OP(gstat)
>>>> +__BUILD_CSR_OP(gtlbc)
>>>> +__BUILD_CSR_OP(gintc)
>>>> +
>>>> +#define set_gcsr_estat(val) \
>>>> + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
>>>> +#define clear_gcsr_estat(val) \
>>>> + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
>>>> +
>>>> +#define kvm_read_hw_gcsr(id) gcsr_read(id)
>>>> +#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
>>>> +
>>>> +int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v);
>>>> +int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 v);
>>>> +
>>>> +int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
>>>> +
>>>> +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
>>>> +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
>>>> +
>>>> +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
>>>> +{
>>>> + return csr->csrs[gid];
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long val)
>>>> +{
>>>> + csr->csrs[gid] = val;
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long val)
>>>> +{
>>>> + csr->csrs[gid] |= val;
>>>> +}
>>>> +
>>>> +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
>>>> + int gid, unsigned long mask,
>>>> + unsigned long val)
>>>> +{
>>>> + unsigned long _mask = mask;
>>>> +
>>>> + csr->csrs[gid] &= ~_mask;
>>>> + csr->csrs[gid] |= val & _mask;
>>>> +}
>>>> +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
>>>> diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> new file mode 100644
>>>> index 000000000000..74deaf55d22c
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kvm_vcpu.h
>>>> @@ -0,0 +1,97 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
>>>> +#define __ASM_LOONGARCH_KVM_VCPU_H__
>>>> +
>>>> +#include <linux/kvm_host.h>
>>>> +#include <asm/loongarch.h>
>>>> +
>>>> +/* Controlled by 0x5 guest exst */
>>>> +#define CPU_SIP0 (_ULCAST_(1))
>>>> +#define CPU_SIP1 (_ULCAST_(1) << 1)
>>>> +#define CPU_PMU (_ULCAST_(1) << 10)
>>>> +#define CPU_TIMER (_ULCAST_(1) << 11)
>>>> +#define CPU_IPI (_ULCAST_(1) << 12)
>>>> +
>>>> +/* Controlled by 0x52 guest exception VIP
>>>> + * aligned to exst bit 5~12
>>>> + */
>>>> +#define CPU_IP0 (_ULCAST_(1))
>>>> +#define CPU_IP1 (_ULCAST_(1) << 1)
>>>> +#define CPU_IP2 (_ULCAST_(1) << 2)
>>>> +#define CPU_IP3 (_ULCAST_(1) << 3)
>>>> +#define CPU_IP4 (_ULCAST_(1) << 4)
>>>> +#define CPU_IP5 (_ULCAST_(1) << 5)
>>>> +#define CPU_IP6 (_ULCAST_(1) << 6)
>>>> +#define CPU_IP7 (_ULCAST_(1) << 7)
>>>> +
>>>> +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
>>>> +
>>>> +/* KVM_IRQ_LINE irq field index values */
>>>> +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
>>>> +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
>>>> +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
>>>> +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
>>>> +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
>>>> +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
>>>> +
>>>> +/* Irq_type field */
>>>> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IP 0
>>>> +#define KVM_LOONGSON_IRQ_TYPE_CPU_IO 1
>>>> +#define KVM_LOONGSON_IRQ_TYPE_HT 2
>>>> +#define KVM_LOONGSON_IRQ_TYPE_MSI 3
>>>> +#define KVM_LOONGSON_IRQ_TYPE_IOAPIC 4
>>>> +#define KVM_LOONGSON_IRQ_TYPE_ROUTE 5
>>>> +
>>>> +/* Out-of-kernel GIC cpu interrupt injection irq_number field */
>>>> +#define KVM_LOONGSON_IRQ_CPU_IRQ 0
>>>> +#define KVM_LOONGSON_IRQ_CPU_FIQ 1
>>>> +#define KVM_LOONGSON_CPU_IP_NUM 8
>>>> +
>>>> +typedef union loongarch_instruction larch_inst;
>>>> +typedef int (*exit_handle_fn)(struct kvm_vcpu *);
>>>> +
>>>> +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
>>>> +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
>>>> +int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>>> +int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>>> +int _kvm_emu_idle(struct kvm_vcpu *vcpu);
>>>> +int _kvm_handle_pv_hcall(struct kvm_vcpu *vcpu);
>>>> +int _kvm_pending_timer(struct kvm_vcpu *vcpu);
>>>> +int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
>>>> +void _kvm_deliver_intr(struct kvm_vcpu *vcpu);
>>>> +
>>>> +void kvm_own_fpu(struct kvm_vcpu *vcpu);
>>>> +void kvm_lose_fpu(struct kvm_vcpu *vcpu);
>>>> +void kvm_save_fpu(struct loongarch_fpu *fpu);
>>>> +void kvm_restore_fpu(struct loongarch_fpu *fpu);
>>>> +void kvm_restore_fcsr(struct loongarch_fpu *fpu);
>>>> +
>>>> +void kvm_acquire_timer(struct kvm_vcpu *vcpu);
>>>> +void kvm_reset_timer(struct kvm_vcpu *vcpu);
>>>> +enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu);
>>>> +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
>>>> +void kvm_restore_timer(struct kvm_vcpu *vcpu);
>>>> +void kvm_save_timer(struct kvm_vcpu *vcpu);
>>>> +
>>>> +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
>>>> + struct kvm_loongarch_interrupt *irq);
>>>> +/*
>>>> + * Loongarch KVM guest interrupt handling
>>>> + */
>>>> +static inline void _kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
>>>> +{
>>>> + set_bit(irq, &vcpu->arch.irq_pending);
>>>> + clear_bit(irq, &vcpu->arch.irq_clear);
>>>> +}
>>>> +
>>>> +static inline void _kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
>>>> +{
>>>> + clear_bit(irq, &vcpu->arch.irq_pending);
>>>> + set_bit(irq, &vcpu->arch.irq_clear);
>>>> +}
>>>> +
>>>> +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
>>>> diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
>>>> index 35e8a52fea11..cb1c66c5a92f 100644
>>>> --- a/arch/loongarch/include/asm/loongarch.h
>>>> +++ b/arch/loongarch/include/asm/loongarch.h
>>>> @@ -11,6 +11,7 @@
>>>>
>>>> #ifndef __ASSEMBLY__
>>>> #include <larchintrin.h>
>>>> +#include <asm/insn-def.h>
>>>>
>>>> /*
>>>> * parse_r var, r - Helper assembler macro for parsing register names.
>>>> @@ -309,6 +310,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
>>>> #define CSR_ECFG_VS_SHIFT 16
>>>> #define CSR_ECFG_VS_WIDTH 3
>>>> +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
>>>> #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
>>>> #define CSR_ECFG_IM_SHIFT 0
>>>> #define CSR_ECFG_IM_WIDTH 14
>>>> @@ -397,13 +399,14 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
>>>>
>>>> #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
>>>> -#define CSR_GTLBC_RID_SHIFT 16
>>>> -#define CSR_GTLBC_RID_WIDTH 8
>>>> -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
>>>> +#define CSR_GTLBC_TGID_SHIFT 16
>>>> +#define CSR_GTLBC_TGID_WIDTH 8
>>>> +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
>>>> +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
>>>> #define CSR_GTLBC_TOTI_SHIFT 13
>>>> #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
>>>> -#define CSR_GTLBC_USERID_SHIFT 12
>>>> -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
>>>> +#define CSR_GTLBC_USETGID_SHIFT 12
>>>> +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
>>>> #define CSR_GTLBC_GMTLBSZ_SHIFT 0
>>>> #define CSR_GTLBC_GMTLBSZ_WIDTH 6
>>>> #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
>>>> @@ -555,6 +558,7 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
>>>> #define CSR_GSTAT_GID_SHIFT 16
>>>> #define CSR_GSTAT_GID_WIDTH 8
>>>> +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
>>>> #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
>>>> #define CSR_GSTAT_GIDBIT_SHIFT 4
>>>> #define CSR_GSTAT_GIDBIT_WIDTH 6
>>>> @@ -605,6 +609,12 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
>>>> #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
>>>> #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
>>>> #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
>>>> +#define CSR_GCFG_MATP_NEST_SHIFT 2
>>>> +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
>>>> +#define CSR_GCFG_MATP_ROOT_SHIFT 1
>>>> +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
>>>> +#define CSR_GCFG_MATP_GUEST_SHIFT 0
>>>> +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
>>>>
>>>> #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
>>>> #define CSR_GINTC_HC_SHIFT 16
>>>> diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h
>>>> new file mode 100644
>>>> index 000000000000..17b28d94d569
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/kvm/trace.h
>>>> @@ -0,0 +1,168 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>> +/*
>>>> + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
>>>> + */
>>>> +
>>>> +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
>>>> +#define _TRACE_KVM_H
>>>> +
>>>> +#include <linux/tracepoint.h>
>>>> +#include <asm/kvm_csr.h>
>>>> +
>>>> +#undef TRACE_SYSTEM
>>>> +#define TRACE_SYSTEM kvm
>>>> +
>>>> +/*
>>>> + * Tracepoints for VM enters
>>>> + */
>>>> +DECLARE_EVENT_CLASS(kvm_transition,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + ),
>>>> +
>>>> + TP_printk("PC: 0x%08lx",
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_enter,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_reenter,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +DEFINE_EVENT(kvm_transition, kvm_out,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu),
>>>> + TP_ARGS(vcpu));
>>>> +
>>>> +/* Further exit reasons */
>>>> +#define KVM_TRACE_EXIT_IDLE 64
>>>> +#define KVM_TRACE_EXIT_CACHE 65
>>>> +#define KVM_TRACE_EXIT_SIGNAL 66
>>>> +
>>>> +/* Tracepoints for VM exits */
>>>> +#define kvm_trace_symbol_exit_types \
>>>> + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \
>>>> + { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
>>>> + { KVM_TRACE_EXIT_SIGNAL, "Signal" }
>>>> +
>>>> +TRACE_EVENT(kvm_exit_gspr,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
>>>> + TP_ARGS(vcpu, inst_word),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned int, inst_word)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->inst_word = inst_word;
>>>> + ),
>>>> +
>>>> + TP_printk("inst word: 0x%08x",
>>>> + __entry->inst_word)
>>>> +);
>>>> +
>>>> +
>>>> +DECLARE_EVENT_CLASS(kvm_exit,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + __field(unsigned int, reason)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + __entry->reason = reason;
>>>> + ),
>>>> +
>>>> + TP_printk("[%s]PC: 0x%08lx",
>>>> + __print_symbolic(__entry->reason,
>>>> + kvm_trace_symbol_exit_types),
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit_idle,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit_cache,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +DEFINE_EVENT(kvm_exit, kvm_exit,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
>>>> + TP_ARGS(vcpu, reason));
>>>> +
>>>> +#define KVM_TRACE_AUX_RESTORE 0
>>>> +#define KVM_TRACE_AUX_SAVE 1
>>>> +#define KVM_TRACE_AUX_ENABLE 2
>>>> +#define KVM_TRACE_AUX_DISABLE 3
>>>> +#define KVM_TRACE_AUX_DISCARD 4
>>>> +
>>>> +#define KVM_TRACE_AUX_FPU 1
>>>> +
>>>> +#define kvm_trace_symbol_aux_op \
>>>> + { KVM_TRACE_AUX_RESTORE, "restore" }, \
>>>> + { KVM_TRACE_AUX_SAVE, "save" }, \
>>>> + { KVM_TRACE_AUX_ENABLE, "enable" }, \
>>>> + { KVM_TRACE_AUX_DISABLE, "disable" }, \
>>>> + { KVM_TRACE_AUX_DISCARD, "discard" }
>>>> +
>>>> +#define kvm_trace_symbol_aux_state \
>>>> + { KVM_TRACE_AUX_FPU, "FPU" }
>>>> +
>>>> +TRACE_EVENT(kvm_aux,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
>>>> + unsigned int state),
>>>> + TP_ARGS(vcpu, op, state),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, pc)
>>>> + __field(u8, op)
>>>> + __field(u8, state)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->pc = vcpu->arch.pc;
>>>> + __entry->op = op;
>>>> + __entry->state = state;
>>>> + ),
>>>> +
>>>> + TP_printk("%s %s PC: 0x%08lx",
>>>> + __print_symbolic(__entry->op,
>>>> + kvm_trace_symbol_aux_op),
>>>> + __print_symbolic(__entry->state,
>>>> + kvm_trace_symbol_aux_state),
>>>> + __entry->pc)
>>>> +);
>>>> +
>>>> +TRACE_EVENT(kvm_vpid_change,
>>>> + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid),
>>>> + TP_ARGS(vcpu, vpid),
>>>> + TP_STRUCT__entry(
>>>> + __field(unsigned long, vpid)
>>>> + ),
>>>> +
>>>> + TP_fast_assign(
>>>> + __entry->vpid = vpid;
>>>> + ),
>>>> +
>>>> + TP_printk("vpid: 0x%08lx",
>>>> + __entry->vpid)
>>>> +);
>>>> +
>>>> +#endif /* _TRACE_LOONGARCH64_KVM_H */
>>>> +
>>>> +#undef TRACE_INCLUDE_PATH
>>>> +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm
>>>> +#undef TRACE_INCLUDE_FILE
>>>> +#define TRACE_INCLUDE_FILE trace
>>>> +
>>>> +/* This part must be outside protection */
>>>> +#include <trace/define_trace.h>
>>>> --
>>>> 2.39.1
>>>>
>>>>
>>
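For reference, the TRACE_EVENT()/DEFINE_EVENT() definitions in the quoted
trace.h generate trace_<name>() helpers that the exit handlers can call. A
minimal, hypothetical sketch of such call sites follows; the handler name is
illustrative only and not part of this patch:

    /*
     * Hypothetical call sites for the tracepoints defined above.
     * Exactly one .c file must define CREATE_TRACE_POINTS before
     * including trace.h so the trace_*() symbols are emitted.
     */
    #include <linux/kvm_host.h>

    #define CREATE_TRACE_POINTS
    #include "trace.h"

    static int demo_handle_idle(struct kvm_vcpu *vcpu)
    {
            /* Renders as "[IDLE]PC: 0x..." via __print_symbolic() */
            trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

            /* Renders as "save FPU PC: 0x..." */
            trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
            return 0;
    }

Once built in, these events should appear under
/sys/kernel/tracing/events/ (in the kvm system) and can be enabled
individually at runtime.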
On 2023/7/14 17:22, Huacai Chen wrote:
> Hi, Bibo,
>
> On Fri, Jul 14, 2023 at 3:45 PM bibo mao <[email protected]> wrote:
>>
>>
>>
>> On 2023/7/14 15:11, Huacai Chen wrote:
>>> Hi, Tianrui,
>>>
>>> On Thu, Jun 29, 2023 at 3:55 PM Tianrui Zhao <[email protected]> wrote:
>>>>
>>>> Add LoongArch vcpu related header files, including vcpu csr
>>>> information, irq number defines, and some vcpu interfaces.
>>>>
>>>> Reviewed-by: Bibo Mao <[email protected]>
>>>> Signed-off-by: Tianrui Zhao <[email protected]>
>>>> ---
>>>> arch/loongarch/include/asm/insn-def.h | 55 ++++++
>>>> arch/loongarch/include/asm/kvm_csr.h | 231 +++++++++++++++++++++++++
>>>> arch/loongarch/include/asm/kvm_vcpu.h | 97 +++++++++++
>>>> arch/loongarch/include/asm/loongarch.h | 20 ++-
>>>> arch/loongarch/kvm/trace.h | 168 ++++++++++++++++++
>>>> 5 files changed, 566 insertions(+), 5 deletions(-)
>>>> create mode 100644 arch/loongarch/include/asm/insn-def.h
>>>> create mode 100644 arch/loongarch/include/asm/kvm_csr.h
>>>> create mode 100644 arch/loongarch/include/asm/kvm_vcpu.h
>>>> create mode 100644 arch/loongarch/kvm/trace.h
>>>>
>>>> diff --git a/arch/loongarch/include/asm/insn-def.h b/arch/loongarch/include/asm/insn-def.h
>>>> new file mode 100644
>>>> index 000000000000..e285ee108fb0
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/insn-def.h
>>>> @@ -0,0 +1,55 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0-only */
>>>> +
>>>> +#ifndef __ASM_INSN_DEF_H
>>>> +#define __ASM_INSN_DEF_H
>>>> +
>>>> +#include <linux/stringify.h>
>>>> +#include <asm/gpr-num.h>
>>>> +#include <asm/asm.h>
>>>> +
>>>> +#define INSN_STR(x) __stringify(x)
>>>> +#define CSR_RD_SHIFT 0
>>>> +#define CSR_RJ_SHIFT 5
>>>> +#define CSR_SIMM14_SHIFT 10
>>>> +#define CSR_OPCODE_SHIFT 24
>>> As all the needed instructions have already been upstreamed in binutils
>>> and binutils 2.41 will be released soon, I suggest again introducing
>>> AS_HAS_LVZ_EXTENSION and making KVM depend on AS_HAS_LVZ_EXTENSION.
>> It is good news that binutils 2.41 supports the LVZ assembly language.
>> We will add AS_HAS_LVZ_EXTENSION support; however, KVM need not depend on
>> AS_HAS_LVZ_EXTENSION since binutils 2.41 is not yet widely used. Yes, we
>> need to write beautiful code, but we should also write code with practical
>> use in mind.
> 1. For pure upstream: the CI toolchain comes from
> https://mirrors.edge.kernel.org/pub/tools/crosstool/. Since binutils
> 2.41 will be released soon, the CI toolchain will also be updated soon.
>
> 2. For community distributions, such as Fedora rawhide, Debian
> unstable, and Arch: they usually ship the latest released version, so
> binutils 2.41 will be adopted quickly.
You seem to have missed CLFS for LoongArch [1] and Gentoo [2] ;-)
These two distros are also very punctual in adopting new toolchain
versions: the maintainer of CLFS for LoongArch is a Loongson employee
himself (and he's the author of the Fedora LoongArch Remix too), while
the Gentoo toolchain team usually follows upstream releases very
quickly. (I happen to maintain the Gentoo loong port too, and am also a
member of the Gentoo toolchain team.)
[1]: https://github.com/sunhaiyong1978/CLFS-for-LoongArch
[2]: https://wiki.gentoo.org/wiki/Project:LoongArch
>
> 3. For downstream distributions, such as UOS and Kylin: if they choose a
> kernel as new as 6.6, they will probably choose a binutils as new as
> 2.41; if they choose an LTS kernel (e.g., 6.1), they will have to backport
> KVM support to the kernel, and then they have no reason not to
> backport LVZ instruction support to binutils.
I generally agree with Huacai here. If those distros pick bleeding-edge
kernel releases, then they have no reason not to bump the toolchain
baseline as well; otherwise they must be backporting, in which case they
either:
a) pull patches from v6.6, so they may as well integrate the binutils
patches along the way (trivial compared to kernel backports), or
b) ask Loongson to provide the patches, in which case you may just give
this version of the code to them, which can be done (and, I assume, has
already been done) without any upstream involvement.
All in all, I don't see a reason for hardcoding any opcodes at this
particular time, when everyone involved agrees on moving fast.
--
WANG "xen0n" Xuerui
Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
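For readers unfamiliar with the suggested mechanism: kbuild provides an
$(as-instr ...) macro that probes whether the assembler accepts a given
instruction, which is how other architectures gate features like this. A
minimal sketch of the wiring, assuming "hvcl 0" as the LVZ probe
instruction (the symbol name, probe instruction, and dependency placement
are all assumptions here, not content of this series):

    # Hypothetical probe in arch/loongarch/Kconfig; any LVZ-only
    # mnemonic would serve as the test instruction.
    config AS_HAS_LVZ_EXTENSION
            def_bool $(as-instr hvcl 0)

    # Hypothetical use in arch/loongarch/kvm/Kconfig:
    config KVM
            tristate "Kernel-based Virtual Machine (KVM) support"
            depends on AS_HAS_LVZ_EXTENSION

With something like this in place, the hand-encoding macros in insn-def.h
could be dropped in favor of plain mnemonics once the toolchain baseline
allows it.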
On Fri, 2023-07-14 at 18:16 +0800, WANG Xuerui wrote:
> > > > > As all the needed instructions have already been upstreamed in binutils
> > > > > and binutils 2.41 will be released soon, I suggest again introducing
> > > > > AS_HAS_LVZ_EXTENSION and making KVM depend on AS_HAS_LVZ_EXTENSION.
> > > > It is good news that binutils 2.41 supports the LVZ assembly language.
> > > > We will add AS_HAS_LVZ_EXTENSION support; however, KVM need not depend on
> > > > AS_HAS_LVZ_EXTENSION since binutils 2.41 is not yet widely used. Yes, we
> > > > need to write beautiful code, but we should also write code with practical
> > > > use in mind.
I raised this for a very early version of this series, but Paolo
decided that using .word here should be fine:
https://lore.kernel.org/all/[email protected]/
So in this case we should respect the decision of the KVM reviewer. If
this breaks the Clang build, we should improve Clang to support using
.word for hardcoding an opcode.
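For context, the two approaches can also be combined: use the real
mnemonic when the assembler supports it, and fall back to a hand-encoded
word otherwise. A hypothetical sketch, in which CONFIG_AS_HAS_LVZ_EXTENSION
and the fallback encoding are placeholders rather than values taken from
this series:

    /*
     * Sketch only: gate the mnemonic on assembler support, with a
     * .word fallback. The 0x002b8000 value is a placeholder, not the
     * real hvcl encoding.
     */
    #ifdef CONFIG_AS_HAS_LVZ_EXTENSION
    #define HVCL_0  "hvcl 0\n\t"            /* real mnemonic */
    #else
    #define HVCL_0  ".word 0x002b8000\n\t"  /* hand-encoded fallback */
    #endif

    static inline void demo_hypercall(void)
    {
            __asm__ __volatile__(HVCL_0 : : : "memory");
    }

Clang's integrated assembler also accepts plain .word directives with
constant expressions, so a fallback of this shape should remain buildable
there.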
Frankly, I'm quite frustrated by "a new architecture needs so many
feature tests and hacks, here and there", and sometimes I just want to
lie in bed instead of writing code or porting the distro (Linux From
Scratch). But today I just got [a board from another Chinese vendor
targeting another new architecture] and to me they are doing things even
worse... So maybe we are facing some inherent "no silver bullet"
engineering issue.
--
Xi Ruoyao <[email protected]>
School of Aerospace Science and Technology, Xidian University