From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: rkrcmar@redhat.com, bsd@redhat.com
Subject: [PATCH 07/12] KVM: x86: stubs for SMM support
Date: Fri, 8 May 2015 13:20:29 +0200
Message-Id: <1431084034-8425-8-git-send-email-pbonzini@redhat.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1431084034-8425-1-git-send-email-pbonzini@redhat.com>
References: <1431084034-8425-1-git-send-email-pbonzini@redhat.com>

This patch adds the interface between x86.c and the emulator: the
SMBASE register, a new emulator flag, the RSM instruction.  It also
adds a new request bit that will be used by the KVM_SMI ioctl.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
	RFC->v1: make SMBASE host-readable only
		 add support for latching an SMI
		 do not reset SMBASE on INIT
---
 arch/x86/include/asm/kvm_emulate.h |  4 +++
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/include/asm/vmx.h         |  1 +
 arch/x86/kvm/emulate.c             | 10 ++++++-
 arch/x86/kvm/lapic.c               |  4 ++-
 arch/x86/kvm/svm.c                 |  1 +
 arch/x86/kvm/vmx.c                 |  2 +-
 arch/x86/kvm/x86.c                 | 54 +++++++++++++++++++++++++++++++++++++-
 include/linux/kvm_host.h           |  1 +
 9 files changed, 74 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7410879a41f7..e16466ec473c 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -193,6 +193,8 @@ struct x86_emulate_ops {
 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+	u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
+	void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
 	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
 	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
 	int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -264,6 +266,8 @@ enum x86emul_mode {

 /* These match some of the HF_* flags defined in kvm_host.h */
 #define X86EMUL_GUEST_MASK   (1 << 5) /* VCPU is in guest-mode */
+#define X86EMUL_SMM_MASK     (1 << 6)
+#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)

 struct x86_emulate_ctxt {
 	const struct x86_emulate_ops *ops;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a1bef695dc99..fc05c7dda9f1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -367,6 +367,7 @@ struct kvm_vcpu_arch {
 	int32_t apic_arb_prio;
 	int mp_state;
 	u64 ia32_misc_enable_msr;
+	u64 smbase;
 	bool tpr_access_reporting;
 	u64 ia32_xss;

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index da772edd19ab..e8f998114df0 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -108,6 +108,7 @@
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 #define VMX_MISC_ACTIVITY_HLT			0x00000040
+#define VMX_MISC_IA32_SMBASE_MSR		0x00008000

 /* VMCS Encodings */
 enum vmcs_field {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e82a559df21a..c5a6a407afba 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2262,6 +2262,14 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 	return rc;
 }

+static int em_rsm(struct x86_emulate_ctxt *ctxt)
+{
+	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+		return emulate_ud(ctxt);
+
+	return X86EMUL_UNHANDLEABLE;
+}
+
 static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			struct desc_struct *cs, struct desc_struct *ss)
@@ -4173,7 +4181,7 @@ static const struct opcode twobyte_table[256] = {
 	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
 	/* 0xA8 - 0xAF */
 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-	DI(ImplicitOps, rsm),
+	II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dc5b57bb1f76..fe26f2051979 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -799,7 +799,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		break;

 	case APIC_DM_SMI:
-		apic_debug("Ignoring guest SMI\n");
+		result = 1;
+		kvm_make_request(KVM_REQ_SMI, vcpu);
+		kvm_vcpu_kick(vcpu);
 		break;

 	case APIC_DM_NMI:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 24282bc6580c..6afffb82513c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3394,6 +3394,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_MWAIT]			= mwait_interception,
 	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
+	[SVM_EXIT_RSM]				= emulate_on_interception,
 };

 static void dump_vmcb(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bf961951786a..121a653919c0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2504,7 +2504,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
 	vmx->nested.nested_vmx_misc_low |=
 		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
-		VMX_MISC_ACTIVITY_HLT;
+		VMX_MISC_ACTIVITY_HLT | VMX_MISC_IA32_SMBASE_MSR;
 	vmx->nested.nested_vmx_misc_high = 0;
 }

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8015c67a6d07..2bf5bc4ed00f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -954,6 +954,7 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
 	MSR_IA32_MCG_CTL,
+	MSR_IA32_SMBASE,
 };

 static unsigned num_emulated_msrs;
@@ -2220,6 +2221,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MISC_ENABLE:
 		vcpu->arch.ia32_misc_enable_msr = data;
 		break;
+	case MSR_IA32_SMBASE:
+		if (!msr_info->host_initiated)
+			return 1;
+		vcpu->arch.smbase = data;
+		break;
 	case MSR_KVM_WALL_CLOCK_NEW:
 	case MSR_KVM_WALL_CLOCK:
 		vcpu->kvm->arch.wall_clock = data;
@@ -2615,6 +2621,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MISC_ENABLE:
 		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
 		break;
+	case MSR_IA32_SMBASE:
+		if (!msr_info->host_initiated)
+			return 1;
+		msr_info->data = vcpu->arch.smbase;
+		break;
 	case MSR_IA32_PERF_STATUS:
 		/* TSC increment by tick */
 		msr_info->data = 1000ULL;
@@ -3039,6 +3050,8 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)

 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
 {
+	kvm_make_request(KVM_REQ_SMI, vcpu);
+
 	return 0;
 }

@@ -5059,6 +5072,20 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }

+static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	return vcpu->arch.smbase;
+}
+
+static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	vcpu->arch.smbase = smbase;
+}
+
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
 			      u32 pmc)
 {
@@ -5144,6 +5171,8 @@ static const struct x86_emulate_ops emulate_ops = {
 	.cpl                 = emulator_get_cpl,
 	.get_dr              = emulator_get_dr,
 	.set_dr              = emulator_set_dr,
+	.get_smbase          = emulator_get_smbase,
+	.set_smbase          = emulator_set_smbase,
 	.set_msr             = emulator_set_msr,
 	.get_msr             = emulator_get_msr,
 	.check_pmc           = emulator_check_pmc,
@@ -5206,6 +5235,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 		     cs_db	? X86EMUL_MODE_PROT32 :
 					  X86EMUL_MODE_PROT16;
 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
+	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
 	ctxt->emul_flags = vcpu->arch.hflags;

 	init_decode_cache(ctxt);
@@ -5377,6 +5408,13 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu);

 void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
 {
+	if (is_smm(vcpu) && (emul_flags & HF_SMM_MASK) == 0) {
+		if (unlikely(vcpu->arch.smi_pending)) {
+			kvm_make_request(KVM_REQ_SMI, vcpu);
+			vcpu->arch.smi_pending = 0;
+		}
+	}
+
 	vcpu->arch.hflags = emul_flags;
 }

@@ -6271,6 +6309,16 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }

+static void process_smi(struct kvm_vcpu *vcpu)
+{
+	if (is_smm(vcpu)) {
+		vcpu->arch.smi_pending = true;
+		return;
+	}
+
+	printk_once(KERN_DEBUG "Ignoring guest SMI\n");
+}
+
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
 	u64 eoi_exit_bitmap[4];
@@ -6377,6 +6425,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
 			record_steal_time(vcpu);
+		if (kvm_check_request(KVM_REQ_SMI, vcpu))
+			process_smi(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -7276,8 +7326,10 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	kvm_async_pf_hash_reset(vcpu);
 	vcpu->arch.apf.halted = false;

-	if (!init_event)
+	if (!init_event) {
 		kvm_pmu_reset(vcpu);
+		vcpu->arch.smbase = 0x30000;
+	}

 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
 	vcpu->arch.regs_avail = ~0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2946ec3f1b81..19d09a08885b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -134,6 +134,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_ENABLE_IBS        23
 #define KVM_REQ_DISABLE_IBS       24
 #define KVM_REQ_APIC_PAGE_RELOAD  25
+#define KVM_REQ_SMI               26

 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
-- 
1.8.3.1
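
For reference, a minimal userspace sketch (not part of the patch) of how the
pieces above are meant to be driven: it assumes a vcpu file descriptor
"vcpu_fd" already created with KVM_CREATE_VCPU, the KVM_SMI vcpu ioctl
introduced elsewhere in this series, and the architectural MSR index 0x9e for
MSR_IA32_SMBASE.  Because kvm_get_msr_common()/kvm_set_msr_common() reject
accesses that are not host-initiated, SMBASE is reachable only through
KVM_GET_MSRS/KVM_SET_MSRS, never from the guest.

/*
 * Illustrative sketch only: latch an SMI via the KVM_SMI vcpu ioctl and
 * read SMBASE back as a host-initiated MSR access.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>

#ifndef MSR_IA32_SMBASE
#define MSR_IA32_SMBASE 0x9e	/* architectural SMBASE MSR index */
#endif

static void smi_then_dump_smbase(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	/* Sets KVM_REQ_SMI; vcpu_enter_guest() then calls process_smi(). */
	if (ioctl(vcpu_fd, KVM_SMI) < 0)
		perror("KVM_SMI");

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = MSR_IA32_SMBASE;

	/* Host-initiated, so the host_initiated check in x86.c passes. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) < 0)
		perror("KVM_GET_MSRS");
	else
		printf("SMBASE = 0x%llx\n",
		       (unsigned long long)msrs.entry.data);
}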