From: Bandan Das <bsd@redhat.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, david@redhat.com, linux-kernel@vger.kernel.org
Subject: [PATCH v3 3/3] KVM: nVMX: Emulate EPTP switching for the L1 hypervisor
Date: Mon, 10 Jul 2017 15:53:56 -0400
Message-Id: <20170710195356.31297-4-bsd@redhat.com>
In-Reply-To: <20170710195356.31297-1-bsd@redhat.com>
References: <20170710195356.31297-1-bsd@redhat.com>

When L2 uses vmfunc, L0 utilizes the associated vmexit to emulate
switching of the EPT pointer by reloading the guest MMU.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Bandan Das <bsd@redhat.com>
---
 arch/x86/include/asm/vmx.h |  6 +++++
 arch/x86/kvm/vmx.c         | 58 +++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index da5375e..5f63a2e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -115,6 +115,10 @@
 #define VMX_MISC_SAVE_EFER_LMA		0x00000020
 #define VMX_MISC_ACTIVITY_HLT		0x00000040
 
+/* VMFUNC functions */
+#define VMX_VMFUNC_EPTP_SWITCHING	0x00000001
+#define VMFUNC_EPTP_ENTRIES		512
+
 static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
 {
 	return vmx_basic & GENMASK_ULL(30, 0);
@@ -200,6 +204,8 @@ enum vmcs_field {
 	EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
 	EOI_EXIT_BITMAP3                = 0x00002022,
 	EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
+	EPTP_LIST_ADDRESS               = 0x00002024,
+	EPTP_LIST_ADDRESS_HIGH          = 0x00002025,
 	VMREAD_BITMAP                   = 0x00002026,
 	VMWRITE_BITMAP                  = 0x00002028,
 	XSS_EXIT_BITMAP                 = 0x0000202C,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7364678..0a969fb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -246,6 +246,7 @@ struct __packed vmcs12 {
 	u64 eoi_exit_bitmap1;
 	u64 eoi_exit_bitmap2;
 	u64 eoi_exit_bitmap3;
+	u64 eptp_list_address;
 	u64 xss_exit_bitmap;
 	u64 guest_physical_address;
 	u64 vmcs_link_pointer;
@@ -771,6 +772,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
 	FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
 	FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
+	FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
 	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
 	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
 	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
@@ -1402,6 +1404,13 @@ static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
 }
 
+static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has_vmfunc(vmcs12) &&
+		(vmcs12->vm_function_control &
+		 VMX_VMFUNC_EPTP_SWITCHING);
+}
+
 static inline bool is_nmi(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2791,7 +2800,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	if (cpu_has_vmx_vmfunc()) {
 		vmx->nested.nested_vmx_secondary_ctls_high |=
 			SECONDARY_EXEC_ENABLE_VMFUNC;
-		vmx->nested.nested_vmx_vmfunc_controls = 0;
+		/*
+		 * Advertise EPTP switching unconditionally
+		 * since we emulate it
+		 */
+		vmx->nested.nested_vmx_vmfunc_controls =
+			VMX_VMFUNC_EPTP_SWITCHING;
 	}
 
 	/*
@@ -7772,6 +7786,9 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12;
 	u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
+	u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
+	struct page *page = NULL;
+	u64 *l1_eptp_list, address;
 
 	/*
 	 * VMFUNC is only supported for nested guests, but we always enable the
@@ -7784,11 +7801,46 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 	}
 
 	vmcs12 = get_vmcs12(vcpu);
-	if ((vmcs12->vm_function_control & (1 << function)) == 0)
+	if (((vmcs12->vm_function_control & (1 << function)) == 0) ||
+	    WARN_ON_ONCE(function))
+		goto fail;
+
+	if (!nested_cpu_has_ept(vmcs12) ||
+	    !nested_cpu_has_eptp_switching(vmcs12))
+		goto fail;
+
+	if (!vmcs12->eptp_list_address || index >= VMFUNC_EPTP_ENTRIES)
+		goto fail;
+
+	page = nested_get_page(vcpu, vmcs12->eptp_list_address);
+	if (!page)
 		goto fail;
-	WARN(1, "VMCS12 VM function control should have been zero");
+
+	l1_eptp_list = kmap(page);
+	address = l1_eptp_list[index];
+	if (!address)
+		goto fail;
+	/*
+	 * If the (L2) guest does a vmfunc to the currently
+	 * active ept pointer, we don't have to do anything else
+	 */
+	if (vmcs12->ept_pointer != address) {
+		if (address >> cpuid_maxphyaddr(vcpu) ||
+		    !IS_ALIGNED(address, 4096))
+			goto fail;
+		kvm_mmu_unload(vcpu);
+		vmcs12->ept_pointer = address;
+		kvm_mmu_reload(vcpu);
+		kunmap(page);
+		nested_release_page_clean(page);
+	}
+	return kvm_skip_emulated_instruction(vcpu);
 
 fail:
+	if (page) {
+		kunmap(page);
+		nested_release_page_clean(page);
+	}
 	nested_vmx_vmexit(vcpu, vmx->exit_reason,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
-- 
2.9.4
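
For readers unfamiliar with the interface being emulated above, the sketch
below shows what EPTP switching looks like from the guest side once L0
advertises it: L1 programs its VMCS with an EPTP list page and enables the
EPTP-switching VM function, and L2 later executes VMFUNC leaf 0 to request
a switch, which L0 intercepts and emulates by reloading the guest MMU.
This is an illustrative sketch, not part of the patch: vmcs_write64()
stands in for whatever VMCS accessor the L1 hypervisor uses,
l1_enable_eptp_switching()/l2_vmfunc_eptp_switch() and eptp_list_pa are
hypothetical names, and the field encodings are taken from the Intel SDM
and from the hunks above.

/*
 * Illustrative sketch; vmcs_write64() and the function names are assumed,
 * the VMCS field encodings follow the Intel SDM and asm/vmx.h.
 */
#include <linux/types.h>

#define VM_FUNCTION_CONTROL		0x00002018
#define EPTP_LIST_ADDRESS		0x00002024
#define VMX_VMFUNC_EPTP_SWITCHING	0x00000001

/*
 * Run by L1 while preparing the VMCS it uses for L2.  eptp_list_pa is the
 * 4K-aligned physical address of a page holding up to 512 candidate EPT
 * pointers (VMFUNC_EPTP_ENTRIES in the patch).
 */
static void l1_enable_eptp_switching(u64 eptp_list_pa)
{
	vmcs_write64(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
	vmcs_write64(EPTP_LIST_ADDRESS, eptp_list_pa);
}

/*
 * Run by L2 in VMX non-root mode: ask to switch to eptp_list[index].  With
 * this patch, L0 takes the resulting VMFUNC exit, validates the index and
 * target EPTP, and reloads the guest MMU instead of relying on hardware
 * EPTP switching.
 */
static inline void l2_vmfunc_eptp_switch(u32 index)
{
	/* VMFUNC is 0f 01 d4; EAX = 0 selects EPTP switching, ECX = index */
	asm volatile(".byte 0x0f, 0x01, 0xd4"
		     : : "a" (0), "c" (index) : "memory");
}

The design point this illustrates: because L0 advertises
VMX_VMFUNC_EPTP_SWITCHING unconditionally and emulates it, L2's EPTP switch
is handled entirely in L0 and never forces a nested VM exit into L1.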