From: Kyle Huey
To: Paolo Bonzini, Radim Krčmář, Thomas Gleixner, Ingo Molnar,
	"H. Peter Anvin", x86@kernel.org, Joerg Roedel
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 2/5] KVM: VMX: Reorder some skip_emulated_instruction calls
Date: Tue, 29 Nov 2016 12:40:38 -0800
Message-Id: <20161129204041.8839-3-khuey@kylehuey.com>
X-Mailer: git-send-email 2.10.2
In-Reply-To: <20161129204041.8839-1-khuey@kylehuey.com>
References: <20161129204041.8839-1-khuey@kylehuey.com>

The functions being moved ahead of skip_emulated_instruction here don't
need updated IPs, and skipping the emulated instruction at the end will
make it easier to return its value.

Signed-off-by: Kyle Huey
---
 arch/x86/kvm/vmx.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
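A note below the cut, not intended for the commit message: here is a minimal
sketch of where this reordering is headed, using handle_vmoff from the diff
below as the example. It assumes the follow-up in this series that teaches the
skip helper to return a value; the name kvm_skip_emulated_instruction is that
assumed future helper, not something this patch adds.

/*
 * Sketch only. kvm_skip_emulated_instruction() stands in for the assumed
 * follow-up helper that both skips the instruction and reports whether
 * the vcpu should keep running.
 */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;
	free_nested(to_vmx(vcpu));
	nested_vmx_succeed(vcpu);
	/* With the skip last, its result can become the handler's result. */
	return kvm_skip_emulated_instruction(vcpu);
}

None of the calls hoisted above the skip depend on the updated IP, so only
the tail of each handler has to change once such a helper exists.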
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e4af9699..f2f9cf5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5703,18 +5703,18 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
 				return 0;
 			}
 		}
 		break;
 	case 2: /* clts */
 		handle_clts(vcpu);
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-		skip_emulated_instruction(vcpu);
 		vmx_fpu_activate(vcpu);
+		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
 			val = kvm_read_cr3(vcpu);
 			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
@@ -6128,18 +6128,18 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 
 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
 	int ret;
 	gpa_t gpa;
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
-		skip_emulated_instruction(vcpu);
 		trace_kvm_fast_mmio(gpa);
+		skip_emulated_instruction(vcpu);
 		return 1;
 	}
 
 	ret = handle_mmio_page_fault(vcpu, gpa, true);
 	if (likely(ret == RET_MMIO_PF_EMULATE))
 		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
 					      EMULATE_DONE;
@@ -6502,18 +6502,18 @@ static __exit void hardware_unsetup(void)
  * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
  * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
  */
 static int handle_pause(struct kvm_vcpu *vcpu)
 {
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	skip_emulated_instruction(vcpu);
 	kvm_vcpu_on_spin(vcpu);
+	skip_emulated_instruction(vcpu);
 
 	return 1;
 }
 
 static int handle_nop(struct kvm_vcpu *vcpu)
 {
 	skip_emulated_instruction(vcpu);
 	return 1;
@@ -6957,18 +6957,18 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	vmx->nested.vmcs02_num = 0;
 
 	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL_PINNED);
 	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
 	vmx->nested.vmxon = true;
 
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
+	skip_emulated_instruction(vcpu);
 	return 1;
 
 out_shadow_vmcs:
 	kfree(vmx->nested.cached_vmcs12);
 
 out_cached_vmcs12:
 	free_page((unsigned long)vmx->nested.msr_bitmap);
@@ -7078,18 +7078,18 @@ static void free_nested(struct vcpu_vmx *vmx)
 }
 
 /* Emulate the VMXOFF instruction */
 static int handle_vmoff(struct kvm_vcpu *vcpu)
 {
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 	free_nested(to_vmx(vcpu));
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
+	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
 /* Emulate the VMCLEAR instruction */
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	gpa_t vmptr;
@@ -7119,18 +7119,18 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	}
 	vmcs12 = kmap(page);
 	vmcs12->launch_state = 0;
 	kunmap(page);
 	nested_release_page(page);
 
 	nested_free_vmcs02(vmx, vmptr);
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
+	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
 
 /* Emulate the VMLAUNCH instruction */
 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
 {
-- 
2.10.2