From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: guangrong.xiao@linux.intel.com, huaitong.han@intel.com
Subject: [RFC PATCH 1/2] KVM: MMU: precompute page fault error code
Date: Tue, 8 Mar 2016 12:45:37 +0100
Message-Id: <1457437538-65867-2-git-send-email-pbonzini@redhat.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1457437538-65867-1-git-send-email-pbonzini@redhat.com>
References: <1457437538-65867-1-git-send-email-pbonzini@redhat.com>

For the next patch, we will want to filter PFERR_FETCH_MASK away early,
and not pass it to permission_fault if neither NX nor SMEP is enabled.
Prepare for the change.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu.c         |  2 +-
 arch/x86/kvm/paging_tmpl.h | 26 +++++++++++++++-----------
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2463de0b935c..e57f7be061e3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3883,7 +3883,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 			u = bit & ACC_USER_MASK;
 
 			if (!ept) {
-				/* Not really needed: !nx will cause pte.nx to fault */
+				/* Not really needed: if !nx, ff will always be zero */
 				x |= !mmu->nx;
 				/* Allow supervisor writes if !cr0.wp */
 				w |= !is_write_protection(vcpu) && !uf;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6013f3685ef4..285858d3223b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -272,13 +272,24 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gpa_t pte_gpa;
 	int offset;
 	const int write_fault = access & PFERR_WRITE_MASK;
-	const int user_fault = access & PFERR_USER_MASK;
-	const int fetch_fault = access & PFERR_FETCH_MASK;
-	u16 errcode = 0;
+	u16 errcode;
 	gpa_t real_gpa;
 	gfn_t gfn;
 
 	trace_kvm_mmu_pagetable_walk(addr, access);
+
+	/*
+	 * Do not modify PFERR_FETCH_MASK in access.  It is used later in the call to
+	 * mmu->translate_gpa and, when nested virtualization is in use, the X or NX
+	 * bit of nested page tables always applies---even if NX and SMEP are disabled
+	 * in the guest.
+	 *
+	 * TODO: cache the result of the NX and SMEP test in struct kvm_mmu?
+	 */
+	errcode = access;
+	if (!(mmu->nx || kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+		errcode &= ~PFERR_FETCH_MASK;
+
 retry_walk:
 	walker->level = mmu->root_level;
 	pte = mmu->get_cr3(vcpu);
@@ -389,9 +400,7 @@ retry_walk:
 	if (unlikely(!accessed_dirty)) {
 		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
-		if (unlikely(ret < 0))
-			goto error;
-		else if (ret)
+		if (ret > 0)
 			goto retry_walk;
 	}
@@ -402,11 +411,6 @@ retry_walk:
 	return 1;
 
 error:
-	errcode |= write_fault | user_fault;
-	if (fetch_fault && (mmu->nx ||
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-		errcode |= PFERR_FETCH_MASK;
-
 	walker->fault.vector = PF_VECTOR;
 	walker->fault.error_code_valid = true;
 	walker->fault.error_code = errcode;
-- 
1.8.3.1
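
[Editor's note: for readers following along outside the tree, below is a
minimal, self-contained sketch of the precomputation this patch introduces.
The precompute_errcode() helper and the cpu_state struct are hypothetical
stand-ins for the real vcpu/mmu state, not KVM APIs; the PFERR_* bit
positions match arch/x86/include/asm/kvm_host.h.]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PFERR_WRITE_MASK  (1U << 1)
	#define PFERR_USER_MASK   (1U << 2)
	#define PFERR_FETCH_MASK  (1U << 4)

	struct cpu_state {		/* hypothetical stand-in for vcpu/mmu state */
		bool nx;		/* EFER.NX in effect for this MMU */
		bool cr4_smep;		/* CR4.SMEP */
	};

	/* Seed the error code from 'access' once, before the walk starts. */
	static uint16_t precompute_errcode(uint32_t access,
					   const struct cpu_state *s)
	{
		uint16_t errcode = access;

		/*
		 * Without NX and SMEP, an instruction fetch cannot fault on
		 * its own, so the fetch bit must not appear in the reported
		 * error code.  'access' itself is left untouched, mirroring
		 * the patch's comment about mmu->translate_gpa.
		 */
		if (!(s->nx || s->cr4_smep))
			errcode &= ~PFERR_FETCH_MASK;
		return errcode;
	}

	int main(void)
	{
		struct cpu_state legacy = { .nx = false, .cr4_smep = false };
		struct cpu_state modern = { .nx = true,  .cr4_smep = true  };
		uint32_t access = PFERR_USER_MASK | PFERR_FETCH_MASK;

		/* fetch bit dropped: prints 0x4 */
		printf("legacy: %#x\n", precompute_errcode(access, &legacy));
		/* fetch bit kept: prints 0x14 */
		printf("modern: %#x\n", precompute_errcode(access, &modern));
		return 0;
	}

[The design point: the old code built errcode at the error label, ORing
PFERR_FETCH_MASK back in after checking NX/SMEP; precomputing it up front
means the fetch bit can be filtered away early and, per the commit message,
need not be passed to permission_fault at all in the next patch.]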