Subject: Re: [PATCH 1/3] KVM: vmx: speed up MSR bitmap merge
From: Jim Mattson
Date: Wed, 20 Dec 2017 10:02:30 -0800
To: Paolo Bonzini
Cc: LKML, kvm list, David Hildenbrand

Reviewed-by: Jim Mattson

On Wed, Dec 20, 2017 at 4:05 AM, Paolo Bonzini wrote:
> The bulk of the MSR bitmap is either immutable, or can be copied from
> the L1 bitmap. By initializing it at VMXON time, and copying the mutable
> parts one long at a time on vmentry (rather than one bit), about 4000
> clock cycles (30%) can be saved on a nested VMLAUNCH/VMRESUME.
>
> The resulting for loop only has four iterations, so it is cheap enough
> to reinitialize the MSR write bitmaps on every iteration, and it makes
> the code simpler.
>
> Suggested-by: Jim Mattson
> Signed-off-by: Paolo Bonzini
> ---
>         v1->v2: do not WARN in nested_vmx_merge_msr_bitmap [David]
>                 rename function to nested_vmx_prepare_msr_bitmap,
>                 it's used even if there's no L1 bitmap [Paolo]
>
>  arch/x86/kvm/vmx.c | 78 +++++++++++++++++++++++++++++-------------------------
>  1 file changed, 42 insertions(+), 36 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 669f5f74857d..9f9c3194440f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5183,11 +5183,6 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
>  {
>         int f = sizeof(unsigned long);
>
> -       if (!cpu_has_vmx_msr_bitmap()) {
> -               WARN_ON(1);
> -               return;
> -       }
> -
>         /*
>          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
>          * have the write-low and read-high bitmap offsets the wrong way round.
> @@ -7459,6 +7454,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
>                         (unsigned long *)__get_free_page(GFP_KERNEL);
>                 if (!vmx->nested.msr_bitmap)
>                         goto out_msr_bitmap;
> +               memset(vmx->nested.msr_bitmap, 0xff, PAGE_SIZE);
>         }
>
>         vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
> @@ -10151,8 +10147,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
>         }
>  }
>
> -static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
> -                                               struct vmcs12 *vmcs12);
> +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
> +                                                 struct vmcs12 *vmcs12);
>
>  static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
>                                      struct vmcs12 *vmcs12)
> @@ -10241,11 +10237,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
>                                 (unsigned long)(vmcs12->posted_intr_desc_addr &
>                                 (PAGE_SIZE - 1)));
>         }
> -       if (cpu_has_vmx_msr_bitmap() &&
> -           nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
> -           nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
> -               ;
> -       else
> +       if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
>                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
>                                 CPU_BASED_USE_MSR_BITMAPS);
>  }
> @@ -10313,14 +10305,19 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
>   * Merge L0's and L1's MSR bitmap, return false to indicate that
>   * we do not use the hardware.
>   */
> -static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
> -                                               struct vmcs12 *vmcs12)
> +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
> +                                                 struct vmcs12 *vmcs12)
>  {
>         int msr;
>         struct page *page;
>         unsigned long *msr_bitmap_l1;
>         unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
>
> +       /* Nothing to do if the MSR bitmap is not in use. */
> +       if (!cpu_has_vmx_msr_bitmap() ||
> +           !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
> +               return false;
> +
>         /* This shortcut is ok because we support only x2APIC MSRs so far. */
>         if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
>                 return false;
> @@ -10328,32 +10325,41 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
>         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
>         if (is_error_page(page))
>                 return false;
> -       msr_bitmap_l1 = (unsigned long *)kmap(page);
>
> -       memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
> +       msr_bitmap_l1 = (unsigned long *)kmap(page);
> +       if (nested_cpu_has_apic_reg_virt(vmcs12)) {
> +               /*
> +                * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
> +                * just lets the processor take the value from the virtual-APIC page;
> +                * take those 256 bits directly from the L1 bitmap.
> +                */
> +               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
> +                       unsigned word = msr / BITS_PER_LONG;
> +                       msr_bitmap_l0[word] = msr_bitmap_l1[word];
> +                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
> +               }
> +       } else {
> +               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
> +                       unsigned word = msr / BITS_PER_LONG;
> +                       msr_bitmap_l0[word] = ~0;
> +                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
> +               }
> +       }
>
> -       if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
> -               if (nested_cpu_has_apic_reg_virt(vmcs12))
> -                       for (msr = 0x800; msr <= 0x8ff; msr++)
> -                               nested_vmx_disable_intercept_for_msr(
> -                                       msr_bitmap_l1, msr_bitmap_l0,
> -                                       msr, MSR_TYPE_R);
> +       nested_vmx_disable_intercept_for_msr(
> +                               msr_bitmap_l1, msr_bitmap_l0,
> +                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
> +                               MSR_TYPE_W);
>
> +       if (nested_cpu_has_vid(vmcs12)) {
>                 nested_vmx_disable_intercept_for_msr(
> -                       msr_bitmap_l1, msr_bitmap_l0,
> -                       APIC_BASE_MSR + (APIC_TASKPRI >> 4),
> -                       MSR_TYPE_R | MSR_TYPE_W);
> -
> -               if (nested_cpu_has_vid(vmcs12)) {
> -                       nested_vmx_disable_intercept_for_msr(
> -                               msr_bitmap_l1, msr_bitmap_l0,
> -                               APIC_BASE_MSR + (APIC_EOI >> 4),
> -                               MSR_TYPE_W);
> -                       nested_vmx_disable_intercept_for_msr(
> -                               msr_bitmap_l1, msr_bitmap_l0,
> -                               APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
> -                               MSR_TYPE_W);
> -               }
> +                       msr_bitmap_l1, msr_bitmap_l0,
> +                       APIC_BASE_MSR + (APIC_EOI >> 4),
> +                       MSR_TYPE_W);
> +               nested_vmx_disable_intercept_for_msr(
> +                       msr_bitmap_l1, msr_bitmap_l0,
> +                       APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
> +                       MSR_TYPE_W);
>         }
>         kunmap(page);
>         kvm_release_page_clean(page);
> --
> 1.8.3.1
>
>
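The point of the rewrite above is that the read-intercept bits for the x2APIC MSR range 0x800-0x8ff are long-aligned, so they can be copied from L1's bitmap one whole word at a time instead of one bit per MSR. The following is a minimal, self-contained userspace sketch of that idea; it is not the kernel code, and the buffer names (l0_bitmap, l1_bitmap) and the standalone program layout are illustrative assumptions only. The write-low half of the VMX MSR bitmap page starts 0x800 bytes in, which is where the "+ 0x800 / sizeof(long)" word offset comes from.

    /* Illustrative sketch only -- not the KVM implementation. */
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE      4096
    #define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long l0_bitmap[PAGE_SIZE / sizeof(unsigned long)];
    static unsigned long l1_bitmap[PAGE_SIZE / sizeof(unsigned long)];

    int main(void)
    {
            unsigned int msr;

            /* Start from "intercept everything", done once up front. */
            memset(l0_bitmap, 0xff, sizeof(l0_bitmap));

            /* Pretend L1 cleared the read intercepts for MSRs 0x800-0x83f. */
            memset(l1_bitmap, 0xff, sizeof(l1_bitmap));
            l1_bitmap[0x800 / BITS_PER_LONG] = 0;

            /*
             * Copy the 256 read-intercept bits for MSRs 0x800..0x8ff one
             * long at a time (four iterations on a 64-bit build) and keep
             * every write intercept set.
             */
            for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                    unsigned int word = msr / BITS_PER_LONG;

                    l0_bitmap[word] = l1_bitmap[word];              /* reads follow L1 */
                    l0_bitmap[word + 0x800 / sizeof(long)] = ~0UL;  /* writes trap     */
            }

            printf("read-intercept word for MSR 0x800: %#lx\n",
                   l0_bitmap[0x800 / BITS_PER_LONG]);
            return 0;
    }

On a 64-bit build the loop runs four times, matching the "four iterations" mentioned in the commit message; BITS_PER_LONG here simply stands in for the kernel macro of the same name.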