From: Jim Mattson
Date: Thu, 21 Dec 2017 14:51:08 -0800
Subject: Re: [PATCH 1/4] KVM: VMX: split list of shadowed VMCS field to a separate file
To: Paolo Bonzini
Cc: LKML , kvm list
In-Reply-To: <1513860222-40944-2-git-send-email-pbonzini@redhat.com>
References: <1513860222-40944-1-git-send-email-pbonzini@redhat.com>
 <1513860222-40944-2-git-send-email-pbonzini@redhat.com>

Reviewed-by: Jim Mattson

On Thu, Dec 21, 2017 at 4:43 AM, Paolo Bonzini wrote:
> Prepare for multiple inclusions of the list.
>
> Signed-off-by: Paolo Bonzini
> ---
>  arch/x86/kvm/vmx.c               | 64 +++---------------------------------
>  arch/x86/kvm/vmx_shadow_fields.h | 71 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 75 insertions(+), 60 deletions(-)
>  create mode 100644 arch/x86/kvm/vmx_shadow_fields.h
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 0ac665f88cc0..2ee842990976 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -715,71 +715,15 @@ static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
>
>
>  static u16 shadow_read_only_fields[] = {
> -	/*
> -	 * We do NOT shadow fields that are modified when L0
> -	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
> -	 * VMXON...) executed by L1.
> -	 * For example, VM_INSTRUCTION_ERROR is read
> -	 * by L1 if a vmx instruction fails (part of the error path).
> -	 * Note the code assumes this logic. If for some reason
> -	 * we start shadowing these fields then we need to
> -	 * force a shadow sync when L0 emulates vmx instructions
> -	 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
> -	 * by nested_vmx_failValid)
> -	 */
> -	/* 32-bits */
> -	VM_EXIT_REASON,
> -	VM_EXIT_INTR_INFO,
> -	VM_EXIT_INSTRUCTION_LEN,
> -	IDT_VECTORING_INFO_FIELD,
> -	IDT_VECTORING_ERROR_CODE,
> -	VM_EXIT_INTR_ERROR_CODE,
> -
> -	/* Natural width */
> -	EXIT_QUALIFICATION,
> -	GUEST_LINEAR_ADDRESS,
> -
> -	/* 64-bit */
> -	GUEST_PHYSICAL_ADDRESS,
> -	GUEST_PHYSICAL_ADDRESS_HIGH,
> +#define SHADOW_FIELD_RO(x) x,
> +#include "vmx_shadow_fields.h"
>  };
>  static int max_shadow_read_only_fields =
>  	ARRAY_SIZE(shadow_read_only_fields);
>
>  static u16 shadow_read_write_fields[] = {
> -	/* 16-bits */
> -	GUEST_CS_SELECTOR,
> -	GUEST_INTR_STATUS,
> -	GUEST_PML_INDEX,
> -	HOST_FS_SELECTOR,
> -	HOST_GS_SELECTOR,
> -
> -	/* 32-bits */
> -	CPU_BASED_VM_EXEC_CONTROL,
> -	EXCEPTION_BITMAP,
> -	VM_ENTRY_EXCEPTION_ERROR_CODE,
> -	VM_ENTRY_INTR_INFO_FIELD,
> -	VM_ENTRY_INSTRUCTION_LEN,
> -	TPR_THRESHOLD,
> -	GUEST_CS_LIMIT,
> -	GUEST_CS_AR_BYTES,
> -	GUEST_INTERRUPTIBILITY_INFO,
> -	VMX_PREEMPTION_TIMER_VALUE,
> -
> -	/* Natural width */
> -	GUEST_RIP,
> -	GUEST_RSP,
> -	GUEST_CR0,
> -	GUEST_CR3,
> -	GUEST_CR4,
> -	GUEST_RFLAGS,
> -	GUEST_CS_BASE,
> -	GUEST_ES_BASE,
> -	CR0_GUEST_HOST_MASK,
> -	CR0_READ_SHADOW,
> -	CR4_READ_SHADOW,
> -	HOST_FS_BASE,
> -	HOST_GS_BASE,
> +#define SHADOW_FIELD_RW(x) x,
> +#include "vmx_shadow_fields.h"
>  };
>  static int max_shadow_read_write_fields =
>  	ARRAY_SIZE(shadow_read_write_fields);
> diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h
> new file mode 100644
> index 000000000000..31d7a15338ac
> --- /dev/null
> +++ b/arch/x86/kvm/vmx_shadow_fields.h
> @@ -0,0 +1,71 @@
> +#ifndef SHADOW_FIELD_RO
> +#define SHADOW_FIELD_RO(x)
> +#endif
> +#ifndef SHADOW_FIELD_RW
> +#define SHADOW_FIELD_RW(x)
> +#endif
> +
> +/*
> + * We do NOT shadow fields that are modified when L0
> + * traps and emulates any vmx instruction (e.g. VMPTRLD,
> + * VMXON...) executed by L1.
> + * For example, VM_INSTRUCTION_ERROR is read
> + * by L1 if a vmx instruction fails (part of the error path).
> + * Note the code assumes this logic. If for some reason
> + * we start shadowing these fields then we need to
> + * force a shadow sync when L0 emulates vmx instructions
> + * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
> + * by nested_vmx_failValid)
> + *
> + * Keeping the fields ordered by size is an attempt at improving
> + * branch prediction in vmcs_read_any and vmcs_write_any.
> + */
> +
> +/* 16-bits */
> +SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
> +SHADOW_FIELD_RW(GUEST_INTR_STATUS)
> +SHADOW_FIELD_RW(GUEST_PML_INDEX)
> +SHADOW_FIELD_RW(HOST_FS_SELECTOR)
> +SHADOW_FIELD_RW(HOST_GS_SELECTOR)
> +
> +/* 32-bits */
> +SHADOW_FIELD_RO(VM_EXIT_REASON)
> +SHADOW_FIELD_RO(VM_EXIT_INTR_INFO)
> +SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
> +SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
> +SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
> +SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
> +SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
> +SHADOW_FIELD_RW(EXCEPTION_BITMAP)
> +SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
> +SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
> +SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
> +SHADOW_FIELD_RW(TPR_THRESHOLD)
> +SHADOW_FIELD_RW(GUEST_CS_LIMIT)
> +SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
> +SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
> +SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
> +
> +/* Natural width */
> +SHADOW_FIELD_RO(EXIT_QUALIFICATION)
> +SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS)
> +SHADOW_FIELD_RW(GUEST_RIP)
> +SHADOW_FIELD_RW(GUEST_RSP)
> +SHADOW_FIELD_RW(GUEST_CR0)
> +SHADOW_FIELD_RW(GUEST_CR3)
> +SHADOW_FIELD_RW(GUEST_CR4)
> +SHADOW_FIELD_RW(GUEST_RFLAGS)
> +SHADOW_FIELD_RW(GUEST_CS_BASE)
> +SHADOW_FIELD_RW(GUEST_ES_BASE)
> +SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
> +SHADOW_FIELD_RW(CR0_READ_SHADOW)
> +SHADOW_FIELD_RW(CR4_READ_SHADOW)
> +SHADOW_FIELD_RW(HOST_FS_BASE)
> +SHADOW_FIELD_RW(HOST_GS_BASE)
> +
> +/* 64-bit */
> +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS)
> +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH)
> +
> +#undef SHADOW_FIELD_RO
> +#undef SHADOW_FIELD_RW
> --
> 1.8.3.1
>
>
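
For anyone who has not run into this include-the-list-twice ("X-macro")
idiom before, here is a minimal, self-contained sketch of how the two
arrays end up being built. The file name demo_fields.h and the
FIELD_A/FIELD_B/FIELD_C encodings below are invented for illustration;
only the structure follows the patch.

/* demo_fields.h - plays the role of vmx_shadow_fields.h (illustration only) */
#ifndef SHADOW_FIELD_RO
#define SHADOW_FIELD_RO(x)
#endif
#ifndef SHADOW_FIELD_RW
#define SHADOW_FIELD_RW(x)
#endif

SHADOW_FIELD_RO(FIELD_A)
SHADOW_FIELD_RW(FIELD_B)
SHADOW_FIELD_RW(FIELD_C)

#undef SHADOW_FIELD_RO
#undef SHADOW_FIELD_RW

/* demo.c - plays the role of vmx.c (illustration only) */
#include <stdio.h>

/* Made-up stand-ins for the real VMCS field encodings. */
enum { FIELD_A = 0x4402, FIELD_B = 0x681e, FIELD_C = 0x6820 };

/* First inclusion: expand only the RO entries; RW defaults to nothing. */
static const unsigned short read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "demo_fields.h"
};

/* Second inclusion: this time expand only the RW entries. */
static const unsigned short read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "demo_fields.h"
};

int main(void)
{
	printf("%zu read-only, %zu read/write\n",
	       sizeof(read_only_fields) / sizeof(read_only_fields[0]),
	       sizeof(read_write_fields) / sizeof(read_write_fields[0]));
	return 0;
}

Building demo.c and running it should print "1 read-only, 2 read/write".
Because the header #undefs both macros at the end and defaults any
undefined one to an empty expansion, each include site defines only the
expansion it cares about, which is what makes further inclusions of the
same list cheap.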