2020-06-22 22:48:40

by Sean Christopherson

Subject: [PATCH 00/15] KVM: x86: VMX: Fix MSR namespacing

This series attempts to clean up VMX's MSR namespacing, which is an
unmitigated disaster (keeping things PG).

There are a variety of ways VMX saves and restores guest MSRs, all with
unique properties and mechanisms, but with haphazard namespacing (assuming
there is any namespacing at all). Some fun collisions:

__find_msr_index(), find_msr_entry() and vmx_find_msr_index()

vmx_set_guest_msr() and vmx_set_msr()

structs vmx_msrs, vmx_msr_entry, shared_msr_entry, kvm_shared_msrs and
kvm_shared_msrs_values

vcpu_vmx fields guest_msrs, msr_autoload.guest and msr_autostore.guest

Probably the most infuriating/confusing nomenclature is "index", which
can mean an MSR's ECX index, an index into one of several VMX arrays,
or an index into a common x86 array. __find_msr_index() even manages to
mix at least three different meanings in about as many lines of code,
for example:
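
        /*
         * Annotated for illustration (the comments are not in the
         * source): @msr is the MSR's ECX index (e.g. MSR_EFER), @i
         * indexes vcpu_vmx's guest_msrs[] array, and guest_msrs[i].index
         * is yet another index, into the vmx_msr_index[] list and, by
         * construction, the common x86 shared MSR array.
         */
        static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
        {
                int i;

                for (i = 0; i < vmx->nmsrs; ++i)
                        if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
                                return i;
                return -1;
        }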

The biggest change is to rename the "shared MSRs" mechanism to "user
return MSRs" (details in patch 1); nearly everything else is either
derived from that rename or is fairly straightforward cleanup.
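
For readers without patch 1 in front of them: the mechanism loads the
guest's values into the affected hardware MSRs while the vCPU runs, and
the common x86 code restores the host values (via a user-return
notifier) before the CPU returns to userspace. With patch 1's rename
applied, the VMX side of the handoff (condensed from
vmx_prepare_switch_to_guest()) is just:

        for (i = 0; i < vmx->save_nmsrs; ++i)
                kvm_set_user_return_msr(vmx->guest_msrs[i].index,
                                        vmx->guest_msrs[i].data,
                                        vmx->guest_msrs[i].mask);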

No true functional changes, although the update_transition_efer() change
in patch 10 dances pretty close to being a functional change.

Sean Christopherson (15):
KVM: x86: Rename "shared_msrs" to "user_return_msrs"
KVM: VMX: Prepend "MAX_" to MSR array size defines
KVM: VMX: Rename "vmx_find_msr_index" to "vmx_find_loadstore_msr_slot"
KVM: VMX: Rename the "shared_msr_entry" struct to "vmx_uret_msr"
KVM: VMX: Rename vcpu_vmx's "nmsrs" to "nr_uret_msrs"
KVM: VMX: Rename vcpu_vmx's "save_nmsrs" to "nr_active_uret_msrs"
KVM: VMX: Rename vcpu_vmx's "guest_msrs_ready" to
"guest_uret_msrs_loaded"
KVM: VMX: Rename "__find_msr_index" to "__vmx_find_uret_msr"
KVM: VMX: Check guest support for RDTSCP before processing MSR_TSC_AUX
KVM: VMX: Move uret MSR lookup into update_transition_efer()
KVM: VMX: Add vmx_setup_uret_msr() to handle lookup and swap
KVM: VMX: Rename "find_msr_entry" to "vmx_find_uret_msr"
KVM: VMX: Rename "vmx_set_guest_msr" to "vmx_set_guest_uret_msr"
KVM: VMX: Rename "vmx_msr_index" to "vmx_uret_msrs_list"
KVM: VMX: Rename vmx_uret_msr's "index" to "slot"

arch/x86/include/asm/kvm_host.h | 4 +-
arch/x86/kvm/vmx/nested.c | 22 ++--
arch/x86/kvm/vmx/vmx.c | 184 ++++++++++++++++----------------
arch/x86/kvm/vmx/vmx.h | 24 ++---
arch/x86/kvm/x86.c | 101 +++++++++---------
5 files changed, 168 insertions(+), 167 deletions(-)

--
2.26.0


2020-06-22 22:49:07

by Sean Christopherson

Subject: [PATCH 04/15] KVM: VMX: Rename the "shared_msr_entry" struct to "vmx_uret_msr"

Rename struct "shared_msr_entry" to "vmx_uret_msr" to align with x86's
rename of "shared_msrs" to "user_return_msrs", and to call out that the
struct is specific to VMX, i.e. not part of the generic "shared_msrs"
framework. Abbreviate "user_return" as "uret" to keep line lengths
marginally sane and code more or less readable.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/nested.c | 2 +-
arch/x86/kvm/vmx/vmx.c | 58 +++++++++++++++++++--------------------
arch/x86/kvm/vmx/vmx.h | 10 +++----
3 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index afc8e7e9ef24..52de3e03fcdc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4209,7 +4209,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,

static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{
- struct shared_msr_entry *efer_msr;
+ struct vmx_uret_msr *efer_msr;
unsigned int i;

if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ba9af25b34e4..9cd40a7e9b47 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -630,28 +630,28 @@ static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
int i;

for (i = 0; i < vmx->nmsrs; ++i)
- if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
+ if (vmx_msr_index[vmx->guest_uret_msrs[i].index] == msr)
return i;
return -1;
}

-struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+struct vmx_uret_msr *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;

i = __find_msr_index(vmx, msr);
if (i >= 0)
- return &vmx->guest_msrs[i];
+ return &vmx->guest_uret_msrs[i];
return NULL;
}

-static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data)
+static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct vmx_uret_msr *msr, u64 data)
{
int ret = 0;

u64 old_msr_data = msr->data;
msr->data = data;
- if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ if (msr - vmx->guest_uret_msrs < vmx->save_nmsrs) {
preempt_disable();
ret = kvm_set_user_return_msr(msr->index, msr->data, msr->mask);
preempt_enable();
@@ -996,8 +996,8 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;

- vmx->guest_msrs[efer_offset].data = guest_efer;
- vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+ vmx->guest_uret_msrs[efer_offset].data = guest_efer;
+ vmx->guest_uret_msrs[efer_offset].mask = ~ignore_bits;

return true;
}
@@ -1145,9 +1145,9 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
if (!vmx->guest_msrs_ready) {
vmx->guest_msrs_ready = true;
for (i = 0; i < vmx->save_nmsrs; ++i)
- kvm_set_user_return_msr(vmx->guest_msrs[i].index,
- vmx->guest_msrs[i].data,
- vmx->guest_msrs[i].mask);
+ kvm_set_user_return_msr(vmx->guest_uret_msrs[i].index,
+ vmx->guest_uret_msrs[i].data,
+ vmx->guest_uret_msrs[i].mask);

}

@@ -1714,11 +1714,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
*/
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
- struct shared_msr_entry tmp;
+ struct vmx_uret_msr tmp;

- tmp = vmx->guest_msrs[to];
- vmx->guest_msrs[to] = vmx->guest_msrs[from];
- vmx->guest_msrs[from] = tmp;
+ tmp = vmx->guest_uret_msrs[to];
+ vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
+ vmx->guest_uret_msrs[from] = tmp;
}

/*
@@ -1829,7 +1829,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr;
+ struct vmx_uret_msr *msr;
u32 index;

switch (msr_info->index) {
@@ -1850,7 +1850,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
return 1;
- goto find_shared_msr;
+ goto find_uret_msr;
case MSR_IA32_UMWAIT_CONTROL:
if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
return 1;
@@ -1957,9 +1957,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
- goto find_shared_msr;
+ goto find_uret_msr;
default:
- find_shared_msr:
+ find_uret_msr:
msr = find_msr_entry(vmx, msr_info->index);
if (msr) {
msr_info->data = msr->data;
@@ -1989,7 +1989,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr;
+ struct vmx_uret_msr *msr;
int ret = 0;
u32 msr_index = msr_info->index;
u64 data = msr_info->data;
@@ -2093,7 +2093,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
return 1;
- goto find_shared_msr;
+ goto find_uret_msr;
case MSR_IA32_PRED_CMD:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
@@ -2230,10 +2230,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
return 1;
- goto find_shared_msr;
+ goto find_uret_msr;

default:
- find_shared_msr:
+ find_uret_msr:
msr = find_msr_entry(vmx, msr_index);
if (msr)
ret = vmx_set_guest_msr(vmx, msr, data);
@@ -2866,7 +2866,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+ struct vmx_uret_msr *msr = find_msr_entry(vmx, MSR_EFER);

if (!msr)
return;
@@ -6891,7 +6891,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid;
}

- BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != MAX_NR_SHARED_MSRS);
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != MAX_NR_USER_RETURN_MSRS);

for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i];
@@ -6903,8 +6903,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;

- vmx->guest_msrs[j].index = i;
- vmx->guest_msrs[j].data = 0;
+ vmx->guest_uret_msrs[j].index = i;
+ vmx->guest_uret_msrs[j].data = 0;
switch (index) {
case MSR_IA32_TSX_CTRL:
/*
@@ -6912,10 +6912,10 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
* let's avoid changing CPUID bits under the host
* kernel's feet.
*/
- vmx->guest_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
break;
default:
- vmx->guest_msrs[j].mask = -1ull;
+ vmx->guest_uret_msrs[j].mask = -1ull;
break;
}
++vmx->nmsrs;
@@ -7282,7 +7282,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
update_intel_pt_cfg(vcpu);

if (boot_cpu_has(X86_FEATURE_RTM)) {
- struct shared_msr_entry *msr;
+ struct vmx_uret_msr *msr;
msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
if (msr) {
bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 12a845209088..256e3e4776f8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -21,9 +21,9 @@ extern const u32 vmx_msr_index[];
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
-#define MAX_NR_SHARED_MSRS 7
+#define MAX_NR_USER_RETURN_MSRS 7
#else
-#define MAX_NR_SHARED_MSRS 4
+#define MAX_NR_USER_RETURN_MSRS 4
#endif

#define MAX_NR_LOADSTORE_MSRS 8
@@ -33,7 +33,7 @@ struct vmx_msrs {
struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

-struct shared_msr_entry {
+struct vmx_uret_msr {
unsigned index;
u64 data;
u64 mask;
@@ -217,7 +217,7 @@ struct vcpu_vmx {
u32 idt_vectoring_info;
ulong rflags;

- struct shared_msr_entry guest_msrs[MAX_NR_SHARED_MSRS];
+ struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
int nmsrs;
int save_nmsrs;
bool guest_msrs_ready;
@@ -351,7 +351,7 @@ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
-struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
+struct vmx_uret_msr *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
--
2.26.0

2020-06-22 23:36:24

by Sean Christopherson

Subject: [PATCH 07/15] KVM: VMX: Rename vcpu_vmx's "guest_msrs_ready" to "guest_uret_msrs_loaded"

Add "uret" to "guest_msrs_ready" to explicitly associate it with the
"guest_uret_msrs" array, and replace "ready" with "loaded" to more
precisely reflect what it tracks, e.g. "ready" could be interpreted as
meaning ready for processing (setup_msrs() has run), which is wrong.
"loaded" also aligns with the similar "guest_state_loaded" field.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/vmx.c | 8 ++++----
arch/x86/kvm/vmx/vmx.h | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index baf425fa7089..cebd68ea50ba 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1142,8 +1142,8 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* when guest state is loaded. This happens when guest transitions
* to/from long-mode by setting MSR_EFER.LMA.
*/
- if (!vmx->guest_msrs_ready) {
- vmx->guest_msrs_ready = true;
+ if (!vmx->guest_uret_msrs_loaded) {
+ vmx->guest_uret_msrs_loaded = true;
for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
kvm_set_user_return_msr(vmx->guest_uret_msrs[i].index,
vmx->guest_uret_msrs[i].data,
@@ -1231,7 +1231,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
#endif
load_fixmap_gdt(raw_smp_processor_id());
vmx->guest_state_loaded = false;
- vmx->guest_msrs_ready = false;
+ vmx->guest_uret_msrs_loaded = false;
}

#ifdef CONFIG_X86_64
@@ -1759,7 +1759,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
move_msr_up(vmx, index, nr_active_uret_msrs++);

vmx->nr_active_uret_msrs = nr_active_uret_msrs;
- vmx->guest_msrs_ready = false;
+ vmx->guest_uret_msrs_loaded = false;

if (cpu_has_vmx_msr_bitmap())
vmx_update_msr_bitmap(&vmx->vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 55257195cb27..a0237ff6c4e0 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -220,7 +220,7 @@ struct vcpu_vmx {
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
int nr_uret_msrs;
int nr_active_uret_msrs;
- bool guest_msrs_ready;
+ bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
--
2.26.0

2020-06-22 23:36:35

by Sean Christopherson

Subject: [PATCH 10/15] KVM: VMX: Move uret MSR lookup into update_transition_efer()

Move checking for the existence of MSR_EFER in the uret MSR array into
update_transition_efer() so that the lookup and manipulation of the
array in setup_msrs() occur back-to-back. This paves the way toward
adding a helper to wrap the lookup and manipulation.

To avoid unnecessary overhead, defer the lookup until the uret array
would actually be modified in update_transition_efer(). EFER obviously
exists on CPUs that support the dedicated VMCS fields for switching
EFER, and EFER must exist for the guest and host EFER.NX value to
diverge, i.e. there is no danger of attempting to read/write EFER when
it doesn't exist.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/vmx.c | 35 +++++++++++++++++++++--------------
1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 954b9aa950f2..8731ca8ca2b0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -955,10 +955,11 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
m->host.val[j].value = host_val;
}

-static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+static bool update_transition_efer(struct vcpu_vmx *vmx)
{
u64 guest_efer = vmx->vcpu.arch.efer;
u64 ignore_bits = 0;
+ int i;

/* Shadow paging assumes NX to be available. */
if (!enable_ept)
@@ -990,17 +991,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
else
clear_atomic_switch_msr(vmx, MSR_EFER);
return false;
- } else {
- clear_atomic_switch_msr(vmx, MSR_EFER);
-
- guest_efer &= ~ignore_bits;
- guest_efer |= host_efer & ignore_bits;
-
- vmx->guest_uret_msrs[efer_offset].data = guest_efer;
- vmx->guest_uret_msrs[efer_offset].mask = ~ignore_bits;
-
- return true;
}
+
+ i = __vmx_find_uret_msr(vmx, MSR_EFER);
+ if (i < 0)
+ return false;
+
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
+
+ vmx->guest_uret_msrs[i].data = guest_efer;
+ vmx->guest_uret_msrs[i].mask = ~ignore_bits;
+
+ return true;
}

#ifdef CONFIG_X86_32
@@ -1748,9 +1753,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
move_msr_up(vmx, index, nr_active_uret_msrs++);
}
#endif
- index = __vmx_find_uret_msr(vmx, MSR_EFER);
- if (index >= 0 && update_transition_efer(vmx, index))
- move_msr_up(vmx, index, nr_active_uret_msrs++);
+ if (update_transition_efer(vmx)) {
+ index = __vmx_find_uret_msr(vmx, MSR_EFER);
+ if (index >= 0)
+ move_msr_up(vmx, index, nr_active_uret_msrs++);
+ }
if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
if (index >= 0)
--
2.26.0

2020-06-22 23:36:35

by Sean Christopherson

Subject: [PATCH 11/15] KVM: VMX: Add vmx_setup_uret_msr() to handle lookup and swap

Add vmx_setup_uret_msr() to wrap the lookup and manipulation of the uret
MSRs array during setup_msrs(). In addition to consolidating code, this
eliminates move_msr_up(), which, while a very literal description of
what the function does, isn't exactly helpful in understanding the net
effect of the code.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/vmx.c | 49 ++++++++++++++++--------------------------
1 file changed, 18 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8731ca8ca2b0..f3cd1de7b0ff 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1714,12 +1714,15 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
vmx_clear_hlt(vcpu);
}

-/*
- * Swap MSR entry in host/guest MSR entry array.
- */
-static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
{
struct vmx_uret_msr tmp;
+ int from, to;
+
+ from = __vmx_find_uret_msr(vmx, msr);
+ if (from < 0)
+ return;
+ to = vmx->nr_active_uret_msrs++;

tmp = vmx->guest_uret_msrs[to];
vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
@@ -1733,42 +1736,26 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
*/
static void setup_msrs(struct vcpu_vmx *vmx)
{
- int nr_active_uret_msrs, index;
-
- nr_active_uret_msrs = 0;
+ vmx->guest_uret_msrs_loaded = false;
+ vmx->nr_active_uret_msrs = 0;
#ifdef CONFIG_X86_64
/*
* The SYSCALL MSRs are only needed on long mode guests, and only
* when EFER.SCE is set.
*/
if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
- index = __vmx_find_uret_msr(vmx, MSR_STAR);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
- index = __vmx_find_uret_msr(vmx, MSR_LSTAR);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
- index = __vmx_find_uret_msr(vmx, MSR_SYSCALL_MASK);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
+ vmx_setup_uret_msr(vmx, MSR_STAR);
+ vmx_setup_uret_msr(vmx, MSR_LSTAR);
+ vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
}
#endif
- if (update_transition_efer(vmx)) {
- index = __vmx_find_uret_msr(vmx, MSR_EFER);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
- }
- if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
- index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
- }
- index = __vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
- if (index >= 0)
- move_msr_up(vmx, index, nr_active_uret_msrs++);
+ if (update_transition_efer(vmx))
+ vmx_setup_uret_msr(vmx, MSR_EFER);

- vmx->nr_active_uret_msrs = nr_active_uret_msrs;
- vmx->guest_uret_msrs_loaded = false;
+ if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
+ vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+
+ vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);

if (cpu_has_vmx_msr_bitmap())
vmx_update_msr_bitmap(&vmx->vcpu);
--
2.26.0

2020-06-22 23:36:54

by Sean Christopherson

Subject: [PATCH 05/15] KVM: VMX: Rename vcpu_vmx's "nmsrs" to "nr_uret_msrs"

Rename vcpu_vmx.nmsrs to vcpu_vmx.nr_uret_msrs to explicitly associate
it with the guest_uret_msrs array.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/vmx.c | 6 +++---
arch/x86/kvm/vmx/vmx.h | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9cd40a7e9b47..d957f9d2e351 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -629,7 +629,7 @@ static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;

- for (i = 0; i < vmx->nmsrs; ++i)
+ for (i = 0; i < vmx->nr_uret_msrs; ++i)
if (vmx_msr_index[vmx->guest_uret_msrs[i].index] == msr)
return i;
return -1;
@@ -6896,7 +6896,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
- int j = vmx->nmsrs;
+ int j = vmx->nr_uret_msrs;

if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
@@ -6918,7 +6918,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->guest_uret_msrs[j].mask = -1ull;
break;
}
- ++vmx->nmsrs;
+ ++vmx->nr_uret_msrs;
}

err = alloc_loaded_vmcs(&vmx->vmcs01);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 256e3e4776f8..16450f85ddcb 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -218,7 +218,7 @@ struct vcpu_vmx {
ulong rflags;

struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
- int nmsrs;
+ int nr_uret_msrs;
int save_nmsrs;
bool guest_msrs_ready;
#ifdef CONFIG_X86_64
--
2.26.0

2020-06-23 03:17:55

by Sean Christopherson

Subject: [PATCH 02/15] KVM: VMX: Prepend "MAX_" to MSR array size defines

Add "MAX" to the LOADSTORE and so called SHARED MSR defines to make it
more clear that the define controls the array size, as opposed to the
actual number of valid entries that are in the array.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/nested.c | 2 +-
arch/x86/kvm/vmx/vmx.c | 6 +++---
arch/x86/kvm/vmx/vmx.h | 10 +++++-----
3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index adb11b504d5c..df33878ff612 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1035,7 +1035,7 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

if (in_vmcs12_store_list && !in_autostore_list) {
- if (autostore->nr == NR_LOADSTORE_MSRS) {
+ if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
/*
* Emulated VMEntry does not fail here. Instead a less
* accurate value will be returned by
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ea79a02d905c..19e6f697affb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -931,8 +931,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
if (!entry_only)
j = vmx_find_msr_index(&m->host, msr);

- if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) ||
- (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) {
+ if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
+ (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
@@ -6891,7 +6891,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid;
}

- BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS);
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != MAX_NR_SHARED_MSRS);

for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i];
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 8a83b5edc820..4c053d204bea 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -21,16 +21,16 @@ extern const u32 vmx_msr_index[];
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
-#define NR_SHARED_MSRS 7
+#define MAX_NR_SHARED_MSRS 7
#else
-#define NR_SHARED_MSRS 4
+#define MAX_NR_SHARED_MSRS 4
#endif

-#define NR_LOADSTORE_MSRS 8
+#define MAX_NR_LOADSTORE_MSRS 8

struct vmx_msrs {
unsigned int nr;
- struct vmx_msr_entry val[NR_LOADSTORE_MSRS];
+ struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct shared_msr_entry {
@@ -217,7 +217,7 @@ struct vcpu_vmx {
u32 idt_vectoring_info;
ulong rflags;

- struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
+ struct shared_msr_entry guest_msrs[MAX_NR_SHARED_MSRS];
int nmsrs;
int save_nmsrs;
bool guest_msrs_ready;
--
2.26.0