The low five bits {INTEL_PSFD, IPRED_CTRL, RRSBA_CTRL, DDPD_U, BHI_CTRL}
advertise the availability of specific bits in IA32_SPEC_CTRL. Since KVM
dynamically determines the legal IA32_SPEC_CTRL bits for the underlying
hardware, the hard work has already been done. Just let userspace know
that a guest can use these IA32_SPEC_CTRL bits.
The sixth bit (MCDT_NO) states that the processor does not exhibit MXCSR
Configuration Dependent Timing (MCDT) behavior. This is an inherent
property of the physical processor that is inherited by the virtual
CPU. Pass that information on to userspace.
Signed-off-by: Jim Mattson <[email protected]>
---
arch/x86/kvm/cpuid.c | 21 ++++++++++++++++++---
arch/x86/kvm/reverse_cpuid.h | 12 ++++++++++++
2 files changed, 30 insertions(+), 3 deletions(-)
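Aside, not part of the patch: a minimal sketch of how userspace could observe the new subleaf once it is advertised. Only the leaf/index/bit numbers come from this patch; the program itself (the entry count, error handling, etc.) is a hypothetical KVM_GET_SUPPORTED_CPUID query, trimmed for brevity.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        struct kvm_cpuid2 *cpuid;
        unsigned int i, nent = 128;     /* a guess; real code retries on E2BIG */
        int kvm = open("/dev/kvm", O_RDWR);

        if (kvm < 0)
                return 1;

        cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
        if (!cpuid)
                return 1;
        cpuid->nent = nent;
        if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0)
                return 1;

        for (i = 0; i < cpuid->nent; i++) {
                struct kvm_cpuid_entry2 *e = &cpuid->entries[i];

                /* CPUID.(EAX=7,ECX=2):EDX, populated by this patch. */
                if (e->function == 7 && e->index == 2)
                        printf("7.2 EDX = %#x (MCDT_NO = %u)\n",
                               e->edx, (e->edx >> 5) & 1);
        }
        return 0;
}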
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c134c181ba80..e5fc888b2715 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -677,6 +677,11 @@ void kvm_set_cpu_caps(void)
F(AMX_COMPLEX)
);
+ kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
+ F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
+ F(BHI_CTRL) | F(MCDT_NO)
+ );
+
kvm_cpu_cap_mask(CPUID_D_1_EAX,
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
);
@@ -957,13 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* function 7 has additional index. */
case 7:
- entry->eax = min(entry->eax, 1u);
+ max_idx = entry->eax = min(entry->eax, 2u);
cpuid_entry_override(entry, CPUID_7_0_EBX);
cpuid_entry_override(entry, CPUID_7_ECX);
cpuid_entry_override(entry, CPUID_7_EDX);
- /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
- if (entry->eax == 1) {
+ /* KVM only supports up to 0x7.2, capped above via min(). */
+ if (max_idx >= 1) {
entry = do_host_cpuid(array, function, 1);
if (!entry)
goto out;
@@ -973,6 +978,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ebx = 0;
entry->ecx = 0;
}
+ if (max_idx >= 2) {
+ entry = do_host_cpuid(array, function, 2);
+ if (!entry)
+ goto out;
+
+ cpuid_entry_override(entry, CPUID_7_2_EDX);
+ entry->ecx = 0;
+ entry->ebx = 0;
+ entry->eax = 0;
+ }
break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index b81650678375..17007016d8b5 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
CPUID_7_1_EDX,
CPUID_8000_0007_EDX,
CPUID_8000_0022_EAX,
+ CPUID_7_2_EDX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
+/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
+#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
+#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
+#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
+#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
+#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
+ [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
};
/*
@@ -116,6 +126,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
return KVM_X86_FEATURE_CONSTANT_TSC;
else if (x86_feature == X86_FEATURE_PERFMON_V2)
return KVM_X86_FEATURE_PERFMON_V2;
+ else if (x86_feature == X86_FEATURE_RRSBA_CTRL)
+ return KVM_X86_FEATURE_RRSBA_CTRL;
return x86_feature;
}
--
2.42.0.758.gaed0368e0e-goog
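Background for the patch above, not part of it: kvm_cpu_cap_init_kvm_defined() records the F(...) mask in the kvm_cpu_caps[] word for the KVM-defined leaf, and cpuid_entry_override() later copies that word into whichever output register reverse_cpuid[] names for the leaf, here CPUID.(EAX=7,ECX=2):EDX. A simplified sketch of the override step follows; it assumes the KVM-internal kvm_cpu_caps[] array and reverse_cpuid[] table and is not the kernel's exact code.

/* Simplified sketch only; the real helper differs in detail but has the
 * same effect: the host-reported register is replaced with KVM's
 * sanitized capability word for that leaf. */
static void cpuid_entry_override_sketch(struct kvm_cpuid_entry2 *entry,
                                        unsigned int leaf)
{
        u32 *reg;

        switch (reverse_cpuid[leaf].reg) {
        case CPUID_EAX: reg = &entry->eax; break;
        case CPUID_EBX: reg = &entry->ebx; break;
        case CPUID_ECX: reg = &entry->ecx; break;
        default:        reg = &entry->edx; break;
        }

        *reg = kvm_cpu_caps[leaf];
}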
The compiler will probably do better than linear search.
No functional change intended.
Signed-off-by: Jim Mattson <[email protected]>
---
arch/x86/kvm/reverse_cpuid.h | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 17007016d8b5..da52f5ea0351 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -116,20 +116,22 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
*/
static __always_inline u32 __feature_translate(int x86_feature)
{
- if (x86_feature == X86_FEATURE_SGX1)
+ switch (x86_feature) {
+ case X86_FEATURE_SGX1:
return KVM_X86_FEATURE_SGX1;
- else if (x86_feature == X86_FEATURE_SGX2)
+ case X86_FEATURE_SGX2:
return KVM_X86_FEATURE_SGX2;
- else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
+ case X86_FEATURE_SGX_EDECCSSA:
return KVM_X86_FEATURE_SGX_EDECCSSA;
- else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
+ case X86_FEATURE_CONSTANT_TSC:
return KVM_X86_FEATURE_CONSTANT_TSC;
- else if (x86_feature == X86_FEATURE_PERFMON_V2)
+ case X86_FEATURE_PERFMON_V2:
return KVM_X86_FEATURE_PERFMON_V2;
- else if (x86_feature == X86_FEATURE_RRSBA_CTRL)
+ case X86_FEATURE_RRSBA_CTRL:
return KVM_X86_FEATURE_RRSBA_CTRL;
-
- return x86_feature;
+ default:
+ return x86_feature;
+ }
}
static __always_inline u32 __feature_leaf(int x86_feature)
--
2.42.0.758.gaed0368e0e-goog
On Mon, Oct 23, 2023, Jim Mattson wrote:
> The compiler will probably do better than linear search.
It shouldn't matter, KVM relies on the compiler to resolve the translation at
compile time, e.g. the result is fed into reverse_cpuid_check().
I.e. we should pick whatever is least ugly.
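For context, reverse_cpuid_check() is a series of BUILD_BUG_ON()s on the translated leaf, which is why the translation must fold to a compile-time constant in the first place. Roughly (the exact list of checks aside), it has this shape:

/* Rough shape of reverse_cpuid_check(): every check is a BUILD_BUG_ON, so
 * the leaf fed in must be resolvable at compile time. */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
        /* Linux-defined leafs have no architectural CPUID register to map to. */
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}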
On Mon, Oct 23, 2023 at 05:16:35PM -0700, Jim Mattson wrote:
>The low five bits {INTEL_PSFD, IPRED_CTRL, RRSBA_CTRL, DDPD_U, BHI_CTRL}
>advertise the availability of specific bits in IA32_SPEC_CTRL. Since KVM
>dynamically determines the legal IA32_SPEC_CTRL bits for the underlying
>hardware, the hard work has already been done. Just let userspace know
>that a guest can use these IA32_SPEC_CTRL bits.
>
>The sixth bit (MCDT_NO) states that the processor does not exhibit MXCSR
>Configuration Dependent Timing (MCDT) behavior. This is an inherent
>property of the physical processor that is inherited by the virtual
>CPU. Pass that information on to userspace.
>
>Signed-off-by: Jim Mattson <[email protected]>
Reviewed-by: Chao Gao <[email protected]>
On Mon, Oct 23, 2023, Sean Christopherson wrote:
> On Mon, Oct 23, 2023, Jim Mattson wrote:
> > The compiler will probably do better than linear search.
>
> It shouldn't matter, KVM relies on the compiler to resolve the translation at
> compile time, e.g. the result is fed into reverse_cpuid_check().
>
> I.e. we should pick whatever is least ugly.
What if we add a macro to generate each case statement? It's arguably a wee bit
more readable, and also eliminates the possibility of returning the wrong feature
due to copy+paste errors, e.g. nothing would break at compile time if we goofed
and did:
case X86_FEATURE_SGX1:
return KVM_X86_FEATURE_SGX1;
case X86_FEATURE_SGX2:
return KVM_X86_FEATURE_SGX1;
If you've no objection, I'll push this:
--
Author: Jim Mattson <[email protected]>
Date: Mon Oct 23 17:16:36 2023 -0700
KVM: x86: Use a switch statement and macros in __feature_translate()
Use a switch statement with macro-generated case statements to handle
translating feature flags in order to reduce the probability of runtime
errors due to copy+paste goofs, to make compile-time errors easier to
debug, and to make the code more readable.
E.g. the compiler won't directly generate an error for duplicate if
statements
if (x86_feature == X86_FEATURE_SGX1)
return KVM_X86_FEATURE_SGX1;
else if (x86_feature == X86_FEATURE_SGX2)
return KVM_X86_FEATURE_SGX1;
and so instead reverse_cpuid_check() will fail due to the untranslated
entry pointing at a Linux-defined leaf, which provides practically no
hint as to what is broken
arch/x86/kvm/reverse_cpuid.h:108:2: error: call to __compiletime_assert_450 declared with 'error' attribute:
BUILD_BUG_ON failed: x86_leaf == CPUID_LNX_4
BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
^
whereas duplicate case statements very explicitly point at the offending
code:
arch/x86/kvm/reverse_cpuid.h:125:2: error: duplicate case value '361'
KVM_X86_TRANSLATE_FEATURE(SGX2);
^
arch/x86/kvm/reverse_cpuid.h:124:2: error: duplicate case value '360'
KVM_X86_TRANSLATE_FEATURE(SGX1);
^
And without macros, the opposite type of copy+paste goof doesn't generate
any error at compile-time, e.g. this yields no complaints:
case X86_FEATURE_SGX1:
return KVM_X86_FEATURE_SGX1;
case X86_FEATURE_SGX2:
return KVM_X86_FEATURE_SGX1;
Note, __feature_translate() is forcibly inlined and the feature is known
at compile-time, so the code generation between an if-elif sequence and a
switch statement should be identical.
Signed-off-by: Jim Mattson <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
[sean: use a macro, rewrite changelog]
Signed-off-by: Sean Christopherson <[email protected]>
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 17007016d8b5..aadefcaa9561 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -116,20 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
*/
static __always_inline u32 __feature_translate(int x86_feature)
{
- if (x86_feature == X86_FEATURE_SGX1)
- return KVM_X86_FEATURE_SGX1;
- else if (x86_feature == X86_FEATURE_SGX2)
- return KVM_X86_FEATURE_SGX2;
- else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
- return KVM_X86_FEATURE_SGX_EDECCSSA;
- else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
- return KVM_X86_FEATURE_CONSTANT_TSC;
- else if (x86_feature == X86_FEATURE_PERFMON_V2)
- return KVM_X86_FEATURE_PERFMON_V2;
- else if (x86_feature == X86_FEATURE_RRSBA_CTRL)
- return KVM_X86_FEATURE_RRSBA_CTRL;
+#define KVM_X86_TRANSLATE_FEATURE(f) \
+ case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
- return x86_feature;
+ switch (x86_feature) {
+ KVM_X86_TRANSLATE_FEATURE(SGX1);
+ KVM_X86_TRANSLATE_FEATURE(SGX2);
+ KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
+ KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
+ KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
+ KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+ default:
+ return x86_feature;
+ }
}
static __always_inline u32 __feature_leaf(int x86_feature)
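For reference, the macro above is pure token pasting, so e.g.

        KVM_X86_TRANSLATE_FEATURE(SGX1);

expands to

        case X86_FEATURE_SGX1: return KVM_X86_FEATURE_SGX1;

A copy+paste goof that repeats an invocation therefore yields two identical case labels, which is exactly the "duplicate case value" diagnostic quoted in the changelog, while a mismatched return value simply cannot be written with the macro.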
On Thu, Nov 30, 2023 at 12:28 PM Sean Christopherson <[email protected]> wrote:
>
> On Mon, Oct 23, 2023, Sean Christopherson wrote:
> > On Mon, Oct 23, 2023, Jim Mattson wrote:
> > > The compiler will probably do better than linear search.
> >
> > It shouldn't matter, KVM relies on the compiler to resolve the translation at
> > compile time, e.g. the result is fed into reverse_cpuid_check().
> >
> > I.e. we should pick whatever is least ugly.
>
> What if we add a macro to generate each case statement? It's arguably a wee bit
> more readable, and also eliminates the possibility of returning the wrong feature
> due to copy+paste errors, e.g. nothing would break at compile time if we goofed
> and did:
>
> case X86_FEATURE_SGX1:
> return KVM_X86_FEATURE_SGX1;
> case X86_FEATURE_SGX2:
> return KVM_X86_FEATURE_SGX1;
>
> If you've no objection, I'll push this:
:barf:
Um, okay.
> --
> Author: Jim Mattson <[email protected]>
> Date: Mon Oct 23 17:16:36 2023 -0700
>
> KVM: x86: Use a switch statement and macros in __feature_translate()
>
> Use a switch statement with macro-generated case statements to handle
> translating feature flags in order to reduce the probability of runtime
> errors due to copy+paste goofs, to make compile-time errors easier to
> debug, and to make the code more readable.
>
> E.g. the compiler won't directly generate an error for duplicate if
> statements
>
> if (x86_feature == X86_FEATURE_SGX1)
> return KVM_X86_FEATURE_SGX1;
> else if (x86_feature == X86_FEATURE_SGX2)
> return KVM_X86_FEATURE_SGX1;
>
> and so instead reverse_cpuid_check() will fail due to the untranslated
> entry pointing at a Linux-defined leaf, which provides practically no
> hint as to what is broken
>
> arch/x86/kvm/reverse_cpuid.h:108:2: error: call to __compiletime_assert_450 declared with 'error' attribute:
> BUILD_BUG_ON failed: x86_leaf == CPUID_LNX_4
> BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
> ^
> whereas duplicate case statements very explicitly point at the offending
> code:
>
> arch/x86/kvm/reverse_cpuid.h:125:2: error: duplicate case value '361'
> KVM_X86_TRANSLATE_FEATURE(SGX2);
> ^
> arch/x86/kvm/reverse_cpuid.h:124:2: error: duplicate case value '360'
> KVM_X86_TRANSLATE_FEATURE(SGX1);
> ^
>
> And without macros, the opposite type of copy+paste goof doesn't generate
> any error at compile-time, e.g. this yields no complaints:
>
> case X86_FEATURE_SGX1:
> return KVM_X86_FEATURE_SGX1;
> case X86_FEATURE_SGX2:
> return KVM_X86_FEATURE_SGX1;
>
> Note, __feature_translate() is forcibly inlined and the feature is known
> at compile-time, so the code generation between an if-elif sequence and a
> switch statement should be identical.
>
> Signed-off-by: Jim Mattson <[email protected]>
> Link: https://lore.kernel.org/r/[email protected]
> [sean: use a macro, rewrite changelog]
> Signed-off-by: Sean Christopherson <[email protected]>
>
> diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
> index 17007016d8b5..aadefcaa9561 100644
> --- a/arch/x86/kvm/reverse_cpuid.h
> +++ b/arch/x86/kvm/reverse_cpuid.h
> @@ -116,20 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
> */
> static __always_inline u32 __feature_translate(int x86_feature)
> {
> - if (x86_feature == X86_FEATURE_SGX1)
> - return KVM_X86_FEATURE_SGX1;
> - else if (x86_feature == X86_FEATURE_SGX2)
> - return KVM_X86_FEATURE_SGX2;
> - else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
> - return KVM_X86_FEATURE_SGX_EDECCSSA;
> - else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
> - return KVM_X86_FEATURE_CONSTANT_TSC;
> - else if (x86_feature == X86_FEATURE_PERFMON_V2)
> - return KVM_X86_FEATURE_PERFMON_V2;
> - else if (x86_feature == X86_FEATURE_RRSBA_CTRL)
> - return KVM_X86_FEATURE_RRSBA_CTRL;
> +#define KVM_X86_TRANSLATE_FEATURE(f) \
> + case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
>
> - return x86_feature;
> + switch (x86_feature) {
> + KVM_X86_TRANSLATE_FEATURE(SGX1);
> + KVM_X86_TRANSLATE_FEATURE(SGX2);
> + KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
> + KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
> + KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
> + KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
> + default:
> + return x86_feature;
> + }
> }
>
> static __always_inline u32 __feature_leaf(int x86_feature)
>
On Mon, 23 Oct 2023 17:16:35 -0700, Jim Mattson wrote:
> The low five bits {INTEL_PSFD, IPRED_CTRL, RRSBA_CTRL, DDPD_U, BHI_CTRL}
> advertise the availability of specific bits in IA32_SPEC_CTRL. Since KVM
> dynamically determines the legal IA32_SPEC_CTRL bits for the underlying
> hardware, the hard work has already been done. Just let userspace know
> that a guest can use these IA32_SPEC_CTRL bits.
>
> The sixth bit (MCDT_NO) states that the processor does not exhibit MXCSR
> Configuration Dependent Timing (MCDT) behavior. This is an inherent
> property of the physical processor that is inherited by the virtual
> CPU. Pass that information on to userspace.
>
> [...]
Applied to kvm-x86 misc, with macros to make Jim queasy (but they really do
guard against copy+paste errors).
[1/2] KVM: x86: Advertise CPUID.(EAX=7,ECX=2):EDX[5:0] to userspace
https://github.com/kvm-x86/linux/commit/eefe5e668209
[2/2] KVM: x86: Use a switch statement and macros in __feature_translate()
https://github.com/kvm-x86/linux/commit/80c883db87d9
--
https://github.com/kvm-x86/linux/tree/next
On Thu, Nov 30, 2023 at 5:54 PM Sean Christopherson <[email protected]> wrote:
>
> On Mon, 23 Oct 2023 17:16:35 -0700, Jim Mattson wrote:
> > The low five bits {INTEL_PSFD, IPRED_CTRL, RRSBA_CTRL, DDPD_U, BHI_CTRL}
> > advertise the availability of specific bits in IA32_SPEC_CTRL. Since KVM
> > dynamically determines the legal IA32_SPEC_CTRL bits for the underlying
> > hardware, the hard work has already been done. Just let userspace know
> > that a guest can use these IA32_SPEC_CTRL bits.
> >
> > The sixth bit (MCDT_NO) states that the processor does not exhibit MXCSR
> > Configuration Dependent Timing (MCDT) behavior. This is an inherent
> > property of the physical processor that is inherited by the virtual
> > CPU. Pass that information on to userspace.
> >
> > [...]
>
> Applied to kvm-x86 misc, with macros to make Jim queasy (but they really do
> guard against copy+paste errors).
They are also quite effective at guarding against code search. :)
> [1/2] KVM: x86: Advertise CPUID.(EAX=7,ECX=2):EDX[5:0] to userspace
> https://github.com/kvm-x86/linux/commit/eefe5e668209
> [2/2] KVM: x86: Use a switch statement and macros in __feature_translate()
> https://github.com/kvm-x86/linux/commit/80c883db87d9
>
> --
> https://github.com/kvm-x86/linux/tree/next