VMMs can address mitigation issues in a migration pool by applying
the needed controls whenever the guest is running on a newer
processor. For example, if a guest uses retpoline to mitigate
intra-mode BTI in CPL0, the VMM can set RRSBA_DIS_S when the guest
runs on hardware which enumerates RRSBA.
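
As an illustration only (not part of the diff), the host-side decision
described above boils down to roughly the following; guest_miti_ctrl
stands for the value the guest wrote to the virtual mitigation control
MSR, and the other names are the definitions used by this series:

  u64 arch_cap = 0, mask = 0;

  /* RRSBA behavior is enumerated via IA32_ARCH_CAPABILITIES. */
  if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
          rdmsrl(MSR_IA32_ARCH_CAPABILITIES, arch_cap);

  /*
   * The guest told us it relies on retpoline in CPL0; if this CPU has
   * RRSBA behavior and can disable it, keep RRSBA_DIS_S set on the
   * guest's behalf.
   */
  if ((guest_miti_ctrl & MITI_CTRL_RETPOLINE_S_USED) &&
      boot_cpu_has(X86_FEATURE_RRSBA_CTRL) && (arch_cap & ARCH_CAP_RRSBA))
          mask |= SPEC_CTRL_RRSBA_DIS_S;
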
Signed-off-by: Zhang Chen <[email protected]>
---
arch/x86/kvm/vmx/vmx.c | 57 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6ed6b743be0e..fb0f3b1639b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2007,6 +2007,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
msr_info->data = vmx->msr_virtual_enumeration;
break;
+ case MSR_VIRTUAL_MITIGATION_ENUM:
+ if (!msr_info->host_initiated &&
+ !(vmx->msr_virtual_enumeration &
+ VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
+ return 1;
+ msr_info->data = vmx->msr_virtual_mitigation_enum;
+ break;
+ case MSR_VIRTUAL_MITIGATION_CTRL:
+ if (!msr_info->host_initiated &&
+ !(vmx->msr_virtual_enumeration &
+ VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
+ return 1;
+ msr_info->data = vmx->msr_virtual_mitigation_ctrl;
+ break;
default:
find_uret_msr:
msr = vmx_find_uret_msr(vmx, msr_info->index);
@@ -2056,7 +2070,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct vmx_uret_msr *msr;
int ret = 0;
u32 msr_index = msr_info->index;
- u64 data = msr_info->data;
+	u64 data = msr_info->data, arch_msr = 0;
u32 index;
switch (msr_index) {
@@ -2390,6 +2404,46 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx->msr_virtual_enumeration = data &
VIRT_ENUM_MITIGATION_CTRL_SUPPORT;
break;
+ case MSR_VIRTUAL_MITIGATION_ENUM:
+		if (!msr_info->host_initiated &&
+ !(vmx->msr_virtual_enumeration &
+ VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
+ return 1;
+ if (data & ~MITI_ENUM_SUPPORTED)
+ return 1;
+ vmx->msr_virtual_mitigation_enum = data;
+ break;
+ case MSR_VIRTUAL_MITIGATION_CTRL:
+ if (!msr_info->host_initiated &&
+ !(vmx->msr_virtual_enumeration &
+ VIRT_ENUM_MITIGATION_CTRL_SUPPORT))
+ return 1;
+ if (data & ~MITI_CTRL_USED)
+ return 1;
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, arch_msr);
+
+ if (data & MITI_CTRL_RETPOLINE_S_USED &&
+ boot_cpu_has(X86_FEATURE_RRSBA_CTRL) &&
+ arch_msr & ARCH_CAP_RRSBA)
+ vmx->spec_ctrl_mask |= SPEC_CTRL_RRSBA_DIS_S;
+ else
+ vmx->spec_ctrl_mask &= ~SPEC_CTRL_RRSBA_DIS_S;
+
+ if (cpu_has_virt_spec_ctrl()) {
+ vmcs_write64(IA32_SPEC_CTRL_MASK, vmx->spec_ctrl_mask);
+ } else if (vmx->spec_ctrl_mask) {
+ pr_err_once("Virtual spec ctrl is missing. Cannot keep "
+ "bits in %llx always set\n",
+ vmx->spec_ctrl_mask);
+ vmx->spec_ctrl_mask = 0;
+ }
+
+ vmx->spec_ctrl = vmx->spec_ctrl | vmx->spec_ctrl_mask;
+
+ vmx->msr_virtual_mitigation_ctrl = data;
+ break;
default:
find_uret_msr:
@@ -4774,6 +4828,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx->rmode.vm86_active = 0;
+ vmx->msr_virtual_mitigation_ctrl = 0;
if (cpu_has_virt_spec_ctrl()) {
vmx->spec_ctrl_mask = 0;
vmcs_write64(IA32_SPEC_CTRL_MASK, vmx->spec_ctrl_mask);
--
2.25.1
On Sun, Dec 11, 2022 at 12:00:45AM +0800, Zhang Chen wrote:
>[...]
>+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
>+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, arch_msr);
>+
>+ if (data & MITI_CTRL_RETPOLINE_S_USED &&
>+ boot_cpu_has(X86_FEATURE_RRSBA_CTRL) &&
>+ arch_msr & ARCH_CAP_RRSBA)
>+ vmx->spec_ctrl_mask |= SPEC_CTRL_RRSBA_DIS_S;
>+ else
>+ vmx->spec_ctrl_mask &= ~SPEC_CTRL_RRSBA_DIS_S;
IIUC, only the above chunk belongs to this patch. The other changes should
be moved to patch 7, which deals with MSR emulation.
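
For context, a minimal guest-side sketch of how a kernel might report its
retpoline usage through this interface (assumed, not taken from this series;
MSR_VIRTUAL_ENUMERATION here stands for whatever index the earlier patches
define for the virtual enumeration MSR):

  u64 virt_enum;

  /*
   * Hypothetical guest code: only touch the paravirtual interface if
   * the VMM enumerates mitigation control support, then report that
   * retpoline is in use for CPL0.
   */
  if (!rdmsrl_safe(MSR_VIRTUAL_ENUMERATION, &virt_enum) &&
      (virt_enum & VIRT_ENUM_MITIGATION_CTRL_SUPPORT) &&
      boot_cpu_has(X86_FEATURE_RETPOLINE))
          wrmsrl(MSR_VIRTUAL_MITIGATION_CTRL, MITI_CTRL_RETPOLINE_S_USED);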