When the LBR record MSRs have already been passed through, there is no
need to call vmx_update_intercept_for_lbr_msrs() again, and vice versa
when interception is already enabled. Track the current state in a new
already_passthrough flag and skip the redundant MSR bitmap updates.
Signed-off-by: Like Xu <[email protected]>
---
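Note for reviewers (not intended for the commit log): below is a minimal
standalone sketch of the flag-guarded toggle this patch introduces, compiled
outside of KVM. The update_intercept() helper and the simplified lbr_desc
are stand-ins for vmx_update_intercept_for_lbr_msrs() and the real
struct lbr_desc, not actual kernel code.

/*
 * Standalone model of the flag-guarded pass-through toggle; not KVM code.
 */
#include <stdbool.h>
#include <stdio.h>

struct lbr_desc {
	bool already_passthrough;
};

static void update_intercept(bool set)
{
	/* Stands in for the expensive MSR bitmap walk. */
	printf("updating LBR MSR intercepts, set=%d\n", set);
}

static void lbr_enable_passthrough(struct lbr_desc *lbr_desc)
{
	if (lbr_desc->already_passthrough)
		return;

	update_intercept(false);
	lbr_desc->already_passthrough = true;
}

static void lbr_disable_passthrough(struct lbr_desc *lbr_desc)
{
	if (!lbr_desc->already_passthrough)
		return;

	update_intercept(true);
	lbr_desc->already_passthrough = false;
}

int main(void)
{
	struct lbr_desc lbr_desc = { .already_passthrough = false };

	/* Only the first call in each direction reaches the update. */
	lbr_enable_passthrough(&lbr_desc);
	lbr_enable_passthrough(&lbr_desc);
	lbr_disable_passthrough(&lbr_desc);
	lbr_disable_passthrough(&lbr_desc);
	return 0;
}

Only the first enable and the first matching disable reach the expensive
update; repeated calls in the same state return early.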
arch/x86/kvm/vmx/pmu_intel.c | 1 +
arch/x86/kvm/vmx/vmx.c | 12 ++++++++++++
arch/x86/kvm/vmx/vmx.h | 3 +++
3 files changed, 16 insertions(+)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 85a675004cbb..75ba0444b4d1 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -614,6 +614,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
vmx_get_perf_capabilities() : 0;
lbr_desc->lbr.nr = 0;
lbr_desc->event = NULL;
+ lbr_desc->already_passthrough = false;
}
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 58a8af433741..800a26e3b571 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3880,12 +3880,24 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
static inline void vmx_lbr_disable_passthrough(struct kvm_vcpu *vcpu)
{
+ struct lbr_desc *lbr_desc = &to_vmx(vcpu)->lbr_desc;
+
+ if (!lbr_desc->already_passthrough)
+ return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, true);
+ lbr_desc->already_passthrough = false;
}
static inline void vmx_lbr_enable_passthrough(struct kvm_vcpu *vcpu)
{
+ struct lbr_desc *lbr_desc = &to_vmx(vcpu)->lbr_desc;
+
+ if (lbr_desc->already_passthrough)
+ return;
+
vmx_update_intercept_for_lbr_msrs(vcpu, false);
+ lbr_desc->already_passthrough = true;
}
/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index c67ce758412e..c931463f75d9 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -102,6 +102,9 @@ struct lbr_desc {
* The records may be inaccurate if the host reclaims the LBR.
*/
struct perf_event *event;
+
+ /* True if the LBR MSRs are currently passed through to the guest. */
+ bool already_passthrough;
};
/*
--
2.21.3