2022-11-11 10:52:31

by Like Xu

Subject: [PATCH v3 2/8] KVM: VMX: Refactor intel_pmu_set_msr() to align with other set_msr() helpers

From: Sean Christopherson <[email protected]>

Invert the flows in intel_pmu_set_msr()'s case statements so that they
follow the kernel's preferred style of:

	if (<not valid>)
		return <error>

	<commit change>
	return <success>

which is also the style used by every other set_msr() helper (except
AMD's PMU variant, which doesn't use a switch statement).

Opportunistically move the "val == current" checks below the validity
checks. Except for the one-off case for MSR_P6_EVNTSEL2, the reserved
bit checks are extremely cheap, and the guest is unlikely to frequently
write the current value, i.e. avoiding the reserved bit checks doesn't
add much (any?) value.
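
To make the inversion concrete, here is a minimal standalone C sketch (not
the KVM code itself; the demo_pmu struct, its reserved_mask field, and the
reprogram_ctr() helper are invented purely for illustration) contrasting the
old nested success path with the validate-then-commit shape described above:

#include <stdint.h>

struct demo_pmu {
	uint64_t ctrl;		/* currently programmed value */
	uint64_t reserved_mask;	/* bits the guest must not set */
};

static void reprogram_ctr(struct demo_pmu *pmu, uint64_t data)
{
	pmu->ctrl = data;	/* stand-in for the real reprogramming work */
}

/* Old shape: success path is nested, the error path falls out the bottom. */
static int set_ctrl_nested(struct demo_pmu *pmu, uint64_t data)
{
	if (pmu->ctrl == data)
		return 0;
	if (!(data & pmu->reserved_mask)) {
		reprogram_ctr(pmu, data);
		return 0;
	}
	return 1;
}

/* New shape: reject invalid input up front, then commit and succeed. */
static int set_ctrl_inverted(struct demo_pmu *pmu, uint64_t data)
{
	if (data & pmu->reserved_mask)
		return 1;

	if (pmu->ctrl != data)
		reprogram_ctr(pmu, data);
	return 0;
}

Each case statement changed by the diff below moves to the second shape.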

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/vmx/pmu_intel.c | 81 +++++++++++++++++++-----------------
1 file changed, 42 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f81cf54a245f..2f7cd388859c 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -397,44 +397,43 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)

 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (pmu->fixed_ctr_ctrl == data)
-			return 0;
-		if (!(data & pmu->fixed_ctr_ctrl_mask)) {
+		if (data & pmu->fixed_ctr_ctrl_mask)
+			return 1;
+
+		if (pmu->fixed_ctr_ctrl != data)
 			reprogram_fixed_counters(pmu, data);
-			return 0;
-		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
-		if (msr_info->host_initiated) {
-			pmu->global_status = data;
-			return 0;
-		}
-		break; /* RO MSR */
+		if (!msr_info->host_initiated)
+			return 1; /* RO MSR */
+
+		pmu->global_status = data;
+		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
-		if (pmu->global_ctrl == data)
-			return 0;
-		if (kvm_valid_perf_global_ctrl(pmu, data)) {
+		if (!kvm_valid_perf_global_ctrl(pmu, data))
+			return 1;
+
+		if (pmu->global_ctrl != data) {
 			diff = pmu->global_ctrl ^ data;
 			pmu->global_ctrl = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (!(data & pmu->global_ovf_ctrl_mask)) {
-			if (!msr_info->host_initiated)
-				pmu->global_status &= ~data;
-			return 0;
-		}
+		if (data & pmu->global_ovf_ctrl_mask)
+			return 1;
+
+		if (!msr_info->host_initiated)
+			pmu->global_status &= ~data;
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (pmu->pebs_enable == data)
-			return 0;
-		if (!(data & pmu->pebs_enable_mask)) {
+		if (data & pmu->pebs_enable_mask)
+			return 1;
+
+		if (pmu->pebs_enable != data) {
 			diff = pmu->pebs_enable ^ data;
 			pmu->pebs_enable = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
 	case MSR_IA32_DS_AREA:
@@ -442,15 +441,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		if (is_noncanonical_address(data, vcpu))
 			return 1;
+
 		pmu->ds_area = data;
-		return 0;
+		break;
 	case MSR_PEBS_DATA_CFG:
-		if (pmu->pebs_data_cfg == data)
-			return 0;
-		if (!(data & pmu->pebs_data_cfg_mask)) {
-			pmu->pebs_data_cfg = data;
-			return 0;
-		}
+		if (data & pmu->pebs_data_cfg_mask)
+			return 1;
+
+		pmu->pebs_data_cfg = data;
 		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
@@ -458,33 +456,38 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
 			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
 				return 1;
+
 			if (!msr_info->host_initiated &&
 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-			if (data == pmc->eventsel)
-				return 0;
 			reserved_bits = pmu->reserved_bits;
 			if ((pmc->idx == 2) &&
 			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
 				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
-			if (!(data & reserved_bits)) {
+			if (data & reserved_bits)
+				return 1;
+
+			if (data != pmc->eventsel) {
 				pmc->eventsel = data;
 				kvm_pmu_request_counter_reprogam(pmc);
-				return 0;
 			}
-		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
-			return 0;
+			break;
+		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
+			break;
+		}
+		/* Not a known PMU MSR. */
+		return 1;
 	}

-	return 1;
+	return 0;
 }

 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
--
2.38.1



2023-01-27 02:09:37

by Sean Christopherson

Subject: Re: [PATCH v3 2/8] KVM: VMX: Refactor intel_pmu_set_msr() to align with other set_msr() helpers

On Fri, Nov 11, 2022, Like Xu wrote:
> From: Sean Christopherson <[email protected]>
>
> Invert the flows in intel_pmu_set_msr()'s case statements so that they
> follow the kernel's preferred style of:
>
> 	if (<not valid>)
> 		return <error>
>
> 	<commit change>
> 	return <success>
>
> which is also the style used by every other set_msr() helper (except
> AMD's PMU variant, which doesn't use a switch statement).
>
> Opportunistically move the "val == current" checks below the validity
> checks. Except for the one-off case for MSR_P6_EVNTSEL2, the reserved
> bit checks are extremely cheap, and the guest is unlikely to frequently
> write the current value, i.e. avoiding the reserved bit checks doesn't
> add much (any?) value.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <[email protected]>

FYI, I'm going to post this separately and extend it to give the get_msr() flow
the same treatment. I'll plan on getting it queued sooner than later so that
this series can use it as a base.