From: Kan Liang <[email protected]>
The client IMC uncore obscurely hacks the generic
uncore_perf_event_update() to support the 'UNCORE_PMC_IDX_FIXED + 1'
case. This code-quality issue will cause problems when a new counter
index, e.g. for free-running counters, is introduced into the generic
code.

Introduce a customized pmu event_read function for the client IMC
uncore. The customized function is an exact copy of the previous
generic uncore_pmu_event_read().

Correct the fixed-counter check in uncore_perf_event_update().
Signed-off-by: Kan Liang <[email protected]>
---
Changes since V2:
- New patch to fix the event->hw.idx >= UNCORE_PMC_IDX_FIXED check in
  the generic code. Temporarily add a customized pmu event_read
  function. Patch 5/5 will clean up the customized event_* functions
  for the client IMC uncore.
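
For background, the hack being worked around comes from the way the
client IMC driver assigns counter indices. A simplified sketch (not the
exact driver code) of the assignment done in snb_uncore_imc_event_init():

	/*
	 * Simplified sketch: the client IMC has no generic counters; it
	 * maps its two fixed-function counters to UNCORE_PMC_IDX_FIXED
	 * and UNCORE_PMC_IDX_FIXED + 1.
	 */
	if (cfg == SNB_UNCORE_PCI_IMC_DATA_READS)
		idx = UNCORE_PMC_IDX_FIXED;		/* data reads counter */
	else if (cfg == SNB_UNCORE_PCI_IMC_DATA_WRITES)
		idx = UNCORE_PMC_IDX_FIXED + 1;		/* data writes, fake index */
	else
		return -EINVAL;

	event->hw.idx = idx;

This is why the generic update path needed the '>=' comparison, and why
the customized read function below keeps it.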
arch/x86/events/intel/uncore.c | 2 +-
arch/x86/events/intel/uncore_snb.c | 26 +++++++++++++++++++++++++-
2 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f..3b8cd88 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
u64 prev_count, new_count, delta;
int shift;
- if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+ if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
shift = 64 - uncore_fixed_ctr_bits(box);
else
shift = 64 - uncore_perf_ctr_bits(box);
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index db1127c..9d5cd3f 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -498,6 +498,30 @@ static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}
+static void snb_uncore_imc_event_read(struct perf_event *event)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ u64 prev_count, new_count, delta;
+ int shift;
+
+ if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+ shift = 64 - uncore_fixed_ctr_bits(box);
+ else
+ shift = 64 - uncore_perf_ctr_bits(box);
+
+ /* the hrtimer might modify the previous event value */
+again:
+ prev_count = local64_read(&event->hw.prev_count);
+ new_count = uncore_read_counter(box, event);
+ if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+ goto again;
+
+ delta = (new_count << shift) - (prev_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
int snb_pci2phy_map_init(int devid)
{
struct pci_dev *dev = NULL;
@@ -533,7 +557,7 @@ static struct pmu snb_uncore_imc_pmu = {
.del = snb_uncore_imc_event_del,
.start = snb_uncore_imc_event_start,
.stop = snb_uncore_imc_event_stop,
- .read = uncore_pmu_event_read,
+ .read = snb_uncore_imc_event_read,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
--
2.7.4