2012-10-05 13:04:57

by Peter Zijlstra

Subject: [tip:perf/urgent] perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu

Commit-ID: 3f1f33206c16c7b3839d71372bc2ac3f305aa802
Gitweb: http://git.kernel.org/tip/3f1f33206c16c7b3839d71372bc2ac3f305aa802
Author: Peter Zijlstra <[email protected]>
AuthorDate: Tue, 2 Oct 2012 15:38:52 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 5 Oct 2012 13:59:06 +0200

perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu

Stephane thought the perf_cpu_context::active_pmu name confusing and
suggested using 'unique_pmu' instead.

This pointer points to an arbitrary ('random') pmu among those sharing
the cpuctx instance; therefore, by limiting a for_each_pmu loop to the
entries where cpuctx->unique_pmu matches the current pmu, we get a loop
over unique cpuctx instances.
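
To make that concrete, below is a minimal userspace sketch of the
pattern (the structures and names are simplified stand-ins for
illustration, not the kernel's definitions): several pmus can share
one cpuctx, exactly one sharer is recorded as ->unique_pmu, and a
loop over all pmus that skips entries whose context names a different
unique_pmu visits each context exactly once.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Several pmus may share a context; unique_pmu marks exactly one
 * of the sharers, so skipping every pmu that is not its context's
 * unique_pmu yields one visit per context.
 */
#include <stdio.h>

struct pmu;

struct cpuctx {
	struct pmu *unique_pmu;		/* the one designated sharer */
};

struct pmu {
	const char *name;
	struct cpuctx *ctx;		/* context, possibly shared */
};

int main(void)
{
	static struct cpuctx ctx_a, ctx_b;
	/* hypothetical pmus: pmu_b and pmu_c share ctx_b */
	static struct pmu pmus[] = {
		{ "pmu_a", &ctx_a },
		{ "pmu_b", &ctx_b },
		{ "pmu_c", &ctx_b },
	};
	unsigned int i;

	/* the first pmu to claim a context becomes its unique_pmu */
	for (i = 0; i < 3; i++)
		if (!pmus[i].ctx->unique_pmu)
			pmus[i].ctx->unique_pmu = &pmus[i];

	/* walk all pmus, but handle each context only once */
	for (i = 0; i < 3; i++) {
		if (pmus[i].ctx->unique_pmu != &pmus[i])
			continue;	/* a different pmu owns this context */
		printf("visiting context of %s\n", pmus[i].name);
	}
	return 0;
}

Run, this prints one line for ctx_a and one for ctx_b rather than three,
which is the effect the cpuctx->unique_pmu != pmu checks in the hunks
below have inside the kernel's list_for_each_entry_rcu(pmu, &pmus, entry)
loops.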

Suggested-by: Stephane Eranian <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 include/linux/perf_event.h |  2 +-
 kernel/events/core.c       | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 599afc4..b4166cd 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1110,7 +1110,7 @@ struct perf_cpu_context {
 	int exclusive;
 	struct list_head rotation_list;
 	int jiffies_interval;
-	struct pmu *active_pmu;
+	struct pmu *unique_pmu;
 	struct perf_cgroup *cgrp;
 };

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7b9df35..81939e8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4419,7 +4419,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);

@@ -4565,7 +4565,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

@@ -4761,7 +4761,7 @@ got_name:
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 				    vma->vm_flags & VM_EXEC);
@@ -5862,8 +5862,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)

 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 
-		if (cpuctx->active_pmu == old_pmu)
-			cpuctx->active_pmu = pmu;
+		if (cpuctx->unique_pmu == old_pmu)
+			cpuctx->unique_pmu = pmu;
 	}
 }

@@ -5998,7 +5998,7 @@ skip_type:
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
 		INIT_LIST_HEAD(&cpuctx->rotation_list);
-		cpuctx->active_pmu = pmu;
+		cpuctx->unique_pmu = pmu;
 	}

got_cpu_context: