This short patch series fixes some issues with RCU locking in the generic
perf layer.
Patch 1 fixes RCU issues in the cgroup switching code: perf_cgroup_sched_out()
and perf_cgroup_sched_in() were missing the RCU read lock needed to protect
the reference to the cgroup. Consequently, we moved the RCU read lock out of
perf_cgroup_switch() and into its callers to avoid double locking.
Patch 2 reinforces the lockdep checking of the RCU locking in the perf cgroup
code: either we must hold the RCU read lock, or we must hold ctx->lock, which
guarantees the task cannot leave the cgroup.
Thanks to Peter and Eric for their suggestions on how to fix this correctly.
Stephane Eranian (2):
perf/core: fix RCU problem with cgroup context switching code
perf/core: robustify perf_cgroup_from_task rcu checks
arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2 +-
include/linux/perf_event.h | 6 ++++--
kernel/events/core.c | 31 ++++++++++++++++++++----------
3 files changed, 26 insertions(+), 13 deletions(-)
--
2.5.0
The RCU checker detected an RCU violation in the cgroup switching routines
perf_cgroup_sched_in() and perf_cgroup_sched_out(): we were dereferencing
the cgroup from the task without holding the RCU lock. Fix this by holding
the RCU read lock. We move the locking out of perf_cgroup_switch() and into
its callers to avoid double locking.
Signed-off-by: Stephane Eranian <[email protected]>
---
kernel/events/core.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 98a4b9d..ea0bdc5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* we reschedule only in the presence of cgroup
* constrained events.
*/
- rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +530,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
}
}
- rcu_read_unlock();
-
local_irq_restore(flags);
}
@@ -542,6 +539,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
*/
@@ -561,6 +559,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+ rcu_read_unlock();
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +569,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
*/
@@ -584,6 +585,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+ rcu_read_unlock();
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -9447,7 +9450,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
+ rcu_read_lock();
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+ rcu_read_unlock();
return 0;
}
--
2.5.0
This patch reinforces the lockdep checks performed by
perf_cgroup_from_task() by passing the perf_event_context
whenever possible. It is okay not to hold the RCU read lock
when we know we hold ctx->lock. This patch makes sure this
property holds.
In some functions, such as perf_cgroup_sched_in(), we do not
pass the context because we are sure we hold the RCU read lock.
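For clarity, the new accessor is shown here in full, with an explanatory
comment added for this posting (the code itself is what the diff below adds):

static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	/*
	 * The dereference is considered safe if ctx->lock is held
	 * (lockdep-checked); passing ctx == NULL yields "true", which
	 * skips the check because the caller holds the RCU read lock.
	 */
	return container_of(task_css_check(task, perf_event_cgrp_id,
				ctx ? lockdep_is_held(&ctx->lock)
				    : true),
			    struct perf_cgroup, css);
}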
Signed-off-by: Stephane Eranian <[email protected]>
---
arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2 +-
include/linux/perf_event.h | 6 ++++--
kernel/events/core.c | 20 +++++++++++++-------
3 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8..a316ca9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
- return perf_cgroup_from_task(event->hw.target);
+ return perf_cgroup_from_task(event->hw.target, event->ctx);
return event->cgrp;
}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33..f9828a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
- return container_of(task_css(task, perf_event_cgrp_id),
+ return container_of(task_css_check(task, perf_event_cgrp_id,
+ ctx ? lockdep_is_held(&ctx->lock)
+ : true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ea0bdc5..3dbc3c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
if (!is_cgroup_event(event))
return;
- cgrp = perf_cgroup_from_task(current);
+ cgrp = perf_cgroup_from_task(current, event->ctx);
/*
* Do not update time when cgroup is not active
*/
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
if (!task || !ctx->nr_cgroups)
return;
- cgrp = perf_cgroup_from_task(task);
+ cgrp = perf_cgroup_from_task(task, ctx);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
+ * we pass the cpuctx->ctx to perf_cgroup_from_task()
+ * because cgroup events are only per-cpu
*/
- cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
- cgrp2 = perf_cgroup_from_task(next);
+ cgrp2 = perf_cgroup_from_task(next, NULL);
/*
* only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/* prev can never be NULL */
- cgrp2 = perf_cgroup_from_task(prev);
+ cgrp2 = perf_cgroup_from_task(prev, NULL);
/*
* only need to schedule in cgroup events if we are changing
--
2.5.0
On Thu, Nov 12, 2015 at 11:00:02AM +0100, Stephane Eranian wrote:
>
> This short patch series fixes some issues with RCU locking in the generic
> perf layer.
>
> Patch 1 fixes RCU issues in the cgroup switching code: perf_cgroup_sched_out()
> and perf_cgroup_sched_in() were missing the RCU read lock needed to protect
> the reference to the cgroup. Consequently, we moved the RCU read lock out of
> perf_cgroup_switch() and into its callers to avoid double locking.
>
> Patch 2 reinforces the lockdep checking of the RCU locking in the perf cgroup
> code: either we must hold the RCU read lock, or we must hold ctx->lock, which
> guarantees the task cannot leave the cgroup.
>
> Thanks to Peter and Eric for their suggestions on how to fix this correctly.
>
> Stephane Eranian (2):
> perf/core: fix RCU problem with cgroup context switching code
> perf/core: robustify perf_cgroup_from_task rcu checks
Thanks!
Commit-ID: ddaaf4e291dd63db0667991e4a335fcf3a7df13e
Gitweb: http://git.kernel.org/tip/ddaaf4e291dd63db0667991e4a335fcf3a7df13e
Author: Stephane Eranian <[email protected]>
AuthorDate: Thu, 12 Nov 2015 11:00:03 +0100
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 23 Nov 2015 09:21:03 +0100
perf/core: Fix RCU problem with cgroup context switching code
The RCU checker detected an RCU violation in the cgroup switching routines
perf_cgroup_sched_in() and perf_cgroup_sched_out(). We were dereferencing
the cgroup from the task without holding the RCU lock.
Fix this by holding the RCU read lock. We move the locking out of
perf_cgroup_switch() and into its callers to avoid double locking.
Signed-off-by: Stephane Eranian <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/events/core.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 36babfd..60e71ca 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* we reschedule only in the presence of cgroup
* constrained events.
*/
- rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +530,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
}
}
- rcu_read_unlock();
-
local_irq_restore(flags);
}
@@ -542,6 +539,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
*/
@@ -561,6 +559,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+ rcu_read_unlock();
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +569,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
+ rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
*/
@@ -584,6 +585,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+ rcu_read_unlock();
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -9452,7 +9455,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
+ rcu_read_lock();
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+ rcu_read_unlock();
return 0;
}
Commit-ID: 614e4c4ebc75517295bccd29b20ddbc5b52af6fc
Gitweb: http://git.kernel.org/tip/614e4c4ebc75517295bccd29b20ddbc5b52af6fc
Author: Stephane Eranian <[email protected]>
AuthorDate: Thu, 12 Nov 2015 11:00:04 +0100
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 23 Nov 2015 09:21:03 +0100
perf/core: Robustify the perf_cgroup_from_task() RCU checks
This patch reinforces the lockdep checks performed by
perf_cgroup_from_task() by passing the perf_event_context
whenever possible. It is okay not to hold the RCU read lock
when we know we hold ctx->lock. This patch makes sure this
property holds.
In some functions, such as perf_cgroup_sched_in(), we do not
pass the context because we are sure we are holding the RCU
read lock.
Signed-off-by: Stephane Eranian <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2 +-
include/linux/perf_event.h | 6 ++++--
kernel/events/core.c | 20 +++++++++++++-------
3 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8..a316ca9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
- return perf_cgroup_from_task(event->hw.target);
+ return perf_cgroup_from_task(event->hw.target, event->ctx);
return event->cgrp;
}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33..f9828a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
- return container_of(task_css(task, perf_event_cgrp_id),
+ return container_of(task_css_check(task, perf_event_cgrp_id,
+ ctx ? lockdep_is_held(&ctx->lock)
+ : true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 60e71ca..1ac857a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
if (!is_cgroup_event(event))
return;
- cgrp = perf_cgroup_from_task(current);
+ cgrp = perf_cgroup_from_task(current, event->ctx);
/*
* Do not update time when cgroup is not active
*/
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
if (!task || !ctx->nr_cgroups)
return;
- cgrp = perf_cgroup_from_task(task);
+ cgrp = perf_cgroup_from_task(task, ctx);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
+ * we pass the cpuctx->ctx to perf_cgroup_from_task()
+ * because cgroup events are only per-cpu
*/
- cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
- cgrp2 = perf_cgroup_from_task(next);
+ cgrp2 = perf_cgroup_from_task(next, NULL);
/*
* only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
+ * we do not need to pass the ctx here because we know
+ * we are holding the rcu lock
*/
- cgrp1 = perf_cgroup_from_task(task);
+ cgrp1 = perf_cgroup_from_task(task, NULL);
/* prev can never be NULL */
- cgrp2 = perf_cgroup_from_task(prev);
+ cgrp2 = perf_cgroup_from_task(prev, NULL);
/*
* only need to schedule in cgroup events if we are changing