Steven,
Please consider this series for inclusion. It doesn't depend on
other changes we are discussing.
Only cosmetic changes since v1, everything was acked by Masami.
Oleg.
kernel/trace/trace_kprobe.c | 187 ++++++++++++++-----------------------------
1 files changed, 61 insertions(+), 126 deletions(-)
On Thu, 2013-06-20 at 19:37 +0200, Oleg Nesterov wrote:
> Steven,
>
> Please consider this series for inclusion. It doesn't depend on
> other changes we are discussing.
>
> Only cosmetic changes since v1, everything was acked by Masami.
>
Not that I don't trust you :-)
But Masami, can you ack all these patches one more time? I like to have
the Acked-by as a direct reply to the patches I pull. It's also fine to
reply to this email with "All patches in this series: Acked-by: ...", so
you don't need to reply to each patch individually.
Thanks,
-- Steve
perf_trace_buf_prepare() + perf_trace_buf_submit() make no sense
if this task/CPU has no active counters. Change kprobe_perf_func()
and kretprobe_perf_func() to check call->perf_events beforehand
and return if this list is empty.
For example, "perf record -e some_probe -p1". Only /sbin/init will
report, all other threads which hit the same probe will do
perf_trace_buf_prepare/perf_trace_buf_submit just to realize that
nobody wants perf_swevent_event().
Signed-off-by: Oleg Nesterov <[email protected]>
---
kernel/trace/trace_kprobe.c | 12 ++++++++----
1 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9f46e98..c0af476 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1157,6 +1157,10 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
int size, __size, dsize;
int rctx;
+ head = this_cpu_ptr(call->perf_events);
+ if (hlist_empty(head))
+ return;
+
dsize = __get_data_size(tp, regs);
__size = sizeof(*entry) + tp->size + dsize;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
@@ -1172,8 +1176,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
entry->ip = (unsigned long)tp->rp.kp.addr;
memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
-
- head = this_cpu_ptr(call->perf_events);
perf_trace_buf_submit(entry, size, rctx,
entry->ip, 1, regs, head, NULL);
}
@@ -1189,6 +1191,10 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
int size, __size, dsize;
int rctx;
+ head = this_cpu_ptr(call->perf_events);
+ if (hlist_empty(head))
+ return;
+
dsize = __get_data_size(tp, regs);
__size = sizeof(*entry) + tp->size + dsize;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
@@ -1204,8 +1210,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
-
- head = this_cpu_ptr(call->perf_events);
perf_trace_buf_submit(entry, size, rctx,
entry->ret_ip, 1, regs, head, NULL);
}
--
1.5.5.1
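For reference, this is kprobe_perf_func() with both hunks applied. The
lines outside the two hunks (the declarations, the size fixup, and the
perf_trace_buf_prepare() call) are reproduced from my reading of the
surrounding 3.10-era code, so treat them as approximate context rather
than as part of the change:

static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        /* New: bail out before any buffer work if no counter is active
         * for this event on this CPU. */
        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(tp, regs);
        __size = sizeof(*entry) + tp->size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        /* ... size fixup and overflow check of the original code ... */

        entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
        if (!entry)
                return;

        entry->ip = (unsigned long)tp->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
        /* head is no longer looked up here; it was fetched above. */
        perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
}

kretprobe_perf_func() is reshaped identically.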
kprobe_perf_func() and kretprobe_perf_func() pass addr=ip to
perf_trace_buf_submit() for no reason.
This only sets perf_sample_data->addr for PERF_SAMPLE_ADDR;
perf_sample_data->ip is already initialized if PERF_SAMPLE_IP is requested.
Signed-off-by: Oleg Nesterov <[email protected]>
---
kernel/trace/trace_kprobe.c | 6 ++----
1 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3432652..141c682 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1163,8 +1163,7 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
entry->ip = (unsigned long)tp->rp.kp.addr;
memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
- perf_trace_buf_submit(entry, size, rctx,
- entry->ip, 1, regs, head, NULL);
+ perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
/* Kretprobe profile handler */
@@ -1197,8 +1196,7 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
- perf_trace_buf_submit(entry, size, rctx,
- entry->ret_ip, 1, regs, head, NULL);
+ perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
#endif /* CONFIG_PERF_EVENTS */
--
1.5.5.1
I think that "ftrace_event_file *trace_probe[]" complicates the
code for no reason, turn it into list_head to simplify the code.
enable_trace_probe() no longer needs synchronize_sched().
This needs the extra sizeof(list_head) memory for every attached
ftrace_event_file, hopefully not a problem in this case.
Signed-off-by: Oleg Nesterov <[email protected]>
---
kernel/trace/trace_kprobe.c | 138 ++++++++++++-------------------------------
1 files changed, 37 insertions(+), 101 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 141c682..2f54344 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,12 +35,17 @@ struct trace_probe {
const char *symbol; /* symbol name */
struct ftrace_event_class class;
struct ftrace_event_call call;
- struct ftrace_event_file * __rcu *files;
+ struct list_head files;
ssize_t size; /* trace entry size */
unsigned int nr_args;
struct probe_arg args[];
};
+struct event_file_link {
+ struct ftrace_event_file *file;
+ struct list_head list;
+};
+
#define SIZEOF_TRACE_PROBE(n) \
(offsetof(struct trace_probe, args) + \
(sizeof(struct probe_arg) * (n)))
@@ -150,6 +155,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
goto error;
INIT_LIST_HEAD(&tp->list);
+ INIT_LIST_HEAD(&tp->files);
return tp;
error:
kfree(tp->call.name);
@@ -184,22 +190,6 @@ static struct trace_probe *find_trace_probe(const char *event,
}
/*
- * This and enable_trace_probe/disable_trace_probe rely on event_mutex
- * held by the caller, __ftrace_set_clr_event().
- */
-static int trace_probe_nr_files(struct trace_probe *tp)
-{
- struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
- int ret = 0;
-
- if (file)
- while (*(file++))
- ret++;
-
- return ret;
-}
-
-/*
* Enable trace_probe
* if the file is NULL, enable "perf" handler, or enable "trace" handler.
*/
@@ -209,29 +199,18 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
int ret = 0;
if (file) {
- struct ftrace_event_file **new, **old;
- int n = trace_probe_nr_files(tp);
-
- old = rcu_dereference_raw(tp->files);
- /* 1 is for new one and 1 is for stopper */
- new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
- GFP_KERNEL);
- if (!new) {
+ struct event_file_link *link;
+
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
ret = -ENOMEM;
goto out;
}
- memcpy(new, old, n * sizeof(struct ftrace_event_file *));
- new[n] = file;
- /* The last one keeps a NULL */
- rcu_assign_pointer(tp->files, new);
- tp->flags |= TP_FLAG_TRACE;
+ link->file = file;
+ list_add_tail_rcu(&link->list, &tp->files);
- if (old) {
- /* Make sure the probe is done with old files */
- synchronize_sched();
- kfree(old);
- }
+ tp->flags |= TP_FLAG_TRACE;
} else
tp->flags |= TP_FLAG_PROFILE;
@@ -246,24 +225,16 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
return ret;
}
-static int
-trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+static struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
{
- struct ftrace_event_file **files;
- int i;
+ struct event_file_link *link;
- /*
- * Since all tp->files updater is protected by probe_enable_lock,
- * we don't need to lock an rcu_read_lock.
- */
- files = rcu_dereference_raw(tp->files);
- if (files) {
- for (i = 0; files[i]; i++)
- if (files[i] == file)
- return i;
- }
+ list_for_each_entry(link, &tp->files, list)
+ if (link->file == file)
+ return link;
- return -1;
+ return NULL;
}
/*
@@ -276,38 +247,23 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
int ret = 0;
if (file) {
- struct ftrace_event_file **new, **old;
- int n = trace_probe_nr_files(tp);
- int i, j;
+ struct event_file_link *link;
- old = rcu_dereference_raw(tp->files);
- if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+ link = find_event_file_link(tp, file);
+ if (!link) {
ret = -EINVAL;
goto out;
}
- if (n == 1) { /* Remove the last file */
- tp->flags &= ~TP_FLAG_TRACE;
- new = NULL;
- } else {
- new = kzalloc(n * sizeof(struct ftrace_event_file *),
- GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* This copy & check loop copies the NULL stopper too */
- for (i = 0, j = 0; j < n && i < n + 1; i++)
- if (old[i] != file)
- new[j++] = old[i];
- }
+ list_del_rcu(&link->list);
+ /* synchronize with kprobe_trace_func/kretprobe_trace_func */
+ synchronize_sched();
+ kfree(link);
- rcu_assign_pointer(tp->files, new);
+ if (!list_empty(&tp->files))
+ goto out;
- /* Make sure the probe is done with old files */
- synchronize_sched();
- kfree(old);
+ tp->flags &= ~TP_FLAG_TRACE;
} else
tp->flags &= ~TP_FLAG_PROFILE;
@@ -872,20 +828,10 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
- /*
- * Note: preempt is already disabled around the kprobe handler.
- * However, we still need an smp_read_barrier_depends() corresponding
- * to smp_wmb() in rcu_assign_pointer() to access the pointer.
- */
- struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
- if (unlikely(!file))
- return;
+ struct event_file_link *link;
- while (*file) {
- __kprobe_trace_func(tp, regs, *file);
- file++;
- }
+ list_for_each_entry_rcu(link, &tp->files, list)
+ __kprobe_trace_func(tp, regs, link->file);
}
/* Kretprobe handler */
@@ -932,20 +878,10 @@ static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
- /*
- * Note: preempt is already disabled around the kprobe handler.
- * However, we still need an smp_read_barrier_depends() corresponding
- * to smp_wmb() in rcu_assign_pointer() to access the pointer.
- */
- struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
- if (unlikely(!file))
- return;
+ struct event_file_link *link;
- while (*file) {
- __kretprobe_trace_func(tp, ri, regs, *file);
- file++;
- }
+ list_for_each_entry_rcu(link, &tp->files, list)
+ __kretprobe_trace_func(tp, ri, regs, link->file);
}
/* Event entry printers */
--
1.5.5.1
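Putting the disable_trace_probe() hunks together, the resulting file
branch reads as follows (every statement is straight from the diff above;
only the comments about ordering are added here):

        if (file) {
                struct event_file_link *link;

                link = find_event_file_link(tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                /* Unlink first, then wait for every probe-handler reader
                 * to leave its read-side section before freeing. */
                list_del_rcu(&link->list);
                /* synchronize with kprobe_trace_func/kretprobe_trace_func */
                synchronize_sched();
                kfree(link);

                if (!list_empty(&tp->files))
                        goto out;

                tp->flags &= ~TP_FLAG_TRACE;
        } else
                tp->flags &= ~TP_FLAG_PROFILE;

Note the asymmetry: enable_trace_probe() only adds to the list, so it no
longer needs synchronize_sched(); disable_trace_probe() must still wait,
because a handler may be walking the entry it just unlinked.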
enable_trace_probe() and disable_trace_probe() should not worry about
serialization; the caller (perf_trace_init or __ftrace_set_clr_event)
holds event_mutex.
They are also called by kprobe_trace_self_tests_init(), but this __init
function can't race with itself or with trace_events.c.
And note that this code depended on event_mutex even before 41a7dd420c,
which introduced probe_enable_lock. In fact it assumes that the caller of
kprobe_register() can never race with itself; otherwise, say, the
tp->flags manipulations are racy.
Signed-off-by: Oleg Nesterov <[email protected]>
---
kernel/trace/trace_kprobe.c | 43 ++++++++++++++++++++-----------------------
1 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c0af476..3432652 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -183,16 +183,15 @@ static struct trace_probe *find_trace_probe(const char *event,
return NULL;
}
+/*
+ * This and enable_trace_probe/disable_trace_probe rely on event_mutex
+ * held by the caller, __ftrace_set_clr_event().
+ */
static int trace_probe_nr_files(struct trace_probe *tp)
{
- struct ftrace_event_file **file;
+ struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
int ret = 0;
- /*
- * Since all tp->files updater is protected by probe_enable_lock,
- * we don't need to lock an rcu_read_lock.
- */
- file = rcu_dereference_raw(tp->files);
if (file)
while (*(file++))
ret++;
@@ -200,8 +199,6 @@ static int trace_probe_nr_files(struct trace_probe *tp)
return ret;
}
-static DEFINE_MUTEX(probe_enable_lock);
-
/*
* Enable trace_probe
* if the file is NULL, enable "perf" handler, or enable "trace" handler.
@@ -211,8 +208,6 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
int ret = 0;
- mutex_lock(&probe_enable_lock);
-
if (file) {
struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp);
@@ -223,7 +218,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
- goto out_unlock;
+ goto out;
}
memcpy(new, old, n * sizeof(struct ftrace_event_file *));
new[n] = file;
@@ -247,10 +242,7 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
else
ret = enable_kprobe(&tp->rp.kp);
}
-
- out_unlock:
- mutex_unlock(&probe_enable_lock);
-
+ out:
return ret;
}
@@ -283,8 +275,6 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
int ret = 0;
- mutex_lock(&probe_enable_lock);
-
if (file) {
struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp);
@@ -293,7 +283,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
old = rcu_dereference_raw(tp->files);
if (n == 0 || trace_probe_file_index(tp, file) < 0) {
ret = -EINVAL;
- goto out_unlock;
+ goto out;
}
if (n == 1) { /* Remove the last file */
@@ -304,7 +294,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
- goto out_unlock;
+ goto out;
}
/* This copy & check loop copies the NULL stopper too */
@@ -327,10 +317,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
else
disable_kprobe(&tp->rp.kp);
}
-
- out_unlock:
- mutex_unlock(&probe_enable_lock);
-
+ out:
return ret;
}
@@ -1215,6 +1202,12 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
}
#endif /* CONFIG_PERF_EVENTS */
+/*
+ * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
+ *
+ * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
+ * lockless, but we can't race with this __init function.
+ */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
enum trace_reg type, void *data)
@@ -1380,6 +1373,10 @@ find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
return NULL;
}
+/*
+ * Nobody but us can call enable_trace_probe/disable_trace_probe at this
+ * stage, we can do this lockless.
+ */
static __init int kprobe_trace_self_tests_init(void)
{
int ret, warn = 0;
--
1.5.5.1
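For orientation, the serialization argument as a call graph. The two
entry points and event_mutex are stated in the changelog; the
intermediate callees below are from my reading of the 3.10-era
trace_events.c and trace_event_perf.c, so take them as a sketch rather
than gospel:

/*
 * Both paths reach kprobe_register() with event_mutex held, so
 * enable_trace_probe()/disable_trace_probe() can never race:
 *
 *   perf_trace_init()                     // perf syscall path
 *     mutex_lock(&event_mutex);
 *     perf_trace_event_init()
 *       tp_event->class->reg(TRACE_REG_PERF_REGISTER)
 *         kprobe_register()
 *           enable_trace_probe(tp, NULL);
 *
 *   __ftrace_set_clr_event()              // "echo 1 > events/.../enable"
 *     mutex_lock(&event_mutex);
 *     ftrace_event_enable_disable()
 *       call->class->reg(TRACE_REG_REGISTER, file)
 *         kprobe_register()
 *           enable_trace_probe(tp, file);
 *
 * kprobe_trace_self_tests_init() calls enable/disable_trace_probe()
 * without event_mutex, but it is an __init function: nothing else can
 * race with it at that point.
 */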
(2013/06/21 3:42), Steven Rostedt wrote:
> On Thu, 2013-06-20 at 19:37 +0200, Oleg Nesterov wrote:
>> Steven,
>>
>> Please consider this series for inclusion. It doesn't depend on
>> other changes we are discussing.
>>
>> Only cosmetic changes since v1, everything was acked by Masami.
>>
>
>
> Not that I don't trust you :-)
>
> But Masami, can you ack all these patches one more time? I like to have
> the Acked-by as a direct reply to the patches I pull. It's also fine to
> reply to this email with "All patches in this series: Acked-by: ...", so
> you don't need to reply to each patch individually.
Sure, all patches are fine for me :)
All patches in this series: Acked-by: Masami Hiramatsu <[email protected]>
Thank you!
--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]
On Thu, 2013-06-20 at 19:38 +0200, Oleg Nesterov wrote:
> kprobe_perf_func() and kretprobe_perf_func() pass addr=ip to
> perf_trace_buf_submit() for no reason.
>
> This only sets perf_sample_data->addr for PERF_SAMPLE_ADDR;
> perf_sample_data->ip is already initialized if PERF_SAMPLE_IP is requested.
This changelog is confusing. I have no idea what it is trying to fix.
Can you explain what the issue is here and what this change fixes?
Thanks,
-- Steve
>
> Signed-off-by: Oleg Nesterov <[email protected]>
> ---
> kernel/trace/trace_kprobe.c | 6 ++----
> 1 files changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> index 3432652..141c682 100644
> --- a/kernel/trace/trace_kprobe.c
> +++ b/kernel/trace/trace_kprobe.c
> @@ -1163,8 +1163,7 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
> entry->ip = (unsigned long)tp->rp.kp.addr;
> memset(&entry[1], 0, dsize);
> store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
> - perf_trace_buf_submit(entry, size, rctx,
> - entry->ip, 1, regs, head, NULL);
> + perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
> }
>
> /* Kretprobe profile handler */
> @@ -1197,8 +1196,7 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
> entry->func = (unsigned long)tp->rp.kp.addr;
> entry->ret_ip = (unsigned long)ri->ret_addr;
> store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
> - perf_trace_buf_submit(entry, size, rctx,
> - entry->ret_ip, 1, regs, head, NULL);
> + perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
> }
> #endif /* CONFIG_PERF_EVENTS */
>
On Thu, 2013-06-20 at 19:38 +0200, Oleg Nesterov wrote:
> -static int
> -trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
> +static struct event_file_link *
> +find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
> {
> - struct ftrace_event_file **files;
> - int i;
> + struct event_file_link *link;
>
> - /*
> - * Since all tp->files updater is protected by probe_enable_lock,
> - * we don't need to lock an rcu_read_lock.
> - */
> - files = rcu_dereference_raw(tp->files);
> - if (files) {
> - for (i = 0; files[i]; i++)
> - if (files[i] == file)
> - return i;
> - }
> + list_for_each_entry(link, &tp->files, list)
> + if (link->file == file)
> + return link;
Shouldn't that be list_for_each_entry_rcu()?
-- Steve
>
> - return -1;
> + return NULL;
> }
>
> /*
On 07/01, Steven Rostedt wrote:
>
> On Thu, 2013-06-20 at 19:38 +0200, Oleg Nesterov wrote:
>
> > -static int
> > -trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
> > +static struct event_file_link *
> > +find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
> > {
> > - struct ftrace_event_file **files;
> > - int i;
> > + struct event_file_link *link;
> >
> > - /*
> > - * Since all tp->files updater is protected by probe_enable_lock,
> > - * we don't need to lock an rcu_read_lock.
> > - */
> > - files = rcu_dereference_raw(tp->files);
> > - if (files) {
> > - for (i = 0; files[i]; i++)
> > - if (files[i] == file)
> > - return i;
> > - }
> > + list_for_each_entry(link, &tp->files, list)
> > + if (link->file == file)
> > + return link;
>
> Shouldn't that be list_for_each_entry_rcu()?
No.
This is the writer, which modifies the list. enable_trace_probe() and
disable_trace_probe() must be serialized wrt each other / themselves
anyway; otherwise they are buggy in any case.
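Concretely, here are the two sides after the conversion, both taken
verbatim from the patch, with the locking assumptions spelled out as
comments:

/* Writer side: only called under event_mutex, so plain list iteration
 * is safe; no rcu_read_lock() or _rcu variant is needed. */
static struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
{
        struct event_file_link *link;

        list_for_each_entry(link, &tp->files, list)
                if (link->file == file)
                        return link;

        return NULL;
}

/* Reader side: runs from the kprobe handler with preemption disabled,
 * concurrently with list_add_tail_rcu()/list_del_rcu(), so it must use
 * the _rcu variant. */
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tp->files, list)
                __kprobe_trace_func(tp, regs, link->file);
}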
Oleg.
On 07/01, Steven Rostedt wrote:
>
> On Thu, 2013-06-20 at 19:38 +0200, Oleg Nesterov wrote:
> > kprobe_perf_func() and kretprobe_perf_func() pass addr=ip to
> > perf_trace_buf_submit() for no reason.
> >
> > This only sets perf_sample_data->addr for PERF_SAMPLE_ADDR;
> > perf_sample_data->ip is already initialized if PERF_SAMPLE_IP is requested.
>
> This changelog is confusing. I have no idea what it is trying to fix.
> Can you explain what the issue is here and what this change fixes?
Sorry for the confusion; this fixes nothing.
There is just no reason to pass "addr = kp.addr"; we already report it in
the entry itself.
Note also that kprobes is the last user of perf_trace_buf_submit() with
addr != 0; perhaps we can kill this argument and __perf_addr() altogether.
See also 32520b2c6 "Don't pass addr=ip to perf_trace_buf_submit()".
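To spell out where that argument goes (the perf_tp_event() /
perf_sample_data_init() chain below is from my reading of the 3.10-era
kernel/events/core.c, shown only for orientation):

/*
 * perf_trace_buf_submit(entry, size, rctx, addr, 1, regs, head, NULL)
 *   -> perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task)
 *        -> perf_sample_data_init(&data, addr, 0)   // data.addr = addr
 *
 * data.addr is only emitted if the event requested PERF_SAMPLE_ADDR.
 * The sample ip comes from regs via PERF_SAMPLE_IP, and the probe
 * address / return address are reported in the entry payload anyway,
 * so passing addr = entry->ip or entry->ret_ip duplicates information
 * the consumer already has.
 */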
> Thanks,
>
> -- Steve
>
> >
> > Signed-off-by: Oleg Nesterov <[email protected]>
> > ---
> > kernel/trace/trace_kprobe.c | 6 ++----
> > 1 files changed, 2 insertions(+), 4 deletions(-)
> >
> > diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> > index 3432652..141c682 100644
> > --- a/kernel/trace/trace_kprobe.c
> > +++ b/kernel/trace/trace_kprobe.c
> > @@ -1163,8 +1163,7 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
> > entry->ip = (unsigned long)tp->rp.kp.addr;
> > memset(&entry[1], 0, dsize);
> > store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
> > - perf_trace_buf_submit(entry, size, rctx,
> > - entry->ip, 1, regs, head, NULL);
> > + perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
> > }
> >
> > /* Kretprobe profile handler */
> > @@ -1197,8 +1196,7 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
> > entry->func = (unsigned long)tp->rp.kp.addr;
> > entry->ret_ip = (unsigned long)ri->ret_addr;
> > store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
> > - perf_trace_buf_submit(entry, size, rctx,
> > - entry->ret_ip, 1, regs, head, NULL);
> > + perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
> > }
> > #endif /* CONFIG_PERF_EVENTS */
> >
>
>
On Mon, 2013-07-01 at 21:36 +0200, Oleg Nesterov wrote:
> On 07/01, Steven Rostedt wrote:
> >
> > On Thu, 2013-06-20 at 19:38 +0200, Oleg Nesterov wrote:
> >
> > > -static int
> > > -trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
> > > +static struct event_file_link *
> > > +find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
> > > {
> > > - struct ftrace_event_file **files;
> > > - int i;
> > > + struct event_file_link *link;
> > >
> > > - /*
> > > - * Since all tp->files updater is protected by probe_enable_lock,
> > > - * we don't need to lock an rcu_read_lock.
> > > - */
> > > - files = rcu_dereference_raw(tp->files);
> > > - if (files) {
> > > - for (i = 0; files[i]; i++)
> > > - if (files[i] == file)
> > > - return i;
> > > - }
> > > + list_for_each_entry(link, &tp->files, list)
> > > + if (link->file == file)
> > > + return link;
> >
> > Shouldn't that be list_for_each_entry_rcu()?
>
> No.
>
> This is the writer which modifies the list. enable/disable_trace_probe
> should be serialized wrt each other / itself anyway, otherwise they are
> buggy in any case.
Ah OK, I missed the readers in kprobe*_trace_func() and was confused by
the other RCU usage. Nevermind.
-- Steve