From: Namhyung Kim <[email protected]>
It's not used anywhere in the function.
Signed-off-by: Namhyung Kim <[email protected]>
---
kernel/trace/ftrace.c | 5 -----
1 file changed, 5 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 78f4398cb608..03953fbd1b2a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1312,7 +1312,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct hlist_head *hhd;
struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
- unsigned long key;
int size = src->count;
int bits = 0;
int ret;
@@ -1355,10 +1354,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
- if (bits > 0)
- key = hash_long(entry->ip, bits);
- else
- key = 0;
remove_hash_entry(src, entry);
__add_hash_entry(new_hash, entry);
}
--
1.7.11.7
From: Namhyung Kim <[email protected]>
It looks like tracing_release() lacks checking iter->cpu_file so that
closing a per_cpu trace file would attempt to close all cpu buffers.
Signed-off-by: Namhyung Kim <[email protected]>
---
kernel/trace/trace.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7270460cfe3c..0beddcb80509 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2883,7 +2883,13 @@ static int tracing_release(struct inode *inode, struct file *file)
WARN_ON(!tr->ref);
tr->ref--;
- for_each_tracing_cpu(cpu) {
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ for_each_tracing_cpu(cpu) {
+ if (iter->buffer_iter[cpu])
+ ring_buffer_read_finish(iter->buffer_iter[cpu]);
+ }
+ } else {
+ cpu = iter->cpu_file;
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
--
1.7.11.7
From: Namhyung Kim <[email protected]>
Check return value and bail out if it's NULL.
Signed-off-by: Namhyung Kim <[email protected]>
---
kernel/trace/trace.c | 2 ++
kernel/trace/trace_stack.c | 2 ++
kernel/trace/trace_stat.c | 2 ++
3 files changed, 6 insertions(+)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 829b2bee24e8..7270460cfe3c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5953,6 +5953,8 @@ static __init int tracer_init_debugfs(void)
trace_access_lock_init();
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
init_tracer_debugfs(&global_trace, d_tracer);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index aab277b67fa9..8c3f37e2dc43 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -431,6 +431,8 @@ static __init int stack_trace_init(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
trace_create_file("stack_max_size", 0644, d_tracer,
&max_stack_size, &stack_max_size_fops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 96cffb269e73..847f88a6194b 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
struct dentry *d_tracing;
d_tracing = tracing_init_dentry();
+ if (!d_tracing)
+ return 0;
stat_dir = debugfs_create_dir("trace_stat", d_tracing);
if (!stat_dir)
--
1.7.11.7
On Wed, 2013-04-10 at 09:18 +0900, Namhyung Kim wrote:
> From: Namhyung Kim <[email protected]>
>
> It looks like tracing_release() lacks checking iter->cpu_file so that
> closing a per_cpu trace file would attempt to close all cpu buffers.
>
> Signed-off-by: Namhyung Kim <[email protected]>
> ---
> kernel/trace/trace.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 7270460cfe3c..0beddcb80509 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -2883,7 +2883,13 @@ static int tracing_release(struct inode *inode, struct file *file)
> WARN_ON(!tr->ref);
> tr->ref--;
>
> - for_each_tracing_cpu(cpu) {
> + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
> + for_each_tracing_cpu(cpu) {
> + if (iter->buffer_iter[cpu])
Only the cpu that is assigned gets buffer_iter[cpu] set. The other
buffer_iter[cpus] will simply be ignored.
-- Steve
> + ring_buffer_read_finish(iter->buffer_iter[cpu]);
> + }
> + } else {
> + cpu = iter->cpu_file;
> if (iter->buffer_iter[cpu])
> ring_buffer_read_finish(iter->buffer_iter[cpu]);
> }
Hi Steve,
2013-04-10 오전 9:31, Steven Rostedt 쓴 글:
> On Wed, 2013-04-10 at 09:18 +0900, Namhyung Kim wrote:
>> From: Namhyung Kim <[email protected]>
>>
>> It looks like tracing_release() lacks checking iter->cpu_file so that
>> closing a per_cpu trace file would attempt to close all cpu buffers.
>>
>> Signed-off-by: Namhyung Kim <[email protected]>
>> ---
>> kernel/trace/trace.c | 8 +++++++-
>> 1 file changed, 7 insertions(+), 1 deletion(-)
>>
>> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
>> index 7270460cfe3c..0beddcb80509 100644
>> --- a/kernel/trace/trace.c
>> +++ b/kernel/trace/trace.c
>> @@ -2883,7 +2883,13 @@ static int tracing_release(struct inode *inode, struct file *file)
>> WARN_ON(!tr->ref);
>> tr->ref--;
>>
>> - for_each_tracing_cpu(cpu) {
>> + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
>> + for_each_tracing_cpu(cpu) {
>> + if (iter->buffer_iter[cpu])
>
> Only the cpu that is assigned gets buffer_iter[cpu] set. The other
> buffer_iter[cpus] will simply be ignored.
You meant iter->cpu_file != RING_BUFFER_ALL_CPUS case, right?
So why bother trying to check other cpus then?
Thanks,
Namhyung
On Wed, 2013-04-10 at 09:36 +0900, Namhyung Kim wrote:
> You meant iter->cpu_file != RING_BUFFER_ALL_CPUS case, right?
Yep.
>
> So why bother trying to check other cpus then?
Because it's a very slow path (closing a file), and it keeps the code
simpler and more condensed.
We could add your change for consistency, but right now, it's very low
priority.
But looking at the code, I do see a clean up that looks like it would be
worth updating. If the ring_buffer_read_prepare() fails, we should
probably let the user know, instead of succeeding and then having no
output.
Looks like all users of the buffer_iter[cpu] will fail quietly if it is
NULL, thus it's not a problem with crashing the kernel.
-- Steve
On Tue, 09 Apr 2013 20:46:27 -0400, Steven Rostedt wrote:
> On Wed, 2013-04-10 at 09:36 +0900, Namhyung Kim wrote:
>
>> You meant iter->cpu_file != RING_BUFFER_ALL_CPUS case, right?
>
> Yep.
>
>>
>> So why bother trying to check other cpus then?
>
> Because it's a very slow path (closing a file), and it keeps the code
> simpler and more condensed.
>
> We could add your change for consistency, but right now, it's very low
> priority.
Hmm.. okay.
>
> But looking at the code, I do see a clean up that looks like it would be
> worth updating. If the ring_buffer_read_prepare() fails, we should
> probably let the user know, instead of succeeding and then having no
> output.
How about below.. :)
>From 7ba245dba217ef858b467552019acd49f7fdce7e Mon Sep 17 00:00:00 2001
From: Namhyung Kim <[email protected]>
Date: Wed, 10 Apr 2013 09:10:44 +0900
Subject: [PATCH] tracing: Check result of ring_buffer_read_prepare()
The ring_buffer_read_prepare() can return NULL if memory allocation
fails. Fail out in this case instead of succeeding and then having
no output.
Suggested-by: Steven Rostedt <[email protected]>
Signed-off-by: Namhyung Kim <[email protected]>
---
kernel/trace/trace.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7270460cfe3c..3b3514dc8e5e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2826,6 +2826,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ if (!iter->buffer_iter[cpu])
+ goto free;
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
@@ -2836,6 +2838,9 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ if (!iter->buffer_iter[cpu])
+ goto free;
+
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
@@ -2847,6 +2852,26 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
return iter;
+free:
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ for_each_tracing_cpu(cpu) {
+ if (iter->buffer_iter[cpu])
+ ring_buffer_read_finish(iter->buffer_iter[cpu]);
+ }
+ } else {
+ cpu = iter->cpu_file;
+ if (iter->buffer_iter[cpu])
+ ring_buffer_read_finish(iter->buffer_iter[cpu]);
+ }
+
+ if (iter->trace && iter->trace->close)
+ iter->trace->close(iter);
+
+ if (!iter->snapshot)
+ tracing_start_tr(tr);
+
+ mutex_destroy(&iter->mutex);
+ free_cpumask_var(iter->started);
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
--
1.7.11.7
On Wed, 2013-04-10 at 10:30 +0900, Namhyung Kim wrote:
> >From 7ba245dba217ef858b467552019acd49f7fdce7e Mon Sep 17 00:00:00 2001
> From: Namhyung Kim <[email protected]>
> Date: Wed, 10 Apr 2013 09:10:44 +0900
> Subject: [PATCH] tracing: Check result of ring_buffer_read_prepare()
>
> The ring_buffer_read_prepare() can return NULL if memory allocation
> fails. Fail out in this case instead of succeeding and then having
> no output.
>
> Suggested-by: Steven Rostedt <[email protected]>
> Signed-off-by: Namhyung Kim <[email protected]>
> ---
> kernel/trace/trace.c | 25 +++++++++++++++++++++++++
> 1 file changed, 25 insertions(+)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 7270460cfe3c..3b3514dc8e5e 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -2826,6 +2826,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
> for_each_tracing_cpu(cpu) {
> iter->buffer_iter[cpu] =
> ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
> + if (!iter->buffer_iter[cpu])
> + goto free;
> }
> ring_buffer_read_prepare_sync();
> for_each_tracing_cpu(cpu) {
> @@ -2836,6 +2838,9 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
> cpu = iter->cpu_file;
> iter->buffer_iter[cpu] =
> ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
> + if (!iter->buffer_iter[cpu])
> + goto free;
> +
> ring_buffer_read_prepare_sync();
> ring_buffer_read_start(iter->buffer_iter[cpu]);
> tracing_iter_reset(iter, cpu);
> @@ -2847,6 +2852,26 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
>
> return iter;
>
> +free:
> + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
> + for_each_tracing_cpu(cpu) {
> + if (iter->buffer_iter[cpu])
> + ring_buffer_read_finish(iter->buffer_iter[cpu]);
> + }
> + } else {
> + cpu = iter->cpu_file;
> + if (iter->buffer_iter[cpu])
> + ring_buffer_read_finish(iter->buffer_iter[cpu]);
> + }
As I said I would consider updating the release code, but here, it's an
error path that is extremely unlikely to be hit. Please just keep the
single loop, and leave out the cpu_file compare.
Thanks,
-- Steve
> +
> + if (iter->trace && iter->trace->close)
> + iter->trace->close(iter);
> +
> + if (!iter->snapshot)
> + tracing_start_tr(tr);
> +
> + mutex_destroy(&iter->mutex);
> + free_cpumask_var(iter->started);
> fail:
> mutex_unlock(&trace_types_lock);
> kfree(iter->trace);
From: Namhyung Kim <[email protected]>
The ring_buffer_read_prepare() can return NULL if memory allocation
fails. Fail out in this case instead of succeeding and then having
no output.
Suggested-by: Steven Rostedt <[email protected]>
Signed-off-by: Namhyung Kim <[email protected]>
---
kernel/trace/trace.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7270460cfe3c..13200de31f0b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2826,6 +2826,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ if (!iter->buffer_iter[cpu])
+ goto free;
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
@@ -2836,6 +2838,9 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ if (!iter->buffer_iter[cpu])
+ goto free;
+
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
@@ -2847,6 +2852,23 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
return iter;
+free:
+ /*
+ * For simplicity, just keep single loop without comparing cpu_file.
+ */
+ for_each_tracing_cpu(cpu) {
+ if (iter->buffer_iter[cpu])
+ ring_buffer_read_finish(iter->buffer_iter[cpu]);
+ }
+
+ if (iter->trace && iter->trace->close)
+ iter->trace->close(iter);
+
+ if (!iter->snapshot)
+ tracing_start_tr(tr);
+
+ mutex_destroy(&iter->mutex);
+ free_cpumask_var(iter->started);
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
--
1.7.11.7
On Wed, 2013-04-10 at 10:55 +0900, Namhyung Kim wrote:
> From: Namhyung Kim <[email protected]>
>
> The ring_buffer_read_prepare() can return NULL if memory allocation
> fails. Fail out in this case instead of succeeding and then having
> no output.
>
> Suggested-by: Steven Rostedt <[email protected]>
> Signed-off-by: Namhyung Kim <[email protected]>
> ---
> kernel/trace/trace.c | 22 ++++++++++++++++++++++
> 1 file changed, 22 insertions(+)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 7270460cfe3c..13200de31f0b 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -2826,6 +2826,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
> for_each_tracing_cpu(cpu) {
> iter->buffer_iter[cpu] =
> ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
> + if (!iter->buffer_iter[cpu])
> + goto free;
> }
OK, this totally fails. I guess we need to allow
ring_buffer_read_prepare() to return NULL, as it will return NULL if a
cpu is offline or the tracing_cpumask has a CPU down.
I'll just pull this patch out of my queue for now.
-- Steve
> ring_buffer_read_prepare_sync();
> for_each_tracing_cpu(cpu) {
> @@ -2836,6 +2838,9 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
> cpu = iter->cpu_file;
> iter->buffer_iter[cpu] =
> ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
> + if (!iter->buffer_iter[cpu])
> + goto free;
> +
> ring_buffer_read_prepare_sync();
> ring_buffer_read_start(iter->buffer_iter[cpu]);
> tracing_iter_reset(iter, cpu);
> @@ -2847,6 +2852,23 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
>
> return iter;
>
> +free:
> + /*
> + * For simplicity, just keep single loop without comparing cpu_file.
> + */
> + for_each_tracing_cpu(cpu) {
> + if (iter->buffer_iter[cpu])
> + ring_buffer_read_finish(iter->buffer_iter[cpu]);
> + }
> +
> + if (iter->trace && iter->trace->close)
> + iter->trace->close(iter);
> +
> + if (!iter->snapshot)
> + tracing_start_tr(tr);
> +
> + mutex_destroy(&iter->mutex);
> + free_cpumask_var(iter->started);
> fail:
> mutex_unlock(&trace_types_lock);
> kfree(iter->trace);