What kdb's "ftdump" command skips when you pass it a parameter has
always been entries, not lines. The difference usually doesn't
matter, but when the trace buffer contains multi-line entries (like
a stack dump) it can.
Let's fix this in both the help text for ftdump and the local
variable names.
Signed-off-by: Douglas Anderson <[email protected]>
Acked-by: Daniel Thompson <[email protected]>
---
Changes in v5:
- Add Daniel Thompson Ack.
Changes in v4:
- skip_lines => skip_entries new for v4.
Changes in v3: None
kernel/trace/trace_kdb.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 810d78a8d14c..4b666643d69f 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,7 +17,7 @@
#include "trace.h"
#include "trace_output.h"
-static void ftrace_dump_buf(int skip_lines, long cpu_file)
+static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
@@ -70,11 +70,11 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
kdb_printf("---------------------------------\n");
cnt++;
- if (!skip_lines) {
+ if (!skip_entries) {
print_trace_line(&iter);
trace_printk_seq(&iter.seq);
} else {
- skip_lines--;
+ skip_entries--;
}
if (KDB_FLAG(CMD_INTERRUPT))
@@ -106,7 +106,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
*/
static int kdb_ftdump(int argc, const char **argv)
{
- int skip_lines = 0;
+ int skip_entries = 0;
long cpu_file;
char *cp;
@@ -114,9 +114,9 @@ static int kdb_ftdump(int argc, const char **argv)
return KDB_ARGCOUNT;
if (argc) {
- skip_lines = simple_strtol(argv[1], &cp, 0);
+ skip_entries = simple_strtol(argv[1], &cp, 0);
if (*cp)
- skip_lines = 0;
+ skip_entries = 0;
}
if (argc == 2) {
@@ -129,7 +129,7 @@ static int kdb_ftdump(int argc, const char **argv)
}
kdb_trap_printk++;
- ftrace_dump_buf(skip_lines, cpu_file);
+ ftrace_dump_buf(skip_entries, cpu_file);
kdb_trap_printk--;
return 0;
@@ -137,7 +137,7 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
- kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+ kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
"Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
--
2.21.0.225.g810b269d1ac-goog
These two new exported functions will be used in a future patch by
kdb_ftdump() to quickly skip all but the last few trace entries.
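As an illustration only (the real caller arrives in the future patch
mentioned above), a consumer might use them like this, where passing
NULL selects the global trace instance and "cpu" is assumed to be in
scope:

  /* count the entries across all CPUs in the global buffer */
  unsigned long cnt = trace_total_entries(NULL);

  /* count only the entries in one CPU's buffer */
  unsigned long cpu_cnt = trace_total_entries_cpu(NULL, cpu);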
Suggested-by: Steven Rostedt <[email protected]>
Signed-off-by: Douglas Anderson <[email protected]>
---
Changes in v5: None
Changes in v4:
- trace_total_entries() / trace_total_entries_cpu() new for v4
Changes in v3: None
kernel/trace/trace.c | 65 ++++++++++++++++++++++++++++++++++----------
kernel/trace/trace.h | 3 ++
2 files changed, 53 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ccd759eaad79..7afc90f82e53 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3490,34 +3490,69 @@ static void s_stop(struct seq_file *m, void *p)
trace_event_read_unlock();
}
+static void
+get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+ unsigned long *entries, int cpu)
+{
+ unsigned long count;
+
+ count = ring_buffer_entries_cpu(buf->buffer, cpu);
+ /*
+ * If this buffer has skipped entries, then we hold all
+ * entries for the trace and we need to ignore the
+ * ones before the time stamp.
+ */
+ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
+ count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
+ /* total is the same as the entries */
+ *total = count;
+ } else
+ *total = count +
+ ring_buffer_overrun_cpu(buf->buffer, cpu);
+ *entries = count;
+}
+
static void
get_total_entries(struct trace_buffer *buf,
unsigned long *total, unsigned long *entries)
{
- unsigned long count;
+ unsigned long t, e;
int cpu;
*total = 0;
*entries = 0;
for_each_tracing_cpu(cpu) {
- count = ring_buffer_entries_cpu(buf->buffer, cpu);
- /*
- * If this buffer has skipped entries, then we hold all
- * entries for the trace and we need to ignore the
- * ones before the time stamp.
- */
- if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
- count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
- /* total is the same as the entries */
- *total += count;
- } else
- *total += count +
- ring_buffer_overrun_cpu(buf->buffer, cpu);
- *entries += count;
+ get_total_entries_cpu(buf, &t, &e, cpu);
+ *total += t;
+ *entries += e;
}
}
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
+{
+ unsigned long total, entries;
+
+ if (!tr)
+ tr = &global_trace;
+
+ get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+
+ return entries;
+}
+
+unsigned long trace_total_entries(struct trace_array *tr)
+{
+ unsigned long total, entries;
+
+ if (!tr)
+ tr = &global_trace;
+
+ get_total_entries(&tr->trace_buffer, &total, &entries);
+
+ return entries;
+}
+
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n"
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d80cee49e0eb..4562b30eda9d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -719,6 +719,9 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
+unsigned long trace_total_entries(struct trace_array *tr);
+
void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
--
2.21.0.225.g810b269d1ac-goog
The 'ftdump' command in kdb is currently a bit of a last resort, at
least if you have lots of traces turned on. It's going to print a
whole boatload of data out of your serial port, which is probably
running at 115200 baud. This could easily take many, many minutes.
Usually you're most interested in what's at the _end_ of the ftrace
buffer, AKA what happened most recently. That means you've got to
wait the full time for the dump. The 'ftdump' command does attempt to
help you a little bit by allowing you to skip a fixed number of
entries. Unfortunately it provides no way for you to know how many
entries you should skip.
Let's do something similar to Python's negative indexing and allow
you to use a negative number to indicate that you want to skip all
entries except the last few. This allows you to quickly see what you
want.
Note that we also change the printout in ftdump to print the
(positive) number of entries actually skipped, since that can be
helpful to know when you've specified a negative skip count.
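For example (an illustrative session; the kdb prompt is abbreviated
here), dumping only the last 5 entries, then the last 5 entries from
CPU 1's buffer:

  kdb> ftdump -5
  kdb> ftdump -5 1

Internally the negative count is folded into an ordinary skip: if the
buffer holds 1000 entries, 'ftdump -5' computes
max(1000 + (-5), 0) = 995 entries to skip, leaving the last 5.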
Signed-off-by: Douglas Anderson <[email protected]>
Acked-by: Daniel Thompson <[email protected]>
---
Changes in v5:
- Only print skipping info if we skipped something (Daniel/Steven)
- Add Daniel Thompson Ack.
Changes in v4:
- Now uses trace_total_entries() / trace_total_entries_cpu().
- Based upon new patch that renames "lines" to "entries".
Changes in v3:
- Optimize counting as per Steven Rostedt.
- Down to 1 patch since patch #1 from v2 landed.
kernel/trace/trace_kdb.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 4b666643d69f..a9fc4f828036 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -40,6 +40,8 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
kdb_printf("Dumping ftrace buffer:\n");
+ if (skip_entries)
+ kdb_printf("(skipping %d entries)\n", skip_entries);
/* reset all but tr, trace, and overruns */
memset(&iter.seq, 0,
@@ -109,6 +111,7 @@ static int kdb_ftdump(int argc, const char **argv)
int skip_entries = 0;
long cpu_file;
char *cp;
+ int cnt;
if (argc > 2)
return KDB_ARGCOUNT;
@@ -129,6 +132,16 @@ static int kdb_ftdump(int argc, const char **argv)
}
kdb_trap_printk++;
+
+ /* A negative skip_entries means skip all but the last entries */
+ if (skip_entries < 0) {
+ if (cpu_file == RING_BUFFER_ALL_CPUS)
+ cnt = trace_total_entries(NULL);
+ else
+ cnt = trace_total_entries_cpu(NULL, cpu_file);
+ skip_entries = max(cnt + skip_entries, 0);
+ }
+
ftrace_dump_buf(skip_entries, cpu_file);
kdb_trap_printk--;
@@ -138,7 +151,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
- "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
+ "Dump ftrace log; -skip dumps last #entries", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
--
2.21.0.225.g810b269d1ac-goog
On Mon, 18 Mar 2019 13:47:39 -0700
Douglas Anderson <[email protected]> wrote:
> What kdb's "ftdump" command skips when you pass it a parameter has
> always been entries, not lines. The difference usually doesn't
> matter, but when the trace buffer contains multi-line entries (like
> a stack dump) it can.
>
> Let's fix this in both the help text for ftdump and the local
> variable names.
>
> Signed-off-by: Douglas Anderson <[email protected]>
> Acked-by: Daniel Thompson <[email protected]>
> ---
I'd like to get some acks from the kdb developers and then I'll take it.
-- Steve
On Mon, 18 Mar 2019 13:47:40 -0700
Douglas Anderson <[email protected]> wrote:
> These two new exported functions will be used in a future patch by
> kdb_ftdump() to quickly skip all but the last few trace entries.
>
> Suggested-by: Steven Rostedt <[email protected]>
> Signed-off-by: Douglas Anderson <[email protected]>
Actually, I guess only this email needs the ack from Daniel.
-- Steve
On Mon, Mar 18, 2019 at 01:47:40PM -0700, Douglas Anderson wrote:
> These two new exported functions will be used in a future patch by
> kdb_ftdump() to quickly skip all but the last few trace entries.
>
> Suggested-by: Steven Rostedt <[email protected]>
> Signed-off-by: Douglas Anderson <[email protected]>
> ---
>
> Changes in v5: None
> Changes in v4:
> - trace_total_entries() / trace_total_entries_cpu() new for v4
>
> Changes in v3: None
>
> kernel/trace/trace.c | 65 ++++++++++++++++++++++++++++++++++----------
> kernel/trace/trace.h | 3 ++
> 2 files changed, 53 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index ccd759eaad79..7afc90f82e53 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -3490,34 +3490,69 @@ static void s_stop(struct seq_file *m, void *p)
> trace_event_read_unlock();
> }
>
> +static void
> +get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
> + unsigned long *entries, int cpu)
> +{
> + unsigned long count;
> +
> + count = ring_buffer_entries_cpu(buf->buffer, cpu);
> + /*
> + * If this buffer has skipped entries, then we hold all
> + * entries for the trace and we need to ignore the
> + * ones before the time stamp.
> + */
> + if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> + count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> + /* total is the same as the entries */
> + *total = count;
> + } else
> + *total = count +
> + ring_buffer_overrun_cpu(buf->buffer, cpu);
> + *entries = count;
> +}
> +
> static void
> get_total_entries(struct trace_buffer *buf,
> unsigned long *total, unsigned long *entries)
> {
> - unsigned long count;
> + unsigned long t, e;
> int cpu;
>
> *total = 0;
> *entries = 0;
>
> for_each_tracing_cpu(cpu) {
> - count = ring_buffer_entries_cpu(buf->buffer, cpu);
> - /*
> - * If this buffer has skipped entries, then we hold all
> - * entries for the trace and we need to ignore the
> - * ones before the time stamp.
> - */
> - if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> - count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> - /* total is the same as the entries */
> - *total += count;
> - } else
> - *total += count +
> - ring_buffer_overrun_cpu(buf->buffer, cpu);
> - *entries += count;
> + get_total_entries_cpu(buf, &t, &e, cpu);
> + *total += t;
> + *entries += e;
> }
> }
>
> +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
> +{
> + unsigned long total, entries;
> +
> + if (!tr)
> + tr = &global_trace;
This function is only ever called with tr set to NULL, which means tr is
an argument looking for a user.
I wouldn't mind if this were copying prior art to keep the API feel
the same, but I can't find any other trace function where the
trace_array can be substituted for NULL. AFAICT all the existing
sites where global_trace is used use it unconditionally.
> +
> + get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
> +
> + return entries;
> +}
> +
> +unsigned long trace_total_entries(struct trace_array *tr)
> +{
> + unsigned long total, entries;
> +
> + if (!tr)
> + tr = &global_trace;
Ditto.
Daniel.
> +
> + get_total_entries(&tr->trace_buffer, &total, &entries);
> +
> + return entries;
> +}
> +
> static void print_lat_help_header(struct seq_file *m)
> {
> seq_puts(m, "# _------=> CPU# \n"
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index d80cee49e0eb..4562b30eda9d 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -719,6 +719,9 @@ void trace_init_global_iter(struct trace_iterator *iter);
>
> void tracing_iter_reset(struct trace_iterator *iter, int cpu);
>
> +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
> +unsigned long trace_total_entries(struct trace_array *tr);
> +
> void trace_function(struct trace_array *tr,
> unsigned long ip,
> unsigned long parent_ip,
> --
> 2.21.0.225.g810b269d1ac-goog
>
On Mon, Mar 18, 2019 at 05:18:46PM -0400, Steven Rostedt wrote:
> On Mon, 18 Mar 2019 13:47:40 -0700
> Douglas Anderson <[email protected]> wrote:
>
> > These two new exported functions will be used in a future patch by
> > kdb_ftdump() to quickly skip all but the last few trace entries.
> >
> > Suggested-by: Steven Rostedt <[email protected]>
> > Signed-off-by: Douglas Anderson <[email protected]>
>
> Actually, I guess only this email needs the ack from Daniel.
I skipped this patch in v4 since I misunderstood and thought it was just
tweaking an existing trace API (which wouldn't really need an ack from
me).
I did offer a bit of feedback after reading it, but TBH even if the
feedback is rejected I'm OK to ack:
Acked-by: Daniel Thompson <[email protected]>
Daniel.
Hi,
On Tue, Mar 19, 2019 at 4:25 AM Daniel Thompson
<[email protected]> wrote:
>
> On Mon, Mar 18, 2019 at 01:47:40PM -0700, Douglas Anderson wrote:
> > These two new exported functions will be used in a future patch by
> > kdb_ftdump() to quickly skip all but the last few trace entries.
> >
> > Suggested-by: Steven Rostedt <[email protected]>
> > Signed-off-by: Douglas Anderson <[email protected]>
> > ---
> >
> > Changes in v5: None
> > Changes in v4:
> > - trace_total_entries() / trace_total_entries_cpu() new for v4
> >
> > Changes in v3: None
> >
> > kernel/trace/trace.c | 65 ++++++++++++++++++++++++++++++++++----------
> > kernel/trace/trace.h | 3 ++
> > 2 files changed, 53 insertions(+), 15 deletions(-)
> >
> > diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> > index ccd759eaad79..7afc90f82e53 100644
> > --- a/kernel/trace/trace.c
> > +++ b/kernel/trace/trace.c
> > @@ -3490,34 +3490,69 @@ static void s_stop(struct seq_file *m, void *p)
> > trace_event_read_unlock();
> > }
> >
> > +static void
> > +get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
> > + unsigned long *entries, int cpu)
> > +{
> > + unsigned long count;
> > +
> > + count = ring_buffer_entries_cpu(buf->buffer, cpu);
> > + /*
> > + * If this buffer has skipped entries, then we hold all
> > + * entries for the trace and we need to ignore the
> > + * ones before the time stamp.
> > + */
> > + if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> > + count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> > + /* total is the same as the entries */
> > + *total = count;
> > + } else
> > + *total = count +
> > + ring_buffer_overrun_cpu(buf->buffer, cpu);
> > + *entries = count;
> > +}
> > +
> > static void
> > get_total_entries(struct trace_buffer *buf,
> > unsigned long *total, unsigned long *entries)
> > {
> > - unsigned long count;
> > + unsigned long t, e;
> > int cpu;
> >
> > *total = 0;
> > *entries = 0;
> >
> > for_each_tracing_cpu(cpu) {
> > - count = ring_buffer_entries_cpu(buf->buffer, cpu);
> > - /*
> > - * If this buffer has skipped entries, then we hold all
> > - * entries for the trace and we need to ignore the
> > - * ones before the time stamp.
> > - */
> > - if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
> > - count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
> > - /* total is the same as the entries */
> > - *total += count;
> > - } else
> > - *total += count +
> > - ring_buffer_overrun_cpu(buf->buffer, cpu);
> > - *entries += count;
> > + get_total_entries_cpu(buf, &t, &e, cpu);
> > + *total += t;
> > + *entries += e;
> > }
> > }
> >
> > +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
> > +{
> > + unsigned long total, entries;
> > +
> > + if (!tr)
> > + tr = &global_trace;
>
> This function is only ever called with tr set to NULL, which means tr is
> an argument looking for a user.
>
> I wouldn't mind if this were copying prior art to keep the API feel
> the same, but I can't find any other trace function where the
> trace_array can be substituted for NULL. AFAICT all the existing
> sites where global_trace is used use it unconditionally.
Happy to change this if you guys want. At the moment the
trace_total_entries() comes straight from Steven's suggestion at:
https://lkml.kernel.org/r/[email protected]
Ugh, but while looking at this I realized I totally forgot one
piece of feedback from Steven: to move the disabling of tracing
out so it's not re-enabled after counting but before dumping. Not
sure how I missed that. :(
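(Roughly the shape of the missing fix, purely as a sketch with
hypothetical helper names, not the posted code:

  disable_trace_buffers();                   /* hypothetical helper */
  cnt = trace_total_entries(NULL);
  skip_entries = max(cnt + skip_entries, 0); /* negative skip case */
  ftrace_dump_buf(skip_entries, cpu_file);   /* dumps the same snapshot */
  enable_trace_buffers();                    /* hypothetical helper */

...so nothing can be added to the buffers between counting and
dumping.)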
I'll wait to spin until I hear from Steven if he wants me to remove
the "struct trace_array *tr" argument, then I'll post out a v6 with
the fix I forgot...
-Doug
On Tue, 19 Mar 2019 07:45:38 -0700
Doug Anderson <[email protected]> wrote:
> > > +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
> > > +{
> > > + unsigned long total, entries;
> > > +
> > > + if (!tr)
> > > + tr = &global_trace;
> >
> > This function is only ever called with tr set to NULL, which means tr is
> > an argument looking for a user.
> >
> > I wouldn't mind if this were copying prior art to keep the API feel
> > the same, but I can't find any other trace function where the
> > trace_array can be substituted for NULL. AFAICT all the existing
> > sites where global_trace is used use it unconditionally.
I'm in the process of making everything work better with tracing
instances, which means making all the APIs use the tr pointer (which is
the descriptor for the instance).
All new APIs added to the tracing infrastructure should reference a "tr"
pointer, and not use the global_trace directly. We can default it if tr
is NULL.
There are currently lots of cases in the code that do so, but I'm
trying to get rid of them.
>
> Happy to change this if you guys want. At the moment the
> trace_total_entries() comes straight from Steven's suggestion at:
>
> https://lkml.kernel.org/r/[email protected]
>
> Ugh, but while looking at this I realized I totally forgot one
> piece of feedback from Steven: to move the disabling of tracing
> out so it's not re-enabled after counting but before dumping. Not
> sure how I missed that. :(
>
> I'll wait to spin until I hear from Steven if he wants me to remove
> the "struct trace_array *tr" argument, then I'll post out a v6 with
> the fix I forgot...
Please keep the tr argument.
Thanks!
-- Steve
On Tue, Mar 19, 2019 at 10:55:25AM -0400, Steven Rostedt wrote:
> On Tue, 19 Mar 2019 07:45:38 -0700
> Doug Anderson <[email protected]> wrote:
>
> > > > +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
> > > > +{
> > > > + unsigned long total, entries;
> > > > +
> > > > + if (!tr)
> > > > + tr = &global_trace;
> > >
> > > This function is only ever called with tr set to NULL, which means tr is
> > > an argument looking for a user.
> > >
> > > I wouldn't mind if this were copying prior art to keep the API feel
> > > the same, but I can't find any other trace function where the
> > > trace_array can be substituted for NULL. AFAICT all the existing
> > > sites where global_trace is used use it unconditionally.
>
> I'm in the process of making everything work better with tracing
> instances, which means making all the APIs use the tr pointer (which is
> the descriptor for the instance).
>
> All new APIs added to the tracing infrastructure should reference a "tr"
> pointer, and not use the global_trace directly. We can default it if tr
> is NULL.
>
> There are currently lots of cases in the code that do so, but I'm
> trying to get rid of them.
I see. So this API style will align with the rest of the family when
that work lands.
Fine for me.
Daniel.