NOT FOR INCLUSION
I created this patch as a result of Peter Zijlstra's request to get more
info from lockdep. This patch is not for inclusion, at least in its
present form, because it adds some redundant info to /proc/lockdep_stats.
However, some of the fields are new, and they are worth examining and/or
applying if you are looking at the MAX_STACK_TRACE_ENTRIES-too-big
problem.
I generated this patch against a recent tip/master but it applies without
conflicts to the latest rt kernel as well. Comments are welcome; in fact,
they are appreciated.
>From 5181c0296dd1549e4e706ff25a4cd81a1d90137d Mon Sep 17 00:00:00 2001
From: John Kacur <[email protected]>
Date: Thu, 22 Apr 2010 17:02:42 +0200
Subject: [PATCH] lockdep: Add nr_save_trace_invocations counter
Add the nr_save_trace_invocations counter, which counts the number of
times save_trace() is invoked for relevant trace entries.
This means those invocations from mark_lock() and add_lock_to_list().
When called from mark_lock(), we break the count down into lock usage
state categories.
Signed-off-by: John Kacur <[email protected]>
---
kernel/lockdep.c | 20 ++++++++++++++++++++
kernel/lockdep_internals.h | 2 ++
kernel/lockdep_proc.c | 23 +++++++++++++++++++++++
3 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 78325f8..f921576 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -371,6 +371,10 @@ static int verbose(struct lock_class *class)
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
+/* Calls to save_trace() from mark_lock() and add_lock_to_list() only*/
+unsigned long nr_save_trace_invocations;
+unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
+
static int save_trace(struct stack_trace *trace)
{
trace->nr_entries = 0;
@@ -410,6 +414,19 @@ static int save_trace(struct stack_trace *trace)
return 1;
}
+/*
+ * This function is only called from mark_lock() and add_lock_to_list()
+ * which are only called when holding the graph_lock. This counter
+ * piggybacks off of that lock
+ */
+static void inc_save_trace_invocations(enum lock_usage_bit new_bit)
+{
+ nr_save_trace_invocations++;
+ if (WARN_ON(new_bit >= LOCK_USAGE_STATES))
+ return;
+ nr_save_trace_invocations_type[new_bit]++;
+}
+
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
@@ -449,6 +466,7 @@ static const char *usage_str[] =
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
+#undef __USAGE
[LOCK_USED] = "INITIAL USE",
};
@@ -816,6 +834,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
if (!entry)
return 0;
+ nr_save_trace_invocations++;
if (!save_trace(&entry->trace))
return 0;
@@ -2615,6 +2634,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
hlock_class(this)->usage_mask |= new_mask;
+ inc_save_trace_invocations(new_bit);
if (!save_trace(hlock_class(this)->usage_traces + new_bit))
return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 8d7d4b6..6149358 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -84,6 +84,8 @@ extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
+extern unsigned long nr_save_trace_invocations;
+extern unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 59b76c8..ef5f372 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -215,8 +215,24 @@ static void lockdep_stats_debug_show(struct seq_file *m)
#endif
}
+#define __USAGE(__STATE) \
+[LOCK_USED_IN_##__STATE] = "LOCK_USED_IN_"__stringify(__STATE), \
+[LOCK_ENABLED_##__STATE] = "LOCK_ENABLED_"__stringify(__STATE), \
+[LOCK_USED_IN_##__STATE##_READ] = "LOCK_USED_IN_"__stringify(__STATE)"_READ", \
+[LOCK_ENABLED_##__STATE##_READ] = "LOCK_ENABLED_"__stringify(__STATE)"_READ",
+
+static const char *lockstate_tostr[] =
+{
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+#undef __USAGE
+ [LOCK_USED] = "LOCK_USED",
+};
+
static int lockdep_stats_show(struct seq_file *m, void *v)
{
+ int bit;
struct lock_class *class;
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
@@ -307,6 +323,13 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_process_chains);
seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
+ seq_printf(m, " stack-trace invocations: %lu\n",
+ nr_save_trace_invocations);
+
+ for (bit=0; bit < LOCK_USAGE_STATES; bit++)
+ seq_printf(m, "\t%s: %lu\n", lockstate_tostr[bit],
+ nr_save_trace_invocations_type[bit]);
+
seq_printf(m, " combined max dependencies: %11u\n",
(nr_hardirq_chains + 1) *
(nr_softirq_chains + 1) *
--
1.6.6.1
On Fri, 2010-04-23 at 10:58 +0800, Yong Zhang wrote:
>
> > +unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
>
> And each item in this array will be equal to nr_hardirq_[un]safe,
> nr_softirq_[un]safe and such things under lockdep_stats_show(). Right?
Well, the stats for RECLAIM_FS are missing, so at the very least we need
to re-write lockdep_stats_show() to use the lockdep_states.h magic.
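One possible shape for that rewrite - just a sketch, not a posted patch; the
nr_locks_per_state[] array below is made up for illustration - would be to
count classes per usage bit inside lockdep_stats_show() and print them via a
string table built from lockdep_states.h, like the lockstate_tostr[] table in
John's patch above:

	unsigned long nr_locks_per_state[LOCK_USAGE_STATES] = { 0 };
	struct lock_class *class;
	int bit;

	/* one pass over all lock classes, one counter per usage bit */
	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		for (bit = 0; bit < LOCK_USAGE_STATES; bit++)
			if (class->usage_mask & (1 << bit))
				nr_locks_per_state[bit]++;
	}

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++)
		seq_printf(m, "%40s: %11lu\n",
			   lockstate_tostr[bit], nr_locks_per_state[bit]);

That way RECLAIM_FS - and any state added to lockdep_states.h later - shows
up in /proc/lockdep_stats without touching lockdep_stats_show() again.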
On Thu, Apr 22, 2010 at 10:15:48PM +0200, John Kacur wrote:
> >From 5181c0296dd1549e4e706ff25a4cd81a1d90137d Mon Sep 17 00:00:00 2001
> From: John Kacur <[email protected]>
> Date: Thu, 22 Apr 2010 17:02:42 +0200
> Subject: [PATCH] lockdep: Add nr_save_trace_invocations counter
>
> Add the nr_save_trace_invocations counter, which counts the number of
> times save_trace() is invoked for relevant trace entries.
>
> This means those invocations from mark_lock() and add_lock_to_list().
>
> When called from mark_lock(), we break the count down into lock usage
> state categories.
>
> Signed-off-by: John Kacur <[email protected]>
I just took a rough look at it. I don't think this can give us more info.
> +/* Calls to save_trace() from mark_lock() and add_lock_to_list() only*/
> +unsigned long nr_save_trace_invocations;
It will be equal to nr_list_entries.
> +unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
And each item in this array will be equal to nr_hardirq_[un]safe,
nr_softirq_[un]safe and such things under lockdep_stats_show(). Right?
Thanks,
Yong
On Fri, 23 Apr 2010, Yong Zhang wrote:
> I just took a rough look at it. I don't think this can give us more info.
>
> > +/* Calls to save_trace() from mark_lock() and add_lock_to_list() only*/
> > +unsigned long nr_save_trace_invocations;
>
> It will be equal to nr_list_entries.
>
> > +unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
>
> And each item in this array will be equal to nr_hardirq_[un]safe,
> nr_softirq_[un]safe and such things under lockdep_stats_show(). Right?
>
> Thanks,
> Yong
>
Hi Yong
Some context here - Peter asked me to see if we could get some more
detailed stats on why some configurations reach the
MAX_STACK_TRACE_ENTRIES limit - whether the limit was really too low for
some circumstances, or whether we were counting some things unnecessarily.
In any case, I stamped a big NOT FOR INCLUSION on my mail, because I
noticed that some things were redundant - albeit obtained in a slightly
different manner. However, not everything is redundant.
In particular, nr_save_trace_invocations is NOT equal to nr_list_entries.
You will see that reported in /proc/lockdep_stats as
direct dependencies: 8752 [max: 16384]
I have
stack-trace invocations: 10888
from the same run.
Still trying to figure out what the meaning of that is, to be honest.
Here is a portion of the lockdep_stats, with all of the new fields and the
redundant ones.
stack-trace invocations: 10888
LOCK_USED_IN_HARDIRQ: 15
LOCK_USED_IN_HARDIRQ_READ: 0
LOCK_ENABLED_HARDIRQ: 543
LOCK_ENABLED_HARDIRQ_READ: 28
LOCK_USED_IN_SOFTIRQ: 0
LOCK_USED_IN_SOFTIRQ_READ: 0
LOCK_ENABLED_SOFTIRQ: 543
LOCK_ENABLED_SOFTIRQ_READ: 28
LOCK_USED_IN_RECLAIM_FS: 5
LOCK_USED_IN_RECLAIM_FS_READ: 0
LOCK_ENABLED_RECLAIM_FS: 95
LOCK_ENABLED_RECLAIM_FS_READ: 8
LOCK_USED: 871
combined max dependencies: 139841
hardirq-safe locks: 15
hardirq-unsafe locks: 543
softirq-safe locks: 0
softirq-unsafe locks: 543
irq-safe locks: 15
irq-unsafe locks: 543
hardirq-read-safe locks: 0
hardirq-read-unsafe locks: 28
softirq-read-safe locks: 0
softirq-read-unsafe locks: 28
irq-read-safe locks: 0
irq-read-unsafe locks: 28
So, you see that all of the reclaim fields are new,
LOCK_USED_IN_RECLAIM_FS: 5
LOCK_USED_IN_RECLAIM_FS_READ: 0
LOCK_ENABLED_RECLAIM_FS: 95
LOCK_ENABLED_RECLAIM_FS_READ: 8
I can create a patch for inclusion that adds the reclaim fields; the
question is whether nr_save_trace_invocations is a useful stat for us or not.
Thanks
On Fri, Apr 23, 2010 at 09:24:55AM +0200, John Kacur wrote:
> Some context here - Peter asked me to see if we could get some more
> detailed stats on why some configurations reach the
> MAX_STACK_TRACE_ENTRIES limit - whether the limit was really too low for
> some circumstances, or whether we were counting somethings unnecessarily.
>
> In any case, I stamped a big NOT FOR INCLUSION on my mail, because I
> noticed that somethings were redundant - albeit, obtained in a slightly
> different manner, however, not everything is redundant.
>
> In particular, nr_save_trace_invocations is NOT equal to nr_list_entries.
> You will see that reported in /proc/lockdep_stats as
> direct dependencies: 8752 [max: 16384]
> I have
> stack-trace invocations: 10888
> from the same run.
I missed that nr_save_trace_invocations is also increased in
inc_save_trace_invocations().
So nr_save_trace_invocations = nr_list_entries + sum of
nr_save_trace_invocations_type[].
>
> Still trying to figure out what the meaning is of that though to be
> honest.
>
> Here is a portion of the lockdep_stats, with all of the new fields and the
> redundant ones.
>
> stack-trace invocations: 10888
> LOCK_USED_IN_HARDIRQ: 15
> LOCK_USED_IN_HARDIRQ_READ: 0
> LOCK_ENABLED_HARDIRQ: 543
> LOCK_ENABLED_HARDIRQ_READ: 28
> LOCK_USED_IN_SOFTIRQ: 0
> LOCK_USED_IN_SOFTIRQ_READ: 0
> LOCK_ENABLED_SOFTIRQ: 543
> LOCK_ENABLED_SOFTIRQ_READ: 28
> LOCK_USED_IN_RECLAIM_FS: 5
> LOCK_USED_IN_RECLAIM_FS_READ: 0
> LOCK_ENABLED_RECLAIM_FS: 95
> LOCK_ENABLED_RECLAIM_FS_READ: 8
> LOCK_USED: 871
> combined max dependencies: 139841
> hardirq-safe locks: 15
> hardirq-unsafe locks: 543
> softirq-safe locks: 0
> softirq-unsafe locks: 543
> irq-safe locks: 15
> irq-unsafe locks: 543
> hardirq-read-safe locks: 0
> hardirq-read-unsafe locks: 28
> softirq-read-safe locks: 0
> softirq-read-unsafe locks: 28
> irq-read-safe locks: 0
> irq-read-unsafe locks: 28
>
> So, you see that all of the reclaim fields are new,
> LOCK_USED_IN_RECLAIM_FS: 5
> LOCK_USED_IN_RECLAIM_FS_READ: 0
> LOCK_ENABLED_RECLAIM_FS: 95
> LOCK_ENABLED_RECLAIM_FS_READ: 8
Yes, indeed, the data in lockdep_stats_show() is out of date.
So, as Peter said in another thread, we should add the samples for RECLAIM_FS.
>
> I can create a patch for inclusion that adds the reclaim fields, the
> question is, is the nr_save_trace_invocations a useful stat for us or not?
Actually it's just a summation of the samples.
I don't think it's necessary.
Thanks,
Yong
On Fri, Apr 23, 2010 at 08:52:05AM +0200, Peter Zijlstra wrote:
> On Fri, 2010-04-23 at 10:58 +0800, Yong Zhang wrote:
> >
> > > +unsigned long nr_save_trace_invocations_type[LOCK_USAGE_STATES];
> >
> > And each item in this array will be equal to nr_hardirq_[un]safe,
> > nr_softirq_[un]safe and such things under lockdep_stats_show(). Right?
>
> Well, the stats for RECLAIM_FS are missing, so at the very least we need
> to re-write lockdep_stats_show() to use the lockdep_states.h magic.
Yeah, it's a good starting point and will keep things unified in the future.
Thanks,
Yong
On Fri, 2010-04-23 at 09:24 +0200, John Kacur wrote:
> direct dependencies: 8752 [max: 16384]
> I have
> stack-trace invocations: 10888
> from the same run.
>
> Still trying to figure out what the meaning is of that though to be
> honest.
>
> Here is a portion of the lockdep_stats, with all of the new fields and the
> redundant ones.
>
> stack-trace invocations: 10888
> LOCK_USED_IN_HARDIRQ: 15
> LOCK_USED_IN_HARDIRQ_READ: 0
> LOCK_ENABLED_HARDIRQ: 543
> LOCK_ENABLED_HARDIRQ_READ: 28
> LOCK_USED_IN_SOFTIRQ: 0
> LOCK_USED_IN_SOFTIRQ_READ: 0
> LOCK_ENABLED_SOFTIRQ: 543
> LOCK_ENABLED_SOFTIRQ_READ: 28
> LOCK_USED_IN_RECLAIM_FS: 5
> LOCK_USED_IN_RECLAIM_FS_READ: 0
> LOCK_ENABLED_RECLAIM_FS: 95
> LOCK_ENABLED_RECLAIM_FS_READ: 8
> LOCK_USED: 871
8752+871+8+95+5+28+543+28+543+15=10888
So you get a stack-trace for each direct-dependency, and you get a
stack-trace for each LOCK_state; the sum seems to match the total
invocations.
None of these numbers look strange.
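For completeness, here is a quick user-space check of that sum, using the
numbers from John's excerpt above (this is not kernel code, it just redoes
the arithmetic):

	#include <stdio.h>

	int main(void)
	{
		unsigned long direct_deps = 8752;	/* "direct dependencies"      */
		unsigned long usage_traces[] = {	/* the per-LOCK_* lines above */
			15, 0, 543, 28,			/* HARDIRQ                    */
			0, 0, 543, 28,			/* SOFTIRQ                    */
			5, 0, 95, 8,			/* RECLAIM_FS                 */
			871,				/* LOCK_USED                  */
		};
		unsigned long sum = direct_deps;
		unsigned int i;

		for (i = 0; i < sizeof(usage_traces) / sizeof(usage_traces[0]); i++)
			sum += usage_traces[i];

		printf("%lu\n", sum);			/* prints 10888               */
		return 0;
	}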
On Fri, Apr 23, 2010 at 10:05 AM, Peter Zijlstra <[email protected]> wrote:
>
> 8752+871+8+95+5+28+543+28+543+15=10888
>
> So you get a stack-trace for each direct-dependency, and you get a
> stack-trace for each LOCK_state, the sum seems to match the total
> invocations.
>
> None of these numbers look strange.
>
As I told Peter privately, the laptop that triggered the
MAX_STACK_TRACE_ENTRIES warning every time has met an
unfortunate early demise. However, I think it was the config - not the
hardware. On this machine, where the above
numbers come from, I believe I have fewer debug options configured -
but it is running the exact same kernel as
the laptop was (2.6.33.2-rt13).
On Fri, Apr 23, 2010 at 10:31:16AM +0200, John Kacur wrote:
> > 8752+871+8+95+5+28+543+28+543+15=10888
> >
> > So you get a stack-trace for each direct-dependency, and you get a
> > stack-trace for each LOCK_state, the sum seems to match the total
> > invocations.
> >
> > None of these numbers look strange.
> >
>
> As I told Peter privately the laptop that triggered the
> MAX_STACK_TRACE_ENTRIES every time, has met an
> unfortunate early demise. However, I think it was the config - not the
> hardware. On this machine where the above
> numbers come from, I believe I have less debug options configured -
> but it is running the exact same kernel as
> the laptop was. (2.6.33.2-rt13)
Through a rough computation:
MAX_STACK_TRACE_ENTRIES/10888 = 24
That means the average stack depth is about 24.
So I'm thinking we could add a check on the biggest depth?
Could this make sense?
Thanks,
Yong
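A minimal sketch of what such a check could look like (not a posted patch;
the counter name max_stack_trace_depth is made up for illustration) - record
the deepest trace at the end of save_trace(), next to the existing totals,
and print it from lockdep_stats_show():

	unsigned long max_stack_trace_depth;

	/* at the end of save_trace(), once trace->nr_entries is filled in: */
	if (trace->nr_entries > max_stack_trace_depth)
		max_stack_trace_depth = trace->nr_entries;

	/* plus one extra line in lockdep_stats_show(): */
	seq_printf(m, " max stack-trace depth:      %11lu\n",
		   max_stack_trace_depth);

Together with the invocation counters above, that would show whether the
overflow comes from many shallow traces or from a few very deep ones.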
On Fri, Apr 23, 2010 at 10:49 AM, Yong Zhang <[email protected]> wrote:
> On Fri, Apr 23, 2010 at 10:31:16AM +0200, John Kacur wrote:
>> > 8752+871+8+95+5+28+543+28+543+15=10888
>> >
>> > So you get a stack-trace for each direct-dependency, and you get a
>> > stack-trace for each LOCK_state, the sum seems to match the total
>> > invocations.
>> >
>> > None of these numbers look strange.
>> >
>>
>> As I told Peter privately the laptop that triggered the
>> MAX_STACK_TRACE_ENTRIES every time, has met an
>> unfortunate early demise. However, I think it was the config - not the
>> hardware. On this machine where the above
>> numbers come from, I believe I have less debug options configured -
>> but it is running the exact same kernel as
>> the laptop was. (2.6.33.2-rt13)
>
> Through a rough computation:
> MAX_STACK_TRACE_ENTRIES/10888 = 24
>> That means the average stack depth is about 24.
>> So I'm thinking we could add a check on the biggest depth?
> Could this make sense?
>
Hi Yong, yes, that makes sense. I'll see if I can provide a patch for that too.
Thanks
On Fri, Apr 23, 2010 at 10:31:16AM +0200, John Kacur wrote:
> > None of these numbers look strange.
> >
>
> As I told Peter privately the laptop that triggered the
> MAX_STACK_TRACE_ENTRIES every time, has met an
> unfortunate early demise. However, I think it was the config - not the
> hardware. On this machine where the above
> numbers come from, I believe I have less debug options configured -
> but it is running the exact same kernel as
> the laptop was. (2.6.33.2-rt13)
Hi John,
(checking mail at home).
I found a place that can be hacked on. Below is the patch,
but I haven't even compiled it. Can you test it to see if it eases
your problem?
---cut here ---
>From 6b9d513b7c417c0805ef0acc1cb3227bddba0889 Mon Sep 17 00:00:00 2001
From: Yong Zhang <[email protected]>
Date: Fri, 23 Apr 2010 21:13:54 +0800
Subject: [PATCH] lockdep: reduce stack_trace usage
When check_prevs_add() is called and all validations pass,
add_lock_to_list() will add the new lock to the dependency tree and
allocate a stack_trace for each list_entry. But at this point,
we are always on the same stack, so the stack_trace for each
list_entry has the same value. This is redundant and eats up
lots of memory, which can lead to the warning on a low
MAX_STACK_TRACE_ENTRIES.
Use one copy of the stack_trace instead.
Signed-off-by: Yong Zhang <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: David S. Miller <[email protected]>
---
kernel/lockdep.c | 20 ++++++++++++--------
1 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1c..097d5fb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -818,7 +818,8 @@ static struct lock_list *alloc_list_entry(void)
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip, int distance)
+ struct list_head *head, unsigned long ip,
+ int distance, struct stack_trace *trace)
{
struct lock_list *entry;
/*
@@ -829,11 +830,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
if (!entry)
return 0;
- if (!save_trace(&entry->trace))
- return 0;
-
entry->class = this;
entry->distance = distance;
+ entry->trace = *trace;
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
@@ -1635,7 +1634,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, int distance)
+ struct held_lock *next, int distance, struct stack_trace *trace)
{
struct lock_list *entry;
int ret;
@@ -1694,14 +1693,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(prev)->locks_after,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, trace);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(next)->locks_before,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, trace);
if (!ret)
return 0;
@@ -1732,6 +1731,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
struct held_lock *hlock;
+ struct stack_trace trace;
/*
* Debugging checks.
@@ -1748,6 +1748,9 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
curr->held_locks[depth-1].irq_context)
goto out_bug;
+ if (!save_trace(&trace))
+ return 0;
+
for (;;) {
int distance = curr->lockdep_depth - depth + 1;
hlock = curr->held_locks + depth-1;
@@ -1756,7 +1759,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
* added:
*/
if (hlock->read != 2) {
- if (!check_prev_add(curr, hlock, next, distance))
+ if (!check_prev_add(curr, hlock, next,
+ distance, &trace))
return 0;
/*
* Stop after the first non-trylock entry,
--
1.6.3.3
On Fri, Apr 23, 2010 at 09:40:44PM +0800, Yong Zhang wrote:
> Hi John,
>
> (checking mail at home).
> I find some place which can be hacked. Below is the patch.
> But I don't even compile it. Can you test it to see if it can smooth
> your problem.
>
> ---cut here ---
> >From 6b9d513b7c417c0805ef0acc1cb3227bddba0889 Mon Sep 17 00:00:00 2001
> From: Yong Zhang <[email protected]>
> Date: Fri, 23 Apr 2010 21:13:54 +0800
> Subject: [PATCH] lockdep: reduce stack_trace usage
>
> When check_prevs_add() is called and all validations pass,
> add_lock_to_list() will add the new lock to the dependency tree and
> allocate a stack_trace for each list_entry. But at this point,
> we are always on the same stack, so the stack_trace for each
> list_entry has the same value. This is redundant and eats up
> lots of memory, which can lead to the warning on a low
> MAX_STACK_TRACE_ENTRIES.
> Use one copy of the stack_trace instead.
With the following test patch running on my machine:
> cat /proc/lockdep_stats
lock-classes: 564 [max: 8191]
direct dependencies: 2626 [max: 16384]
indirect dependencies: 5951
all direct dependencies: 48226
dependency chains: 2576 [max: 32768]
dependency chain hlocks: 6740 [max: 163840]
in-hardirq chains: 18
in-softirq chains: 163
in-process chains: 2395
stack-trace entries: 63076 [max: 262144]
duplicated stack-trace entries: 7592 <=========
combined max dependencies: 7465936
hardirq-safe locks: 30
hardirq-unsafe locks: 380
softirq-safe locks: 82
softirq-unsafe locks: 305
irq-safe locks: 90
irq-unsafe locks: 380
hardirq-read-safe locks: 0
hardirq-read-unsafe locks: 51
softirq-read-safe locks: 11
softirq-read-unsafe locks: 40
irq-read-safe locks: 11
irq-read-unsafe locks: 51
uncategorized locks: 112
unused locks: 1
max locking depth: 15
max bfs queue depth: 83
debug_locks: 1
We can see that about 12% of the stack_trace usage can be reduced.
Thanks,
Yong
------lockdep-duplicated-stack_trace-detect.patch------
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1c..dfd37ea 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -369,6 +369,7 @@ static int verbose(struct lock_class *class)
* addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
+unsigned long nr_duplicated_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static int save_trace(struct stack_trace *trace)
@@ -818,9 +819,14 @@ static struct lock_list *alloc_list_entry(void)
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip, int distance)
+ struct list_head *head, unsigned long ip,
+ int distance, struct held_lock *next)
{
struct lock_list *entry;
+ static struct held_lock *hlock;
+ static struct stack_trace trace;
+ int i;
+
/*
* Lock not present yet - get a new dependency struct and
* add it to the list:
@@ -834,6 +840,20 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
entry->class = this;
entry->distance = distance;
+
+ if (hlock != next) {
+ hlock = next;
+ trace = entry->trace;
+ } else if (trace.nr_entries == entry->trace.nr_entries &&
+ trace.max_entries == entry->trace.max_entries) {
+ /* start from 2 to skip the stack introduced by lockdep */
+ for (i=2; i<trace.nr_entries; i++) {
+ if (trace.entries[i] != entry->trace.entries[i])
+ goto no_duplicated;
+ }
+ nr_duplicated_stack_trace_entries += trace.nr_entries;
+ }
+no_duplicated:
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
@@ -1694,14 +1714,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(prev)->locks_after,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, next);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(next)->locks_before,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, next);
if (!ret)
return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2ee95a..d00fe7b 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -84,6 +84,7 @@ extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
+extern unsigned long nr_duplicated_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d4aba4f..32cb9a3 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -307,6 +307,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_process_chains);
seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
+ seq_printf(m, " duplicated stack-trace entries:%11lu\n",
+ nr_duplicated_stack_trace_entries);
seq_printf(m, " combined max dependencies: %11u\n",
(nr_hardirq_chains + 1) *
(nr_softirq_chains + 1) *