2001-12-04 00:12:11

by Manfred Spraul

Subject: [PATCH] improve spinlock debugging

// $Header$
// Kernel Version:
// VERSION = 2
// PATCHLEVEL = 5
// SUBLEVEL = 1
// EXTRAVERSION =-pre5
--- 2.5/include/linux/spinlock.h Fri Oct 26 17:03:12 2001
+++ build-2.5/include/linux/spinlock.h Mon Dec 3 19:45:58 2001
@@ -37,12 +37,13 @@

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
+#else

-#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
- defined (e.g. by including asm/spinlock.h */
-
-#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
-
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define DEBUG_SPINLOCKS 2 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
+#else
+#define DEBUG_SPINLOCKS 0
+#endif
#if (DEBUG_SPINLOCKS < 1)

#define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
@@ -85,22 +86,101 @@

#else /* (DEBUG_SPINLOCKS >= 2) */

+#define SPINLOCK_MAGIC 0x1D244B3C
typedef struct {
+ unsigned long magic;
volatile unsigned long lock;
volatile unsigned int babble;
const char *module;
+ char *owner;
+ int oline;
} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}

#include <linux/kernel.h>

-#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
-#define spin_is_locked(lock) (test_bit(0,(lock)))
-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
-#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
-#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_init(x) \
+ do { \
+ (x)->magic = SPINLOCK_MAGIC; \
+ (x)->lock = 0; \
+ (x)->babble = 5; \
+ (x)->module = __FILE__; \
+ (x)->owner = NULL; \
+ (x)->oline = 0; \
+ } while (0)
+#define CHECK_LOCK(x) \
+ do { \
+ if ((x)->magic != SPINLOCK_MAGIC) { \
+ printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
+ __FILE__, __LINE__, (x)); \
+ } \
+ } while(0)
+/* without debugging, spin_is_locked on UP always says
+ * FALSE. --> printk if already locked. */
+#define spin_is_locked(x) \
+ ({ \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ 0; \
+ })
+
+/* without debugging, spin_trylock on UP always says
+ * TRUE. --> printk if already locked. */
+#define spin_trylock(x) \
+ ({ \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ (x)->lock = 1; \
+ (x)->owner = __FILE__; \
+ (x)->oline = __LINE__; \
+ 1; \
+ })
+
+#define spin_lock(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, \
+ (x), (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ } \
+ (x)->lock = 1; \
+ (x)->owner = __FILE__; \
+ (x)->oline = __LINE__; \
+ } while (0)
+
+#define spin_unlock_wait(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if ((x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
+ __FILE__,__LINE__, (x)->module, (x), \
+ (x)->owner, (x)->oline); \
+ (x)->babble--; \
+ }\
+ } while (0)
+
+#define spin_unlock(x) \
+ do { \
+ CHECK_LOCK(x); \
+ if (!(x)->lock&&(x)->babble) { \
+ printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
+ __FILE__,__LINE__, (x)->module, (x));\
+ (x)->babble--; \
+ } \
+ (x)->lock = 0; \
+ } while (0)

#endif /* DEBUG_SPINLOCKS */

--- 2.5/kernel/ksyms.c Mon Dec 3 18:30:08 2001
+++ build-2.5/kernel/ksyms.c Mon Dec 3 20:06:49 2001
@@ -381,10 +381,10 @@
EXPORT_SYMBOL(tq_timer);
EXPORT_SYMBOL(tq_immediate);

-#ifdef CONFIG_SMP
/* Various random spinlocks we want to export */
EXPORT_SYMBOL(tqueue_lock);

+#ifdef CONFIG_SMP
/* Big-Reader lock implementation */
EXPORT_SYMBOL(__brlock_array);
#ifndef __BRLOCK_USE_ATOMICS
--- 2.5/Documentation/Configure.help Mon Dec 3 18:30:07 2001
+++ build-2.5/Documentation/Configure.help Mon Dec 3 20:31:40 2001
@@ -23565,8 +23565,10 @@

Spinlock debugging
CONFIG_DEBUG_SPINLOCK
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
+ Say Y here to add additional runtime checks into the spinlock
+ functions. On UP it detects missing initializations and simple
+ deadlocks. On SMP it finds missing initializations and certain other
+ kinds of spinlock errors commonly made, excluding deadlocks. This is
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.




Attachments:
patch-debug-sp (5.43 kB)

2001-12-04 04:21:46

by David Miller

Subject: Re: [PATCH] improve spinlock debugging

From: Manfred Spraul <[email protected]>
Date: Mon, 03 Dec 2001 21:10:27 +0100

Which other runtime checks are possible?
Tests for correct _irq usage are not possible; several drivers use
disable_irq().

Keep track of how many locks are being held at once, and check if it
is zero at switch_to() time. You can also do this to measure things
like max number of locks held at once and other statistics.

I added the first bit to sparc64 while hunting down a bug.
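
The generic form would be roughly this (untested sketch; the locks_held
field and the exact hook point are invented for illustration):

/* Untested sketch: keep a per-task lock count and assert that it
 * is zero whenever we schedule away. */

/* new field in struct task_struct: */
	int locks_held;

/* in the debug spin_lock()/spin_unlock() wrappers: */
	current->locks_held++;		/* on every lock */
	current->locks_held--;		/* on every unlock */

/* in schedule(), just before switch_to(): */
	if (current->locks_held)
		printk(KERN_ERR "schedule() with %d locks held by %s/%d\n",
			current->locks_held, current->comm, current->pid);

Tracking a running maximum in the lock path gives the statistics side
almost for free.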

2001-12-04 04:30:36

by Robert Love

Subject: Re: [PATCH] improve spinlock debugging

On Mon, 2001-12-03 at 23:21, David S. Miller wrote:

> Keep track of how many locks are being held at once, and check if it
> is zero at switch_to() time. You can also do this to measure things
> like max number of locks held at once and other statistics.
>
> I added the first bit to sparc64 while hunting down a bug.

Another interesting idea is to see if a lock is held during a call to
udelay().

Once you get a lock count, a lot is possible. I have looked into doing
some of these tests with the preemptible kernel patch. Since the
preemption count is essentially the lock depth, we have quick access to
all this data.
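
For example, the udelay() check could be little more than this
(untested sketch, assuming the preempt patch's per-task count and the
arch's raw __udelay() underneath):

/* Untested sketch: complain if udelay() is called while a lock is
 * held.  Relies on the preemptible kernel patch, where a nonzero
 * preempt count means at least one spinlock is held. */
#define udelay(usecs)						\
	do {							\
		if (current->preempt_count)			\
			printk(KERN_WARNING			\
				"%s:%d: udelay() with lock(s) held\n", \
				__FILE__, __LINE__);		\
		__udelay(usecs);				\
	} while (0)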

Robert Love

2001-12-04 20:34:14

by George Anzinger

Subject: Re: [PATCH] improve spinlock debugging

Manfred Spraul wrote:
>
> CONFIG_DEBUG_SPINLOCK only adds spinlock tests for SMP builds. The
> attached patch adds runtime checks for uniprocessor builds.
>
> Tested on i386/UP, but it should work on all platforms. It contains
> runtime checks for:
>
> - missing initialization
> - recursive lock
> - double unlock
> - incorrect use of spin_is_locked() or spin_trylock() [both functions
> do not work as expected on uniprocessor builds]
> The next step is adding checks for spinlock ordering mismatches.

What do you consider an order mismatch? I would like to see this:

spin_lockirq

spin_unlock

restore_irq

go away. I.e. xx_lockirq should be paired with xx_unlockirq. If
exceptions are needed, we should provide macros that explicitly do this
such as:

spin_unlock_leaveirq_off

or some such.

Of course, all this is predicated on using the lock macros in a
different way than intended. For example, preemption currently counts
up on spin_lock and disable irq, counting the spin_lockirq twice. In
fact, it need only count it once, if the locks are properly paired. It
might also be nice to have a set of lock routines that make explicit the
assumptions made. For example:

read_lock_irq_assumed_off or
read_lockirq_assumed_on (used instead of saveirq based on the
assumption)

These locks could then have optional debug code that tested the
assumption.
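
On i386, say, such a macro might look like this (untested sketch; the
EFLAGS IF-bit test is arch specific):

/* Untested sketch (i386-only): the name encodes the assumption that
 * interrupts are already off, and debug builds verify it. */
#define read_lock_irq_assumed_off(x)				\
	do {							\
		unsigned long __flags;				\
		__save_flags(__flags);				\
		if (__flags & (1 << 9))	/* IF set: irqs are on */ \
			printk(KERN_ERR				\
				"%s:%d: irqs on, assumed off\n", \
				__FILE__, __LINE__);		\
		read_lock(x);					\
	} while (0)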

If we got all of this clean enough, we would know when we were locking
with irq off and the preemption patch, for example, would not need to
count the spinlock at all, but just the irq. (Oh, and since the irq
inhibits preemption all by itself, we don't need to count it either.)
>
> Which other runtime checks are possible?
> Tests for correct _irq usage are not possible; several drivers use
> disable_irq().

Run time checks for xxx_irq when irq is already off seem reasonable.
The implication is that the xxx_unlockirq will then turn it on, which
most likely is an error. Also, see above about rolling assumptions in
to the macro name.
>
> --
> Manfred
>

--
George [email protected]
High-res-timers: http://sourceforge.net/projects/high-res-timers/
Real time sched: http://sourceforge.net/projects/rtsched/

2001-12-04 20:54:40

by Robert Love

Subject: Re: [PATCH] improve spinlock debugging

On Tue, 2001-12-04 at 15:30, george anzinger wrote:

> spin_lockirq
>
> spin_unlock
>
> restore_irq

Given this order, couldn't we _always_ not touch the preempt count since
irq's are off?

Further, since I doubt we ever see:

spin_lock_irq
restore_irq
spin_unlock

and the common use is:

spin_lock_irq
spin_unlock_irq

Isn't it safe to have spin_lock_irq *never* touch the preempt count?

Robert Love

2001-12-04 21:22:00

by Nigel Gamble

Subject: Re: [PATCH] improve spinlock debugging

On Tue, 4 Dec 2001, george anzinger wrote:
> For example, preemption currently counts
> up on spin_lock and disable irq, counting the spin_lockirq twice. In

That's not correct: we don't count it twice because we don't count
local_irq_disable etc., only the spin locks. Because...

> (Oh, and since the irq
> inhibits preemption all by itself, we don't need to count it either.)

...so we don't.

Nigel Gamble [email protected]
Mountain View, CA, USA. http://www.nrg.org/

2001-12-04 21:27:20

by George Anzinger

Subject: Re: [PATCH] improve spinlock debugging

Robert Love wrote:
>
> On Tue, 2001-12-04 at 15:30, george anzinger wrote:
>
> > spin_lockirq
> >
> > spin_unlock
> >
> > restore_irq
>
> Given this order, couldn't we _always_ not touch the preempt count since
> irq's are off?
>
> Further, since I doubt we ever see:
>
> spin_lock_irq
> restore_irq
> spin_unlock
>
> and the common use is:
>
> spin_lock_irq
> spin_unlock_irq
>
> Isn't it safe to have spin_lock_irq *never* touch the preempt count?
>
NO. The problem is the first example above. The spin_unlock will count
down, but the spin_lockirq did NOT do the paired count up (been there,
done that). This is where we need the spin_unlock_no_irq_restore.
--
George [email protected]
High-res-timers: http://sourceforge.net/projects/high-res-timers/
Real time sched: http://sourceforge.net/projects/rtsched/

2001-12-04 21:28:40

by George Anzinger

Subject: Re: [PATCH] improve spinlock debugging

Nigel Gamble wrote:
>
> On Tue, 4 Dec 2001, george anzinger wrote:
> > For example, preemption currently counts
> > up on spin_lock and disable irq, counting the spin_lockirq twice. In
>
> That's not correct: we don't count it twice because we don't count
> local_irq_disable etc., only the spin locks. Because...
>
> > (Oh, and since the irq
> > inhibits preemption all by itself, we don't need to count it either.)
>
> ...so we don't.

Right, but we do count spin_lockirq (just once). See my other email on
this.

--
George [email protected]
High-res-timers: http://sourceforge.net/projects/high-res-timers/
Real time sched: http://sourceforge.net/projects/rtsched/

2001-12-04 21:39:30

by Robert Love

Subject: Re: [PATCH] improve spinlock debugging

On Tue, 2001-12-04 at 16:25, george anzinger wrote:

> NO. The problem is the first example above. The spin_unlock will count
> down, but the spin_lockirq did NOT do the paired count up (been there,
> done that). This is where we need the spin_unlock_no_irq_restore.

You're right, I thought too fast. Then we need the proper macros...

Robert Love

2001-12-04 22:08:16

by Nigel Gamble

Subject: Re: [PATCH] improve spinlock debugging

On 4 Dec 2001, Robert Love wrote:
> On Tue, 2001-12-04 at 15:30, george anzinger wrote:
>
> > spin_lockirq
> >
> > spin_unlock
> >
> > restore_irq
>
> Given this order, couldn't we _always_ not touch the preempt count since
> irq's are off?

Not with the current spinlock usage in the kernel.
spin_lock/spin_unlock are used both nested when interrupts are known to
be disabled (as above) or, more commonly,

spin_lock_irqsave(a, flags)

spin_lock(b)

spin_unlock(b)

spin_unlock_irqrestore(a, flags)

and when interrupts are enabled:

spin_lock(c)

spin_unlock(c)

We don't need to preempt-count the former, but we do need to count the
latter; there's no way to tell the difference without a runtime check
of the interrupt state.

In IRIX we changed the name of the former, nested versions to:

spin_lock_irqsave(a, flags)

nested_spin_lock(b)

nested_spin_unlock(b)

spin_unlock_irqrestore(a, flags)

The nested version contained an assertion that interrupts were disabled
in DEBUG kernels. We wouldn't need to count the nested_spin_lock
versions.
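
In Linux terms the nested version might look roughly like this
(untested sketch; __spin_lock is a stand-in name for whatever raw arch
lock sits underneath, and the irq test shown is i386-specific):

/* Untested sketch: nested_spin_lock() asserts that interrupts are
 * already disabled, so it never has to touch the preempt count. */
#define nested_spin_lock(x)					\
	do {							\
		unsigned long __flags;				\
		__save_flags(__flags);				\
		if (__flags & (1 << 9))				\
			BUG();	/* interrupts unexpectedly on */ \
		__spin_lock(x);	/* raw lock, no preempt count */ \
	} while (0)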


> Further, since I doubt we ever see:
>
> spin_lock_irq
> restore_irq
> spin_unlock

I hope not, since that would be a bug.

> and the common use is:
>
> spin_lock_irq
> spin_unlock_irq
>
> Isn't it safe to have spin_lock_irq *never* touch the preempt count?

No, because of

> > spin_lockirq
> >
> > spin_unlock
> >
> > restore_irq

(which does occasionally occur in the kernel). The spin_unlock is going
to decrement the count, so the spin_lock_irqsave must increment it. If
we had, and used, a nested_spin_unlock, we could then have spin_lock_irq
never touch the preempt count.

[And if we could guarantee that all spinlocks were held for only a few
tens of microseconds at most (a big "if"), we could make them all into
spin_lock_irqs and then we wouldn't need the preempt count at all. This
is how REAL/IX and IRIX implemented kernel preemption.]

Nigel Gamble [email protected]
Mountain View, CA, USA. http://www.nrg.org/

2001-12-04 22:23:43

by Robert Love

Subject: Re: [PATCH] improve spinlock debugging

On Tue, 2001-12-04 at 17:06, Nigel Gamble wrote:

> > Given this order, couldn't we _always_ not touch the preempt count since
> > irq's are off?
>
> Not with the current spinlock usage in the kernel.
> spin_lock/spin_unlock are used both nested when interrupts are known to
> be disabled (as above) or, more commonly,
>
> spin_lock_irqsave(a, flags)
>
> spin_lock(b)
>
> spin_unlock(b)
>
> spin_unlock_irqrestore(a, flags)
>
> and when interrupts are enabled:
>
> spin_lock(c)
>
> spin_unlock(c)

Right, I meant just the spin_lock_irq case, which would be fine except
for the case where:

spin_lock_irq
spin_unlock
restore_irq

To solve this, we need a spin_unlock_irq_on macro that doesn't touch
the preemption count.

> We don't need to preempt-count the former, but we do need to count the
> latter; there's no way to tell the difference without a runtime check
> of the interrupt state.
>
> In IRIX we changed the name of the former, nested versions to:
>
> spin_lock_irqsave(a, flags)
>
> nested_spin_lock(b)
>
> nested_spin_unlock(b)
>
> spin_unlock_irqrestore(a, flags)
>
> The nested version contained an assertion that interrupts were disabled
> in DEBUG kernels. We wouldn't need to count the nested_spin_lock
> versions.

Ah, another optimization wrt the preempt count. This goes further than
just making spin_lock_irqsave not touch the preempt count, in that it
also lets us say (this lock is _always_ nested inside another, so we
don't need to touch the preempt count).

It would apply anywhere we grab multiple locks and drop the first one
last.

> > Further, since I doubt we ever see:
> >
> > spin_lock_irq
> > restore_irq
> > spin_unlock
>
> I hope not, since that would be a bug.
>
> > and the common use is:
> >
> > spin_lock_irq
> > spin_unlock_irq
> >
> > Isn't it safe to have spin_lock_irq *never* touch the preempt count?
>
> No, because of
>
> > > spin_lockirq
> > >
> > > spin_unlock
> > >
> > > restore_irq
>
> (which does occasionally occur in the kernel). The spin_unlock is going
> to decrement the count, so the spin_lock_irqsave must increment it. If
> we had, and used, a nested_spin_unlock, we could then have spin_lock_irq
> never touch the preempt count.

Right.

> [And if we could guarantee that all spinlocks were held for only a few
> tens of microseconds at most (a big "if"), we could make them all into
> spin_lock_irqs and then we wouldn't need the preempt count at all. This
> is how REAL/IX and IRIX implemented kernel preemption.]

I offered that idea and George convinced me otherwise :)

Robert Love

2001-12-04 23:55:37

by Manfred Spraul

Subject: Re: [PATCH] improve spinlock debugging

george anzinger wrote:
>
> Manfred Spraul wrote:
> >
> > CONFIG_DEBUG_SPINLOCK only adds spinlock tests for SMP builds. The
> > attached patch adds runtime checks for uniprocessor builds.
> >
> > Tested on i386/UP, but it should work on all platforms. It contains
> > runtime checks for:
> >
> > - missing initialization
> > - recursive lock
> > - double unlock
> > - incorrect use of spin_is_locked() or spin_trylock() [both functions
> > do not work as expected on uniprocessor builds]
> > The next step is adding checks for spinlock ordering mismatches.
>
> What do you consider an order mismatch? I would like to see this:
>
spin_lock(a);
spin_lock(b);

and somewhere else
spin_lock(b);
spin_lock(a);
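
A cheap runtime check for that would be to give every lock a fixed
level and complain when levels go backwards, roughly like this
(untested sketch; the level and max_lock_level fields are invented, and
restoring max_lock_level on unlock is glossed over):

/* Untested sketch: assign each lock a level at init time and track
 * the highest level the current task holds. */

/* at initialization: */
	a.level = 1;
	b.level = 2;

/* in the debug spin_lock() wrapper: */
	if ((x)->level <= current->max_lock_level)
		printk(KERN_WARNING
			"%s:%d: lock ordering violation (%s:%p)\n",
			__FILE__, __LINE__, (x)->module, (x));
	else
		current->max_lock_level = (x)->level;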

>
> Run time checks for xxx_irq when irq is already off seem reasonable.
> The implication is that the xxx_unlockirq will then turn it on, which
> most likely is an error. Also, see above about rolling assumptions in
> to the macro name.
>
Unfortunately there are still a few special cases where spin_lock_irq()
with already disabled interrupts is both intentional and correct^wnot
buggy (search for sleep_on and cli() through the lkml archives).

And there are optimizations such as
spin_lock_bh(a);
spin_unlock(a);
spin_lock(b);
spin_unlock_bh(b);

--
Manfred

2001-12-05 00:54:56

by George Anzinger

Subject: Re: [PATCH] improve spinlock debugging

Manfred Spraul wrote:
>
> george anzinger wrote:
> >
> > Manfred Spraul wrote:
> > >
> > > CONFIG_DEBUG_SPINLOCK only adds spinlock tests for SMP builds. The
> > > attached patch adds runtime checks for uniprocessor builds.
> > >
> > > Tested on i386/UP, but it should work on all platforms. It contains
> > > runtime checks for:
> > >
> > > - missing initialization
> > > - recursive lock
> > > - double unlock
> > > - incorrect use of spin_is_locked() or spin_trylock() [both functions
> > > do not work as expected on uniprocessor builds]
> > > The next step is adding checks for spinlock ordering mismatches.
> >
> > What do you consider an order mismatch? I would like to see this:
> >
> spin_lock(a);
> spin_lock(b);
>
> and somewhere else
> spin_lock(b);
> spin_lock(a);
>
> >
> > Run time checks for xxx_irq when irq is already off seem reasonable.
> > The implication is that the xxx_unlockirq will then turn it on, which
> > most likely is an error. Also, see above about rolling assumptions in
> > to the macro name.
> >
> Unfortunately there are still a few special cases where spin_lock_irq()
> with already disabled interrupts is both intentional and correct^wnot
> buggy (search for sleep_on and cli() through the lkml archives).
>
> And there are optimizations such as
> spin_lock_bh(a);
> spin_unlock(a);
> spin_lock(b);
> spin_unlock_bh(b);
>
Damn the special case! Full speed ahead!

But really: there will always be exceptions, and they should be noted
(and commented upon). To hide them in obscurity, and worse yet, to
allow them to dictate that we cannot do better, is a crime. There are
tricks that can be used to help us in these cases, if we just set our
collective minds to it.

--
George [email protected]
High-res-timers: http://sourceforge.net/projects/high-res-timers/
Real time sched: http://sourceforge.net/projects/rtsched/

2001-12-05 01:13:57

by Roman Zippel

Subject: Re: [PATCH] improve spinlock debugging

Hi,

Robert Love wrote:

> Right, I meant just the spin_lock_irq case, which would be fine except
> for the case where:
>
> spin_lock_irq
> spin_unlock
> restore_irq
>
> To solve this, we need a spin_unlock_irq_on macro that doesn't touch
> the preemption count.

Does anyone have a real example of something like this?
I'd suspect someone is trying a (questionable) micro-optimization or is
holding the lock for too long anyway. Instead of adding more macros,
maybe it's better to look closely at whether something needs fixing.

bye, Roman

2001-12-05 07:41:44

by George Anzinger

Subject: Re: [PATCH] improve spinlock debugging

Roman Zippel wrote:
>
> Hi,
>
> Robert Love wrote:
>
> > Right, I meant just the spin_lock_irq case, which would be fine except
> > for the case where:
> >
> > spin_lock_irq
> > spin_unlock
> > restore_irq
> >
> > To solve this, we need a spin_unlock_irq_on macro that doesn't touch
> > the preemption count.
>
> Has someone a real example of something like this?
> I'd suspect someone is trying a (questionable) micro optimization or is
> holding the lock for too long anyway. Instead of adding more macros,
> maybe it's better to look closely whether something needs fixing.
>
Oh, it is in there somewhere. I tried to do the indicated thing for
preemption and ran aground on it. Grep the drivers for spin_lock_irq
(or spin_lock_irqsave); I don't remember which, but there is more than
one.


--
George [email protected]
High-res-timers: http://sourceforge.net/projects/high-res-timers/
Real time sched: http://sourceforge.net/projects/rtsched/

2001-12-05 08:48:37

by Giuliano Pochini

Subject: RE: [PATCH] improve spinlock debugging


> Tested on i386/UP, but it should work on all platforms. It contains
> runtime checks for:
>
> - missing initialization
> - recursive lock
> - double unlock
> - incorrect use of spin_is_locked() or spin_trylock() [both functions
> do not work as expected on uniprocessor builds]
> The next step is adding checks for spinlock ordering mismatches.
>
> Which other runtime checks are possible?

It would be very useful to log when a lock (or irq) is held for more
than xx ms, and who the caller is. Is that possible?


Bye.

2001-12-05 15:42:31

by Manfred Spraul

Subject: Re: [PATCH] improve spinlock debugging

Giuliano Pochini wrote:

>
>It would be very useful to log when a lock (or irq) is held for more
>than xx ms, and who the caller is. Is that possible?
>
Have you looked at SGI's lockmeter patch?
It's very good for checking spinlock performance numbers.
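
A home-grown version could be as simple as timestamping in the debug
wrappers (untested sketch; 'grabbed' would be a new field in the debug
spinlock_t, and jiffies gives only 10ms resolution on i386):

/* Untested sketch: record when the lock was taken, complain at
 * unlock time if it was held for more than, say, 100ms. */

/* in the debug spin_lock() wrapper: */
	(x)->grabbed = jiffies;

/* in the debug spin_unlock() wrapper: */
	if (time_after(jiffies, (x)->grabbed + HZ/10))
		printk(KERN_WARNING
			"%s:%d: %s:%p held for %lu jiffies by %s/%d\n",
			__FILE__, __LINE__, (x)->module, (x),
			jiffies - (x)->grabbed, (x)->owner, (x)->oline);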

--
Manfred

2001-12-20 17:08:42

by Manfred Spraul

Subject: Re: [PATCH] improve spinlock debugging

Anton Blanchard wrote:

>
>What do you think of the idea below? We create arch specific spinlock
>functions as __foo and wrap the debugging bits around them as foo. This
>should also allow us to unify the UP and SMP spinlock debugging somewhat.
>
I tried to create a minimal patch, to reduce the probability of patch
conflicts with earlier/future kernels.

I agree that your solution is better if the patch is included in
Linus'/Marcelo's tree.
Have you asked Linus about it? I love runtime checks, but IIRC the
majority of the kernel maintainers disagree.
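
For reference, the wrapping would presumably look something like this
(untested sketch, reusing CHECK_LOCK from my patch above; __spin_lock
stands for the arch-provided raw function):

/* Untested sketch of the __foo idea: the arch header provides raw
 * __spin_lock() etc., and the generic header wraps the debug checks
 * around them, identically for UP and SMP. */
#define spin_lock(x)						\
	do {							\
		CHECK_LOCK(x);					\
		__spin_lock(x);					\
		(x)->owner = __FILE__;				\
		(x)->oline = __LINE__;				\
	} while (0)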

--
Manfred