2020-03-12 15:14:28

by Boqun Feng

Subject: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

Qian Cai reported a bug when PROVE_RCU_LIST=y, and a read of /proc/lockdep
triggered a warning:

[26405.676199][ T3548] DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)
...
[26406.402408][ T3548] Call Trace:
[26406.416739][ T3548] lock_is_held_type+0x5d/0x150
[26406.438262][ T3548] ? rcu_lockdep_current_cpu_online+0x64/0x80
[26406.466463][ T3548] rcu_read_lock_any_held+0xac/0x100
[26406.490105][ T3548] ? rcu_read_lock_held+0xc0/0xc0
[26406.513258][ T3548] ? __slab_free+0x421/0x540
[26406.535012][ T3548] ? kasan_kmalloc+0x9/0x10
[26406.555901][ T3548] ? __kmalloc_node+0x1d7/0x320
[26406.578668][ T3548] ? kvmalloc_node+0x6f/0x80
[26406.599872][ T3548] __bfs+0x28a/0x3c0
[26406.617075][ T3548] ? class_equal+0x30/0x30
[26406.637524][ T3548] lockdep_count_forward_deps+0x11a/0x1a0

The warning got triggered because lockdep_count_forward_deps() calls
__bfs() without current->lockdep_recursion being set. As a result, a
lockdep internal function (__bfs()) is checked by lockdep itself, which
is unexpected, and the inconsistency between the irq-off state and the
state traced by lockdep caused the warning.

Apart from this warning, lockdep internal functions like __bfs() should
always be protected by current->lockdep_recursion to avoid potential
deadlocks and data inconsistency; therefore, add a
current->lockdep_recursion on-and-off section to protect __bfs() in
both lockdep_count_forward_deps() and lockdep_count_backward_deps().
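
The self-exclusion idea behind this can be seen in a minimal,
userspace-compilable sketch (the struct and helper names here are
illustrative stand-ins, not the kernel's):

	#include <stdio.h>

	struct task { int lockdep_recursion; };
	static struct task cur;			/* stands in for 'current' */

	/* Every lockdep entry point bails out while the flag is set. */
	static void lockdep_check(const char *what)
	{
		if (cur.lockdep_recursion)
			return;			/* lockdep is inside itself: stay invisible */
		cur.lockdep_recursion = 1;
		printf("lockdep checks %s\n", what);	/* graph walk, __bfs(), ... */
		cur.lockdep_recursion = 0;
	}

	int main(void)
	{
		lockdep_check("some lock");		/* tracked as usual */
		cur.lockdep_recursion = 1;		/* what this patch adds around __bfs() */
		lockdep_check("lockdep internals");	/* ignored: no recursion into lockdep */
		cur.lockdep_recursion = 0;
		return 0;
	}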

Reported-by: Qian Cai <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
---
kernel/locking/lockdep.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32406ef0d6a2..5142a6b11bf5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1719,9 +1719,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);

return ret;
@@ -1746,9 +1748,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);

return ret;
--
2.25.1


2020-03-13 09:35:49

by Peter Zijlstra

Subject: Re: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

On Thu, Mar 12, 2020 at 11:12:55PM +0800, Boqun Feng wrote:

Thanks!

> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 32406ef0d6a2..5142a6b11bf5 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -1719,9 +1719,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
> this.class = class;
>
> raw_local_irq_save(flags);
> + current->lockdep_recursion = 1;
> arch_spin_lock(&lockdep_lock);
> ret = __lockdep_count_forward_deps(&this);
> arch_spin_unlock(&lockdep_lock);
> + current->lockdep_recursion = 0;
> raw_local_irq_restore(flags);
>
> return ret;
> @@ -1746,9 +1748,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
> this.class = class;
>
> raw_local_irq_save(flags);
> + current->lockdep_recursion = 1;
> arch_spin_lock(&lockdep_lock);
> ret = __lockdep_count_backward_deps(&this);
> arch_spin_unlock(&lockdep_lock);
> + current->lockdep_recursion = 0;
> raw_local_irq_restore(flags);
>
> return ret;

This copies a bad pattern though; all the sites that do not check
current->lockdep_recursion first really should be using ++/-- instead.
But I just found there are indeed already a few sites that violate this.

I've taken this patch and done a general fixup on top.

---
Subject: locking/lockdep: Fix bad recursion pattern
From: Peter Zijlstra <[email protected]>
Date: Fri Mar 13 09:56:38 CET 2020

There were two patterns for lockdep_recursion:

Pattern-A:
	if (current->lockdep_recursion)
		return

	current->lockdep_recursion = 1;
	/* do stuff */
	current->lockdep_recursion = 0;

Pattern-B:
	current->lockdep_recursion++;
	/* do stuff */
	current->lockdep_recursion--;

But a third pattern has emerged:

Pattern-C:
	current->lockdep_recursion = 1;
	/* do stuff */
	current->lockdep_recursion = 0;

And while this isn't broken per se, it is highly dangerous because it
doesn't nest properly.
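
To make the hazard concrete, here is a minimal userspace sketch
(illustrative, not kernel code) of a Pattern-C section nesting inside
a Pattern-B one:

	#include <stdio.h>

	static int lockdep_recursion;

	static void inner_pattern_c(void)
	{
		lockdep_recursion = 1;
		/* do stuff */
		lockdep_recursion = 0;	/* clobbers the outer section's count */
	}

	static void outer_pattern_b(void)
	{
		lockdep_recursion++;	/* count is now 1 */
		inner_pattern_c();
		/* prints 0: the guard dropped early, so the rest of this
		   section runs unprotected */
		printf("recursion = %d, expected 1\n", lockdep_recursion);
		lockdep_recursion--;	/* and now underflows to -1 */
	}

	int main(void)
	{
		outer_pattern_b();
		printf("recursion = %d, expected 0\n", lockdep_recursion);
		return 0;
	}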

Get rid of all Pattern-C instances and shore up Pattern-A with a
warning.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
kernel/locking/lockdep.c | 74 +++++++++++++++++++++++++----------------------
1 file changed, 40 insertions(+), 34 deletions(-)

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -389,6 +389,12 @@ void lockdep_on(void)
}
EXPORT_SYMBOL(lockdep_on);

+static inline void lockdep_recursion_finish(void)
+{
+ if (WARN_ON_ONCE(--current->lockdep_recursion))
+ current->lockdep_recursion = 0;
+}
+
void lockdep_set_selftest_task(struct task_struct *task)
{
lockdep_selftest_task_struct = task;
@@ -1719,11 +1725,11 @@ unsigned long lockdep_count_forward_deps
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
raw_local_irq_restore(flags);

return ret;
@@ -1748,11 +1754,11 @@ unsigned long lockdep_count_backward_dep
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
raw_local_irq_restore(flags);

return ret;
@@ -3433,9 +3439,9 @@ void lockdep_hardirqs_on(unsigned long i
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__trace_hardirqs_on_caller(ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
}
NOKPROBE_SYMBOL(lockdep_hardirqs_on);

@@ -3491,7 +3497,7 @@ void trace_softirqs_on(unsigned long ip)
return;
}

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
/*
* We'll do an OFF -> ON transition:
*/
@@ -3506,7 +3512,7 @@ void trace_softirqs_on(unsigned long ip)
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
}

/*
@@ -3759,9 +3765,9 @@ void lockdep_init_map(struct lockdep_map
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
register_lock_class(lock, subclass, 1);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
}
@@ -4441,11 +4447,11 @@ void lock_set_class(struct lockdep_map *
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
@@ -4458,11 +4464,11 @@ void lock_downgrade(struct lockdep_map *
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
@@ -4483,11 +4489,11 @@ void lock_acquire(struct lockdep_map *lo
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
@@ -4501,11 +4507,11 @@ void lock_release(struct lockdep_map *lo

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_release(lock, ip);
if (__lock_release(lock, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
@@ -4521,9 +4527,9 @@ int lock_is_held_type(const struct lockd
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
ret = __lock_is_held(lock, read);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);

return ret;
@@ -4542,9 +4548,9 @@ struct pin_cookie lock_pin_lock(struct l
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
cookie = __lock_pin_lock(lock);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);

return cookie;
@@ -4561,9 +4567,9 @@ void lock_repin_lock(struct lockdep_map
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_repin_lock(lock, cookie);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);
@@ -4578,9 +4584,9 @@ void lock_unpin_lock(struct lockdep_map
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_unpin_lock(lock, cookie);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
@@ -4716,10 +4722,10 @@ void lock_contended(struct lockdep_map *

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_contended(lock, ip);
__lock_contended(lock, ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
@@ -4736,9 +4742,9 @@ void lock_acquired(struct lockdep_map *l

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_acquired(lock, ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
@@ -4963,7 +4969,7 @@ static void free_zapped_rcu(struct rcu_h

raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;

/* closed head */
pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4975,7 +4981,7 @@ static void free_zapped_rcu(struct rcu_h
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);

- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);
}
@@ -5022,11 +5028,11 @@ static void lockdep_free_key_range_reg(v

raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);

2020-03-13 10:23:22

by Peter Zijlstra

Subject: Re: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

On Thu, Mar 12, 2020 at 11:12:55PM +0800, Boqun Feng wrote:
> The warning got triggered because lockdep_count_forward_deps() calls
> __bfs() without current->lockdep_recursion being set. As a result, a
> lockdep internal function (__bfs()) is checked by lockdep itself, which
> is unexpected, and the inconsistency between the irq-off state and the
> state traced by lockdep caused the warning.

This also had me look at __bfs(); while there is a WARN in there, it
doesn't really assert all the expectations.

This led to the below patch.

---
Subject: locking/lockdep: Rework lockdep_lock
From: Peter Zijlstra <[email protected]>
Date: Fri Mar 13 11:09:49 CET 2020

A few sites want to assert that we own the graph_lock/lockdep_lock;
provide a more conventional lock interface for it with a number of
trivial debug checks.
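
With the new interface a caller collapses to a conventional pair; for
example, the lockdep_count_forward_deps() hunk below reduces to:

	raw_local_irq_save(flags);
	lockdep_lock();		/* asserts irqs off; takes __lock, sets __owner, bumps recursion */
	ret = __lockdep_count_forward_deps(&this);
	lockdep_unlock();	/* warns unless __owner == current, then releases */
	raw_local_irq_restore(flags);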

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
kernel/locking/lockdep.c | 89 +++++++++++++++++++++++++----------------------
1 file changed, 48 insertions(+), 41 deletions(-)

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,12 +84,39 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static struct task_struct *__owner;
+
+static inline void lockdep_lock(void)
+{
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+ arch_spin_lock(&__lock);
+ __owner = current;
+ current->lockdep_recursion++;
+}
+
+static inline void lockdep_unlock(void)
+{
+ if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
+ return;
+
+ current->lockdep_recursion--;
+ __owner = NULL;
+ arch_spin_unlock(&__lock);
+}
+
+static inline bool lockdep_assert_locked(void)
+{
+ return DEBUG_LOCKS_WARN_ON(__owner != current);
+}
+
static struct task_struct *lockdep_selftest_task_struct;

+
static int graph_lock(void)
{
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
@@ -97,27 +124,15 @@ static int graph_lock(void)
* dropped already)
*/
if (!debug_locks) {
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
return 0;
}
- /* prevent any recursions within lockdep from causing deadlocks */
- current->lockdep_recursion++;
return 1;
}

-static inline int graph_unlock(void)
+static inline void graph_unlock(void)
{
- if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
- /*
- * The lockdep graph lock isn't locked while we expect it to
- * be, we're confused now, bye!
- */
- return DEBUG_LOCKS_WARN_ON(1);
- }
-
- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
- return 0;
+ lockdep_unlock();
}

/*
@@ -128,7 +143,7 @@ static inline int debug_locks_off_graph_
{
int ret = debug_locks_off();

- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();

return ret;
}
@@ -1475,6 +1490,8 @@ static int __bfs(struct lock_list *sourc
struct circular_queue *cq = &lock_cq;
int ret = 1;

+ lockdep_assert_locked();
+
if (match(source_entry, data)) {
*target_entry = source_entry;
ret = 0;
@@ -1497,8 +1514,6 @@ static int __bfs(struct lock_list *sourc

head = get_dep_list(lock, offset);

- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
-
list_for_each_entry_rcu(entry, head, entry) {
if (!lock_accessed(entry)) {
unsigned int cq_depth;
@@ -1725,11 +1740,9 @@ unsigned long lockdep_count_forward_deps
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion++;
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
ret = __lockdep_count_forward_deps(&this);
- arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion--;
+ lockdep_unlock();
raw_local_irq_restore(flags);

return ret;
@@ -1754,11 +1767,9 @@ unsigned long lockdep_count_backward_dep
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion++;
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
ret = __lockdep_count_backward_deps(&this);
- arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion--;
+ lockdep_unlock();
raw_local_irq_restore(flags);

return ret;
@@ -2813,7 +2824,7 @@ static inline int add_chain_cache(struct
* disabled to make this an IRQ-safe lock.. for recursion reasons
* lockdep won't complain about its own locking errors.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (lockdep_assert_locked())
return 0;

chain = alloc_lock_chain();
@@ -4968,8 +4979,7 @@ static void free_zapped_rcu(struct rcu_h
return;

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion++;
+ lockdep_lock();

/* closed head */
pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4981,8 +4991,7 @@ static void free_zapped_rcu(struct rcu_h
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);

- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

@@ -5027,13 +5036,11 @@ static void lockdep_free_key_range_reg(v
init_data_structures_once();

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion++;
+ lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);

/*
@@ -5055,10 +5062,10 @@ static void lockdep_free_key_range_imm(v
init_data_structures_once();

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
__lockdep_free_key_range(pf, start, size);
__free_zapped_classes(pf);
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

@@ -5154,10 +5161,10 @@ static void lockdep_reset_lock_imm(struc
unsigned long flags;

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
__lockdep_reset_lock(pf, lock);
__free_zapped_classes(pf);
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

2020-03-15 01:33:44

by Joel Fernandes

Subject: Re: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

On Fri, Mar 13, 2020 at 10:33:25AM +0100, Peter Zijlstra wrote:
> On Thu, Mar 12, 2020 at 11:12:55PM +0800, Boqun Feng wrote:
>
> Thanks!

Thanks Peter and Boqun, the below patch makes sense to me. Just had some nits
below, otherwise:

Reviewed-by: Joel Fernandes (Google) <[email protected]>

> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index 32406ef0d6a2..5142a6b11bf5 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -1719,9 +1719,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
> > this.class = class;
> >
> > raw_local_irq_save(flags);
> > + current->lockdep_recursion = 1;
> > arch_spin_lock(&lockdep_lock);
> > ret = __lockdep_count_forward_deps(&this);
> > arch_spin_unlock(&lockdep_lock);
> > + current->lockdep_recursion = 0;
> > raw_local_irq_restore(flags);
> >
> > return ret;
> > @@ -1746,9 +1748,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
> > this.class = class;
> >
> > raw_local_irq_save(flags);
> > + current->lockdep_recursion = 1;
> > arch_spin_lock(&lockdep_lock);
> > ret = __lockdep_count_backward_deps(&this);
> > arch_spin_unlock(&lockdep_lock);
> > + current->lockdep_recursion = 0;
> > raw_local_irq_restore(flags);
> >
> > return ret;
>
> This copies a bad pattern though; all the sites that do not check
> current->lockdep_recursion first really should be using ++/-- instead.
> But I just found there are indeed already a few sites that violate this.
>
> I've taken this patch and done a general fixup on top.
>
> ---
> Subject: locking/lockdep: Fix bad recursion pattern
> From: Peter Zijlstra <[email protected]>
> Date: Fri Mar 13 09:56:38 CET 2020
>
> There were two patterns for lockdep_recursion:
>
> Pattern-A:
> 	if (current->lockdep_recursion)
> 		return
>
> 	current->lockdep_recursion = 1;
> 	/* do stuff */
> 	current->lockdep_recursion = 0;
>
> Pattern-B:
> 	current->lockdep_recursion++;
> 	/* do stuff */
> 	current->lockdep_recursion--;
>
> But a third pattern has emerged:
>
> Pattern-C:
> 	current->lockdep_recursion = 1;
> 	/* do stuff */
> 	current->lockdep_recursion = 0;
>
> And while this isn't broken per se, it is highly dangerous because it
> doesn't nest properly.
>
> Get rid of all Pattern-C instances and shore up Pattern-A with a
> warning.
>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> ---
> kernel/locking/lockdep.c | 74 +++++++++++++++++++++++++----------------------
> 1 file changed, 40 insertions(+), 34 deletions(-)
>
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -389,6 +389,12 @@ void lockdep_on(void)
> }
> EXPORT_SYMBOL(lockdep_on);
>
> +static inline void lockdep_recursion_finish(void)
> +{
> + if (WARN_ON_ONCE(--current->lockdep_recursion))
> + current->lockdep_recursion = 0;
> +}
> +
> void lockdep_set_selftest_task(struct task_struct *task)
> {
> lockdep_selftest_task_struct = task;
> @@ -1719,11 +1725,11 @@ unsigned long lockdep_count_forward_deps
> this.class = class;
>
> raw_local_irq_save(flags);
> - current->lockdep_recursion = 1;
> + current->lockdep_recursion++;
> arch_spin_lock(&lockdep_lock);
> ret = __lockdep_count_forward_deps(&this);
> arch_spin_unlock(&lockdep_lock);
> - current->lockdep_recursion = 0;
> + current->lockdep_recursion--;

This doesn't look like it should recurse. Why not just use the
lockdep_recursion_finish() helper here?

> raw_local_irq_restore(flags);
>
> return ret;
> @@ -1748,11 +1754,11 @@ unsigned long lockdep_count_backward_dep
> this.class = class;
>
> raw_local_irq_save(flags);
> - current->lockdep_recursion = 1;
> + current->lockdep_recursion++;
> arch_spin_lock(&lockdep_lock);
> ret = __lockdep_count_backward_deps(&this);
> arch_spin_unlock(&lockdep_lock);
> - current->lockdep_recursion = 0;
> + current->lockdep_recursion--;

And here.

> @@ -4963,7 +4969,7 @@ static void free_zapped_rcu(struct rcu_h
>
> raw_local_irq_save(flags);
> arch_spin_lock(&lockdep_lock);
> - current->lockdep_recursion = 1;
> + current->lockdep_recursion++;
>
> /* closed head */
> pf = delayed_free.pf + (delayed_free.index ^ 1);
> @@ -4975,7 +4981,7 @@ static void free_zapped_rcu(struct rcu_h
> */
> call_rcu_zapped(delayed_free.pf + delayed_free.index);
>
> - current->lockdep_recursion = 0;
> + current->lockdep_recursion--;

And here also if it applies.

> arch_spin_unlock(&lockdep_lock);
> raw_local_irq_restore(flags);
> }
> @@ -5022,11 +5028,11 @@ static void lockdep_free_key_range_reg(v
>
> raw_local_irq_save(flags);
> arch_spin_lock(&lockdep_lock);
> - current->lockdep_recursion = 1;
> + current->lockdep_recursion++;
> pf = get_pending_free();
> __lockdep_free_key_range(pf, start, size);
> call_rcu_zapped(pf);
> - current->lockdep_recursion = 0;
> + current->lockdep_recursion--;

And here also if it applies.

thanks!

- Joel

2020-03-16 13:56:04

by Peter Zijlstra

Subject: Re: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

On Sat, Mar 14, 2020 at 09:04:22PM -0400, Joel Fernandes wrote:
> On Fri, Mar 13, 2020 at 10:33:25AM +0100, Peter Zijlstra wrote:

> Thanks Peter and Boqun, the below patch makes sense to me. Just had some nits
> below, otherwise:
> > @@ -1719,11 +1725,11 @@ unsigned long lockdep_count_forward_deps
> > this.class = class;
> >
> > raw_local_irq_save(flags);
> > - current->lockdep_recursion = 1;
> > + current->lockdep_recursion++;
> > arch_spin_lock(&lockdep_lock);
> > ret = __lockdep_count_forward_deps(&this);
> > arch_spin_unlock(&lockdep_lock);
> > - current->lockdep_recursion = 0;
> > + current->lockdep_recursion--;
>
> This doesn't look like it should recurse. Why not just use the
> lockdep_recursion_finish() helper here?

I chose to only add that to the sites that check recursion on entry.

2020-03-16 15:04:19

by Joel Fernandes

Subject: Re: [PATCH] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

On Mon, Mar 16, 2020 at 02:55:07PM +0100, Peter Zijlstra wrote:
> On Sat, Mar 14, 2020 at 09:04:22PM -0400, Joel Fernandes wrote:
> > On Fri, Mar 13, 2020 at 10:33:25AM +0100, Peter Zijlstra wrote:
>
> > Thanks Peter and Boqun, the below patch makes sense to me. Just had some nits
> > below, otherwise:
> > > @@ -1719,11 +1725,11 @@ unsigned long lockdep_count_forward_deps
> > > this.class = class;
> > >
> > > raw_local_irq_save(flags);
> > > - current->lockdep_recursion = 1;
> > > + current->lockdep_recursion++;
> > > arch_spin_lock(&lockdep_lock);
> > > ret = __lockdep_count_forward_deps(&this);
> > > arch_spin_unlock(&lockdep_lock);
> > > - current->lockdep_recursion = 0;
> > > + current->lockdep_recursion--;
> >
> > This doesn't look like it should recurse. Why not just use the
> > lockdep_recursion_finish() helper here?
>
> I chose to only add that to the sites that check recursion on entry.

Makes sense, thank you Peter.

- Joel

Subject: [tip: locking/core] locking/lockdep: Rework lockdep_lock

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 248efb2158f1e23750728e92ad9db3ab60c14485
Gitweb: https://git.kernel.org/tip/248efb2158f1e23750728e92ad9db3ab60c14485
Author: Peter Zijlstra <[email protected]>
AuthorDate: Fri, 13 Mar 2020 11:09:49 +01:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Fri, 20 Mar 2020 13:06:25 +01:00

locking/lockdep: Rework lockdep_lock

A few sites want to assert that we own the graph_lock/lockdep_lock;
provide a more conventional lock interface for it with a number of
trivial debug checks.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/lockdep.c | 89 +++++++++++++++++++++------------------
1 file changed, 48 insertions(+), 41 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 64ea69f..47e3acb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,12 +84,39 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static struct task_struct *__owner;
+
+static inline void lockdep_lock(void)
+{
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+ arch_spin_lock(&__lock);
+ __owner = current;
+ current->lockdep_recursion++;
+}
+
+static inline void lockdep_unlock(void)
+{
+ if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
+ return;
+
+ current->lockdep_recursion--;
+ __owner = NULL;
+ arch_spin_unlock(&__lock);
+}
+
+static inline bool lockdep_assert_locked(void)
+{
+ return DEBUG_LOCKS_WARN_ON(__owner != current);
+}
+
static struct task_struct *lockdep_selftest_task_struct;

+
static int graph_lock(void)
{
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
@@ -97,27 +124,15 @@ static int graph_lock(void)
* dropped already)
*/
if (!debug_locks) {
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
return 0;
}
- /* prevent any recursions within lockdep from causing deadlocks */
- current->lockdep_recursion++;
return 1;
}

-static inline int graph_unlock(void)
+static inline void graph_unlock(void)
{
- if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
- /*
- * The lockdep graph lock isn't locked while we expect it to
- * be, we're confused now, bye!
- */
- return DEBUG_LOCKS_WARN_ON(1);
- }
-
- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
- return 0;
+ lockdep_unlock();
}

/*
@@ -128,7 +143,7 @@ static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();

- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();

return ret;
}
@@ -1479,6 +1494,8 @@ static int __bfs(struct lock_list *source_entry,
struct circular_queue *cq = &lock_cq;
int ret = 1;

+ lockdep_assert_locked();
+
if (match(source_entry, data)) {
*target_entry = source_entry;
ret = 0;
@@ -1501,8 +1518,6 @@ static int __bfs(struct lock_list *source_entry,

head = get_dep_list(lock, offset);

- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
-
list_for_each_entry_rcu(entry, head, entry) {
if (!lock_accessed(entry)) {
unsigned int cq_depth;
@@ -1729,11 +1744,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion++;
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
ret = __lockdep_count_forward_deps(&this);
- arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion--;
+ lockdep_unlock();
raw_local_irq_restore(flags);

return ret;
@@ -1758,11 +1771,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion++;
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
ret = __lockdep_count_backward_deps(&this);
- arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion--;
+ lockdep_unlock();
raw_local_irq_restore(flags);

return ret;
@@ -3046,7 +3057,7 @@ static inline int add_chain_cache(struct task_struct *curr,
* disabled to make this an IRQ-safe lock.. for recursion reasons
* lockdep won't complain about its own locking errors.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (lockdep_assert_locked())
return 0;

chain = alloc_lock_chain();
@@ -5181,8 +5192,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
return;

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion++;
+ lockdep_lock();

/* closed head */
pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -5194,8 +5204,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);

- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

@@ -5240,13 +5249,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
init_data_structures_once();

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion++;
+ lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);

/*
@@ -5268,10 +5275,10 @@ static void lockdep_free_key_range_imm(void *start, unsigned long size)
init_data_structures_once();

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
__lockdep_free_key_range(pf, start, size);
__free_zapped_classes(pf);
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

@@ -5367,10 +5374,10 @@ static void lockdep_reset_lock_imm(struct lockdep_map *lock)
unsigned long flags;

raw_local_irq_save(flags);
- arch_spin_lock(&lockdep_lock);
+ lockdep_lock();
__lockdep_reset_lock(pf, lock);
__free_zapped_classes(pf);
- arch_spin_unlock(&lockdep_lock);
+ lockdep_unlock();
raw_local_irq_restore(flags);
}

Subject: [tip: locking/core] locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 25016bd7f4caf5fc983bbab7403d08e64cba3004
Gitweb: https://git.kernel.org/tip/25016bd7f4caf5fc983bbab7403d08e64cba3004
Author: Boqun Feng <[email protected]>
AuthorDate: Thu, 12 Mar 2020 23:12:55 +08:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Fri, 20 Mar 2020 13:06:25 +01:00

locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()

Qian Cai reported a bug when PROVE_RCU_LIST=y, and a read of /proc/lockdep
triggered a warning:

[ ] DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)
...
[ ] Call Trace:
[ ] lock_is_held_type+0x5d/0x150
[ ] ? rcu_lockdep_current_cpu_online+0x64/0x80
[ ] rcu_read_lock_any_held+0xac/0x100
[ ] ? rcu_read_lock_held+0xc0/0xc0
[ ] ? __slab_free+0x421/0x540
[ ] ? kasan_kmalloc+0x9/0x10
[ ] ? __kmalloc_node+0x1d7/0x320
[ ] ? kvmalloc_node+0x6f/0x80
[ ] __bfs+0x28a/0x3c0
[ ] ? class_equal+0x30/0x30
[ ] lockdep_count_forward_deps+0x11a/0x1a0

The warning got triggered because lockdep_count_forward_deps() calls
__bfs() without current->lockdep_recursion being set. As a result, a
lockdep internal function (__bfs()) is checked by lockdep itself, which
is unexpected, and the inconsistency between the irq-off state and the
state traced by lockdep caused the warning.

Apart from this warning, lockdep internal functions like __bfs() should
always be protected by current->lockdep_recursion to avoid potential
deadlocks and data inconsistency; therefore, add a
current->lockdep_recursion on-and-off section to protect __bfs() in
both lockdep_count_forward_deps() and lockdep_count_backward_deps().

Reported-by: Qian Cai <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/lockdep.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e55c4ee..2564950 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1723,9 +1723,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);

return ret;
@@ -1750,9 +1752,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);

return ret;

Subject: [tip: locking/core] locking/lockdep: Fix bad recursion pattern

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 10476e6304222ced7df9b3d5fb0a043b3c2a1ad8
Gitweb: https://git.kernel.org/tip/10476e6304222ced7df9b3d5fb0a043b3c2a1ad8
Author: Peter Zijlstra <[email protected]>
AuthorDate: Fri, 13 Mar 2020 09:56:38 +01:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Fri, 20 Mar 2020 13:06:25 +01:00

locking/lockdep: Fix bad recursion pattern

There were two patterns for lockdep_recursion:

Pattern-A:
	if (current->lockdep_recursion)
		return

	current->lockdep_recursion = 1;
	/* do stuff */
	current->lockdep_recursion = 0;

Pattern-B:
	current->lockdep_recursion++;
	/* do stuff */
	current->lockdep_recursion--;

But a third pattern has emerged:

Pattern-C:
	current->lockdep_recursion = 1;
	/* do stuff */
	current->lockdep_recursion = 0;

And while this isn't broken per se, it is highly dangerous because it
doesn't nest properly.

Get rid of all Pattern-C instances and shore up Pattern-A with a
warning.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/lockdep.c | 74 +++++++++++++++++++++------------------
1 file changed, 40 insertions(+), 34 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2564950..64ea69f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -390,6 +390,12 @@ void lockdep_on(void)
}
EXPORT_SYMBOL(lockdep_on);

+static inline void lockdep_recursion_finish(void)
+{
+ if (WARN_ON_ONCE(--current->lockdep_recursion))
+ current->lockdep_recursion = 0;
+}
+
void lockdep_set_selftest_task(struct task_struct *task)
{
lockdep_selftest_task_struct = task;
@@ -1723,11 +1729,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
raw_local_irq_restore(flags);

return ret;
@@ -1752,11 +1758,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
raw_local_irq_restore(flags);

return ret;
@@ -3668,9 +3674,9 @@ void lockdep_hardirqs_on(unsigned long ip)
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__trace_hardirqs_on_caller(ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
}
NOKPROBE_SYMBOL(lockdep_hardirqs_on);

@@ -3726,7 +3732,7 @@ void trace_softirqs_on(unsigned long ip)
return;
}

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
/*
* We'll do an OFF -> ON transition:
*/
@@ -3741,7 +3747,7 @@ void trace_softirqs_on(unsigned long ip)
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
}

/*
@@ -3995,9 +4001,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
register_lock_class(lock, subclass, 1);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
}
@@ -4677,11 +4683,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
@@ -4694,11 +4700,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
return;

raw_local_irq_save(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
@@ -4719,11 +4725,11 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
@@ -4737,11 +4743,11 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_release(lock, ip);
if (__lock_release(lock, ip))
check_chain_key(current);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
@@ -4757,9 +4763,9 @@ int lock_is_held_type(const struct lockdep_map *lock, int read)
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
ret = __lock_is_held(lock, read);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);

return ret;
@@ -4778,9 +4784,9 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
cookie = __lock_pin_lock(lock);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);

return cookie;
@@ -4797,9 +4803,9 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_repin_lock(lock, cookie);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);
@@ -4814,9 +4820,9 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
raw_local_irq_save(flags);
check_flags(flags);

- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_unpin_lock(lock, cookie);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
@@ -4952,10 +4958,10 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
trace_lock_contended(lock, ip);
__lock_contended(lock, ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
@@ -4972,9 +4978,9 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)

raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
__lock_acquired(lock, ip);
- current->lockdep_recursion = 0;
+ lockdep_recursion_finish();
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
@@ -5176,7 +5182,7 @@ static void free_zapped_rcu(struct rcu_head *ch)

raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;

/* closed head */
pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -5188,7 +5194,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);

- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);
}
@@ -5235,11 +5241,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)

raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
- current->lockdep_recursion = 1;
+ current->lockdep_recursion++;
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
- current->lockdep_recursion = 0;
+ current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);