2020-02-16 07:48:31

by Amol Grover

Subject: [PATCH RESEND] lockdep: Pass lockdep expression to RCU lists

Data is traversed using hlist_for_each_entry_rcu() outside an RCU
read-side critical section, but under the protection of either
lockdep_lock or disabled irqs.

Hence, add the corresponding lockdep expressions to silence
false-positive lockdep warnings and harden the RCU lists. Also add a
graph_lock_held() macro for the lockdep_lock check.

Two things to note:
- RCU traversals protected by both disabled irqs and the graph lock
carry both checks in their lockdep expression.
- RCU traversals protected only by disabled irqs get no extra lockdep
expression, since disabled irqs already imply an RCU-sched reader and
are checked for implicitly.
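
For reference, the optional argument is folded into the traversal's
check roughly like this (a simplified sketch, not the exact rculist.h
macro):

	/*
	 * Treat the traversal as safe if either the caller-supplied
	 * condition holds or we are inside an RCU read-side critical
	 * section.
	 */
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),
			 "RCU-list traversed in non-reader section!");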

Signed-off-by: Amol Grover <[email protected]>
---
kernel/locking/lockdep.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..696ad5d4daed 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -85,6 +85,8 @@ module_param(lock_stat, int, 0644);
* code to recurse back into the lockdep code...
*/
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+#define graph_lock_held() \
+ arch_spin_is_locked(&lockdep_lock)
static struct task_struct *lockdep_selftest_task_struct;

static int graph_lock(void)
@@ -1009,7 +1011,7 @@ static bool __check_data_structures(void)
/* Check the chain_key of all lock chains. */
for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
head = chainhash_table + i;
- hlist_for_each_entry_rcu(chain, head, entry) {
+ hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
if (!check_lock_chain_key(chain))
return false;
}
@@ -1124,7 +1126,8 @@ void lockdep_register_key(struct lock_class_key *key)
raw_local_irq_save(flags);
if (!graph_lock())
goto restore_irqs;
- hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+ irqs_disabled() && graph_lock_held()) {
if (WARN_ON_ONCE(k == key))
goto out_unlock;
}
@@ -1203,7 +1206,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
- hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
+ hlist_for_each_entry_rcu(class, hash_head, hash_entry,
+ irqs_disabled() && graph_lock_held()) {
if (class->key == key)
goto out_unlock_set;
}
@@ -2858,7 +2862,7 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;

- hlist_for_each_entry_rcu(chain, hash_head, entry) {
+ hlist_for_each_entry_rcu(chain, hash_head, entry, graph_lock_held()) {
if (READ_ONCE(chain->chain_key) == chain_key) {
debug_atomic_inc(chain_lookup_hits);
return chain;
@@ -4833,7 +4837,7 @@ static void remove_class_from_lock_chains(struct pending_free *pf,

for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
head = chainhash_table + i;
- hlist_for_each_entry_rcu(chain, head, entry) {
+ hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
remove_class_from_lock_chain(pf, chain, class);
}
}
@@ -4993,7 +4997,7 @@ static void __lockdep_free_key_range(struct pending_free *pf, void *start,
/* Unhash all classes that were created by a module. */
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
- hlist_for_each_entry_rcu(class, head, hash_entry) {
+ hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
if (!within(class->key, start, size) &&
!within(class->name, start, size))
continue;
@@ -5076,7 +5080,7 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)

for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
- hlist_for_each_entry_rcu(class, head, hash_entry) {
+ hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
if (lock->class_cache[j] == class)
return true;
@@ -5181,7 +5185,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
goto out_irq;

pf = get_pending_free();
- hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+ irqs_disabled() && graph_lock_held()) {
if (k == key) {
hlist_del_rcu(&k->hash_entry);
found = true;
--
2.24.1


2020-02-17 15:23:45

by Peter Zijlstra

Subject: Re: [PATCH RESEND] lockdep: Pass lockdep expression to RCU lists

On Sun, Feb 16, 2020 at 01:16:36PM +0530, Amol Grover wrote:
> [...]
>
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 32282e7112d3..696ad5d4daed 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -85,6 +85,8 @@ module_param(lock_stat, int, 0644);
> * code to recurse back into the lockdep code...
> */
> static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
> +#define graph_lock_held() \
> + arch_spin_is_locked(&lockdep_lock)
> static struct task_struct *lockdep_selftest_task_struct;
>
> static int graph_lock(void)
> @@ -1009,7 +1011,7 @@ static bool __check_data_structures(void)
> /* Check the chain_key of all lock chains. */
> for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
> head = chainhash_table + i;
> - hlist_for_each_entry_rcu(chain, head, entry) {
> + hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
> if (!check_lock_chain_key(chain))
> return false;
> }

URGH.. this patch combines two horribles to create a horrific :/

- spin_is_locked() is an abomination
- this RCU list stuff is just plain annoying

I'm tempted to do something like:

#define STFU (true)

hlist_for_each_entry_rcu(chain, head, entry, STFU) {

Paul, are we going a little over-board with this stuff? Do we really
have to annotate all of this?

2020-02-17 16:36:35

by Joel Fernandes

Subject: Re: [PATCH RESEND] lockdep: Pass lockdep expression to RCU lists

On Mon, Feb 17, 2020 at 04:12:46PM +0100, Peter Zijlstra wrote:
> On Sun, Feb 16, 2020 at 01:16:36PM +0530, Amol Grover wrote:
> > [...]
>
> URGH.. this patch combines two horribles to create a horrific :/
>
> - spin_is_locked() is an abomination
> - this RCU list stuff is just plain annoying
>
> I'm tempted to do something like:
>
> #define STFU (true)
>
> hlist_for_each_entry_rcu(chain, head, entry, STFU) {
>
> Paul, are we going a little over-board with this stuff? Do we really
> have to annotate all of this?

Could it use hlist_for_each_entry_rcu_notrace() if that's better for this
code? That one does not need the additional condition passed. Though I find
rcu_dereference_raw_nocheck() in that macro a bit odd, since it does sparse
checking, whereas the rcu_dereference_raw() in hlist_for_each_entry() does
nothing.

And perf can do the same thing if it is too annoying, like the tracing code
does.

This came up mainly because list_for_each_entry_rcu() does some checking of
whether it is in a reader section, but its checking is helpless when the
list is protected by a lock instead.
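
For lock-protected traversal the usual pattern is to name the lock in the
traversal itself, e.g. (hypothetical list and mutex names):

	/* Updater side: protected by mylist_mutex, not rcu_read_lock() */
	list_for_each_entry_rcu(p, &mylist, node,
				lockdep_is_held(&mylist_mutex)) {
		...
	}

That doesn't help lockdep_lock though, since it is a bare arch_spinlock_t
with no dep_map, which is presumably why the patch falls back to
arch_spin_is_locked().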

thanks,

- Joel

2020-02-17 18:32:45

by Paul E. McKenney

Subject: Re: [PATCH RESEND] lockdep: Pass lockdep expression to RCU lists

On Mon, Feb 17, 2020 at 04:12:46PM +0100, Peter Zijlstra wrote:
> On Sun, Feb 16, 2020 at 01:16:36PM +0530, Amol Grover wrote:
> > [...]
>
> URGH.. this patch combines two horribles to create a horrific :/
>
> - spin_is_locked() is an abomination

Agreed, I would prefer use of lockdep assertions myself. And yes, I
did try to get rid of spin_is_locked() some time back, but there were
a few use cases that proved stubborn. :-(
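
If we wanted something stronger here, one option is an explicit owner
field (a hypothetical sketch; lockdep.c has no such field today):

	/* Set in graph_lock(), cleared in graph_unlock(). */
	static struct task_struct *graph_lock_owner;

	#define graph_lock_held()	(graph_lock_owner == current)

Unlike arch_spin_is_locked(), that is true only for the task actually
holding the lock.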

> - this RCU list stuff is just plain annoying
>
> I'm tempted to do something like:
>
> #define STFU (true)
>
> hlist_for_each_entry_rcu(chain, head, entry, STFU) {

Now that is just plain silly. It is easier to type "true" than "STFU",
satisfying though the latter might feel to you right now.

> Paul, are we going a little over-board with this stuff? Do we really
> have to annotate all of this?

Like rcu_dereference_raw()?

My goal is to provide infrastructure that allows people to gain the
benefit of automated code review if they so choose. And a number have
so chosen. In this case, it is pretty easy to disable the checking by
adding "true" as the last argument, so I am not seeing a real problem.

Just don't come crying to me if doing so ends up hiding a bug. ;-)

Thanx, Paul