Hi Peter and Ingo,
This patchset proposes a general read-write lock deadlock detection
algorithm, on the premise that an exclusive lock can be seen as a
special/partial use of a read-write lock. All current Linux kernel lock
types are considered by this algorithm. Most notably, recursive-read
locks are now properly supported, which has not been the case for more
than a decade.
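As a rough illustration of what the algorithm has to judge (this sketch
is mine, not taken from the patchset), consider two tasks and two
rwlocks A and B:

	Task 1:			Task 2:
	read_lock(A);
				write_lock(B);
	write_lock(B);		write_lock(A);

Task 1 blocks on B (write-held by task 2) and task 2 blocks on A
(read-held by task 1): a real deadlock, even though task 1 only ever
read-locks A. In contrast:

	Task 1:			Task 2:
	read_lock(A);
				write_lock(B);
	read_lock(B);		read_lock(A);

is not a deadlock: task 2's read_lock(A) shares the lock with task 1's
reader, so task 2 makes progress and eventually releases B. Treating
read locks as exclusive flags the second case as a false positive,
while simply skipping recursive-read dependencies misses the first;
handling both correctly is what the read-write support here aims for.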
The bulk of the algorithm is in patch #27. Now that recursive-read
locks are supported, all 262 selftest cases pass.
At the very least, please consider reviewing the minor-fix and
no-functional-change patches.
Changes from v3:
- Reworded some changelogs
- Rebased to current code base
- Per Boqun's suggestion, reordered patches
- Much more time elapsed
Changes from v2:
- Hopefully handle rwsem locks correctly.
- Remove indirect dependency redundancy check.
- Check direct dependency redundancy before validation.
- Compose lock chains for those with trylocks or separated by trylocks.
- Map lock dependencies to lock chains.
- Consolidate forward and backward lock_lists.
- Clearly and formally define two-task model for lockdep.
--
Yuyang Du (30):
locking/lockdep: Rename deadlock check functions
locking/lockdep: Change return type of add_chain_cache()
locking/lockdep: Change return type of lookup_chain_cache_add()
locking/lockdep: Pass lock chain from validate_chain() to
check_prev_add()
locking/lockdep: Add lock chain list_head field in struct lock_list
and lock_chain
locking/lockdep: Update comments in struct lock_list and held_lock
locking/lockdep: Remove indirect dependency redundancy check
locking/lockdep: Skip checks if direct dependency is already present
locking/lockdep: Remove chain_head argument in validate_chain()
locking/lockdep: Remove useless lock type assignment
locking/lockdep: Remove irq-safe to irq-unsafe read check
locking/lockdep: Specify the depth of current lock stack in
lookup_chain_cache_add()
locking/lockdep: Treat every lock dependency as in a new lock chain
locking/lockdep: Combine lock_lists in struct lock_class into an array
locking/lockdep: Consolidate forward and backward lock_lists into one
locking/lockdep: Add lock chains to direct lock dependency graph
locking/lockdep: Use lock type enum to explicitly specify read or
write locks
locking/lockdep: Add read-write type for a lock dependency
locking/lockdep: Add helper functions to operate on the searched path
locking/lockdep: Update direct dependency's read-write type if it
exists
locking/lockdep: Introduce chain_hlocks_type for held lock's
read-write type
locking/lockdep: Hash held lock's read-write type into chain key
locking/lockdep: Adjust BFS algorithm to support multiple matches
locking/lockdep: Define the two task model for lockdep checks formally
locking/lockdep: Introduce mark_lock_unaccessed()
locking/lockdep: Add nest lock type
locking/lockdep: Add lock exclusiveness table
locking/lockdep: Support read-write lock's deadlock detection
locking/lockdep: Adjust selftest case for recursive read lock
locking/lockdep: Add more lockdep selftest cases
include/linux/lockdep.h | 91 ++-
include/linux/rcupdate.h | 2 +-
kernel/locking/lockdep.c | 1227 ++++++++++++++++++++++++------------
kernel/locking/lockdep_internals.h | 3 +-
kernel/locking/lockdep_proc.c | 8 +-
lib/locking-selftest.c | 1109 +++++++++++++++++++++++++++++++-
6 files changed, 1981 insertions(+), 459 deletions(-)
--
1.8.3.1
In lockdep, deadlock checking is carried out in two places:
- In the current task's held lock stack, check for lock recursion deadlock scenarios.
- In the dependency graph, check for lock inversion deadlock scenarios.
Rename the two relevant functions for later use. In addition, with recursive-read
locks, a dependency circle in the lock graph is no longer a sufficient condition
for a lock inversion deadlock, so the name check_noncircular() is not entirely
accurate anymore.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c3902c..3c89a50 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1782,8 +1782,8 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
* Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct held_lock *src, struct held_lock *target,
- struct lock_trace **const trace)
+check_deadlock_graph(struct held_lock *src, struct held_lock *target,
+ struct lock_trace **const trace)
{
int ret;
struct lock_list *uninitialized_var(target_entry);
@@ -2372,7 +2372,8 @@ static inline void inc_chains(void)
}
/*
- * Check whether we are holding such a class already.
+ * Check whether we are holding such a class already in the current
+ * held lock stack.
*
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
@@ -2380,7 +2381,7 @@ static inline void inc_chains(void)
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next)
+check_deadlock_current(struct task_struct *curr, struct held_lock *next)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2465,7 +2466,7 @@ static inline void inc_chains(void)
/*
* Prove that the new <prev> -> <next> dependency would not
- * create a circular dependency in the graph. (We do this by
+ * create a deadlock scenario in the graph. (We do this by
* a breadth-first search into the graph starting at <next>,
* and check whether we can reach <prev>.)
*
@@ -2473,7 +2474,7 @@ static inline void inc_chains(void)
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- ret = check_noncircular(next, prev, trace);
+ ret = check_deadlock_graph(next, prev, trace);
if (unlikely(ret <= 0))
return 0;
@@ -2952,7 +2953,7 @@ static int validate_chain(struct task_struct *curr,
* The simple case: does the current hold the same lock
* already?
*/
- int ret = check_deadlock(curr, hlock);
+ int ret = check_deadlock_current(curr, hlock);
if (!ret)
return 0;
--
1.8.3.1
The function add_chain_cache() adds a new chain - the current lock chain
- into the lock_chains cache if it does not already exist there.
Previously, it returned the integer 0 on failure and 1 on success. For
later use, change the return type to a pointer to the chain: the new
chain if it was added, or NULL otherwise.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c89a50..9c9b408 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2788,12 +2788,12 @@ static struct lock_chain *alloc_lock_chain(void)
* Adds a dependency chain into chain hashtable. And must be called with
* graph_lock held.
*
- * Return 0 if fail, and graph_lock is released.
- * Return 1 if succeed, with graph_lock held.
+ * Return NULL if failed, and graph_lock is released.
+ * Return the new chain if successful, with graph_lock held.
*/
-static inline int add_chain_cache(struct task_struct *curr,
- struct held_lock *hlock,
- u64 chain_key)
+static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
+ struct held_lock *hlock,
+ u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2806,16 +2806,16 @@ static inline int add_chain_cache(struct task_struct *curr,
* lockdep won't complain about its own locking errors.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return 0;
+ return NULL;
chain = alloc_lock_chain();
if (!chain) {
if (!debug_locks_off_graph_unlock())
- return 0;
+ return NULL;
print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
- return 0;
+ return NULL;
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
@@ -2836,18 +2836,18 @@ static inline int add_chain_cache(struct task_struct *curr,
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
- return 0;
+ return NULL;
print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
dump_stack();
- return 0;
+ return NULL;
}
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
- return 1;
+ return chain;
}
/*
--
1.8.3.1
Like the previous change, make lookup_chain_cache_add() return a pointer
to the lock chain if the chain is new.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9c9b408..51918d2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2870,13 +2870,13 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
/*
* If the key is not present yet in dependency chain cache then
- * add it and return 1 - in this case the new dependency chain is
- * validated. If the key is already hashed, return 0.
- * (On return with 1 graph_lock is held.)
+ * add it and return the chain - in this case the new dependency
+ * chain will be validated. If the key is already hashed, return
+ * NULL. (On return with the new chain graph_lock is held.)
*/
-static inline int lookup_chain_cache_add(struct task_struct *curr,
- struct held_lock *hlock,
- u64 chain_key)
+static inline struct lock_chain *
+lookup_chain_cache_add(struct task_struct *curr, struct held_lock *hlock,
+ u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
@@ -2884,7 +2884,7 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
if (chain) {
cache_hit:
if (!check_no_collision(curr, hlock, chain))
- return 0;
+ return NULL;
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
@@ -2893,7 +2893,7 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
class->key, class->name);
}
- return 0;
+ return NULL;
}
if (very_verbose(class)) {
@@ -2902,7 +2902,7 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
}
if (!graph_lock())
- return 0;
+ return NULL;
/*
* We have to walk the chain again locked - to avoid duplicates:
@@ -2913,10 +2913,7 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
goto cache_hit;
}
- if (!add_chain_cache(curr, hlock, chain_key))
- return 0;
-
- return 1;
+ return add_chain_cache(curr, hlock, chain_key);
}
static int validate_chain(struct task_struct *curr,
--
1.8.3.1
Pass the lock chain pointer all the way from validate_chain() to
check_prev_add(). This prepares for the later effort to associate lock
chains with lock dependencies.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 51918d2..a0e62e5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2441,7 +2441,7 @@ static inline void inc_chains(void)
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance,
- struct lock_trace **const trace)
+ struct lock_trace **const trace, struct lock_chain *chain)
{
struct lock_list *entry;
int ret;
@@ -2549,7 +2549,8 @@ static inline void inc_chains(void)
* the end of this context's lock-chain - whichever comes first.
*/
static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next)
+check_prevs_add(struct task_struct *curr, struct held_lock *next,
+ struct lock_chain *chain)
{
struct lock_trace *trace = NULL;
int depth = curr->lockdep_depth;
@@ -2580,7 +2581,7 @@ static inline void inc_chains(void)
*/
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance,
- &trace);
+ &trace, chain);
if (!ret)
return 0;
@@ -2920,6 +2921,7 @@ static int validate_chain(struct task_struct *curr,
struct held_lock *hlock,
int chain_head, u64 chain_key)
{
+ struct lock_chain *chain;
/*
* Trylock needs to maintain the stack of held locks, but it
* does not add new dependencies, because trylock can be done
@@ -2931,7 +2933,7 @@ static int validate_chain(struct task_struct *curr,
* graph_lock for us)
*/
if (!hlock->trylock && hlock->check &&
- lookup_chain_cache_add(curr, hlock, chain_key)) {
+ (chain = lookup_chain_cache_add(curr, hlock, chain_key))) {
/*
* Check whether last held lock:
*
@@ -2966,7 +2968,7 @@ static int validate_chain(struct task_struct *curr,
* of the chain, and if it's not a secondary read-lock:
*/
if (!chain_head && ret != 2) {
- if (!check_prevs_add(curr, hlock))
+ if (!check_prevs_add(curr, hlock, chain))
return 0;
}
--
1.8.3.1
A direct lock dependency, such as L1 -> L2, can appear in multiple lock
chains. In order to associate lock chains with lock dependencies, add
new list fields to struct lock_list and struct lock_chain.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b8a835f..d7bec61 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -188,6 +188,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
*/
struct lock_list {
struct list_head entry;
+ struct list_head chains;
struct lock_class *class;
struct lock_class *links_to;
const struct lock_trace *trace;
@@ -207,6 +208,7 @@ struct lock_list {
* @depth: the number of held locks in this chain
* @base: the index in chain_hlocks for this chain
* @entry: the collided lock chains in lock_chain hash list
+ * @chain_entry: the link to the next lock_chain in the same dependency
* @chain_key: the hash key of this lock_chain
*/
struct lock_chain {
@@ -216,6 +218,7 @@ struct lock_chain {
base : 24;
/* 4 byte hole */
struct hlist_node entry;
+ struct list_head chain_entry;
u64 chain_key;
};
--
1.8.3.1
The comments regarding initial chain key and BFS are outdated, so update them.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d7bec61..0246a70 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -195,8 +195,7 @@ struct lock_list {
int distance;
/*
- * The parent field is used to implement breadth-first search, and the
- * bit 0 is reused to indicate if the lock has been accessed in BFS.
+ * The parent field is used to implement breadth-first search.
*/
struct lock_list *parent;
};
@@ -239,7 +238,7 @@ struct held_lock {
* as likely as possible - hence the 64-bit width.
*
* The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
+ * with INITIAL_CHAIN_KEY), here we store the previous hash value:
*/
u64 prev_chain_key;
unsigned long acquire_ip;
@@ -258,12 +257,12 @@ struct held_lock {
/*
* The lock-stack is unified in that the lock chains of interrupt
* contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
+ * the hashes by starting with a new chain if we cross into an
+ * interrupt context, and we also keep not adding cross-context
+ * lock dependencies - the lock usage graph walking covers that
+ * area anyway, and we'd just unnecessarily increase the number
+ * of dependencies otherwise. [Note: hardirq and softirq
+ * contexts are separated from each other too.]
*
* The following field is used to detect when we cross into an
* interrupt context:
--
1.8.3.1
The indirect dependency redundancy check was added for cross-release,
which has since been reverted. Later, as suggested by Peter, it was made
to take effect only when CONFIG_LOCKDEP_SMALL is set.
With (recursive) read-write lock types considered in the dependency
graph, the indirect dependency redundancy check would be quite
complicated to implement. Let's remove it for good. This inevitably
increases the number of dependencies, but after forward and backward
dependencies are combined, the increase will be offset.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 41 --------------------------------------
kernel/locking/lockdep_internals.h | 1 -
kernel/locking/lockdep_proc.c | 2 --
3 files changed, 44 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0e62e5..4838c99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1812,38 +1812,6 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return ret;
}
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
- int ret;
- struct lock_list *uninitialized_var(target_entry);
- struct lock_list src_entry = {
- .class = hlock_class(src),
- .parent = NULL,
- };
-
- debug_atomic_inc(nr_redundant_checks);
-
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
- if (!ret) {
- debug_atomic_inc(nr_redundant);
- ret = 2;
- } else if (ret < 0)
- ret = 0;
-
- return ret;
-}
-#endif
-
#ifdef CONFIG_TRACE_IRQFLAGS
static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2507,15 +2475,6 @@ static inline void inc_chains(void)
}
}
-#ifdef CONFIG_LOCKDEP_SMALL
- /*
- * Is the <prev> -> <next> link redundant?
- */
- ret = check_redundant(prev, next);
- if (ret != 1)
- return ret;
-#endif
-
if (!*trace) {
*trace = save_trace();
if (!*trace)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 18d85ae..f499426 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -177,7 +177,6 @@ struct lockdep_stats {
unsigned long redundant_softirqs_on;
unsigned long redundant_softirqs_off;
int nr_unused_locks;
- unsigned int nr_redundant_checks;
unsigned int nr_redundant;
unsigned int nr_cyclic_checks;
unsigned int nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dadb7b7..edc4a7b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
- seq_printf(m, " redundant checks: %11llu\n",
- debug_atomic_read(nr_redundant_checks));
seq_printf(m, " redundant links: %11llu\n",
debug_atomic_read(nr_redundant));
seq_printf(m, " find-mask forwards checks: %11llu\n",
--
1.8.3.1
The chain_head argument indicates whether the chain is a head, i.e.,
whether it has just one lock, which can instead be tested with
lock_chain->depth. There is no need for this explicit argument, so
remove it.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index de088da..e8ebb64 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2880,9 +2880,8 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
return add_chain_cache(curr, hlock, chain_key);
}
-static int validate_chain(struct task_struct *curr,
- struct held_lock *hlock,
- int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+ u64 chain_key)
{
struct lock_chain *chain;
/*
@@ -2930,7 +2929,7 @@ static int validate_chain(struct task_struct *curr,
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
- if (!chain_head && ret != 2) {
+ if (chain->depth > 1 && ret != 2) {
if (!check_prevs_add(curr, hlock, chain))
return 0;
}
@@ -2947,7 +2946,7 @@ static int validate_chain(struct task_struct *curr,
#else
static inline int validate_chain(struct task_struct *curr,
struct held_lock *hlock,
- int chain_head, u64 chain_key)
+ u64 chain_key)
{
return 1;
}
@@ -3781,7 +3780,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth;
- int chain_head = 0;
int class_idx;
u64 chain_key;
@@ -3895,14 +3893,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
*/
if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
- chain_head = 1;
}
hlock->prev_chain_key = chain_key;
- if (separate_irq_context(curr, hlock)) {
+ if (separate_irq_context(curr, hlock))
chain_key = INITIAL_CHAIN_KEY;
- chain_head = 1;
- }
+
chain_key = iterate_chain_key(chain_key, class_idx);
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
@@ -3915,7 +3911,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
- if (!validate_chain(curr, hlock, chain_head, chain_key))
+ if (!validate_chain(curr, hlock, chain_key))
return 0;
curr->curr_chain_key = chain_key;
--
1.8.3.1
Given a dependency <prev> -> <next>, two checks are performed:
1. Lock inversion deadlock:
check_deadlock_graph() searches for a path from <next> to <prev> in the
dependency graph; if one is found, we have a potential deadlock scenario.
But if the direct dependency <prev> -> <next> is already in the graph, there
cannot be such a path (from <next> to <prev>), because otherwise that path
would have been found when the last dependency completing the circle was
added.
2. IRQ usage violation:
check_irq_usage() searches for a path through <prev> to <next> that
connects an irq-safe lock to an irq-unsafe lock in the dependency graph.
Similarly, if <prev> -> <next> is already in the graph, such a path cannot
be newly created either.
Skipping these checks should greatly improve performance by reducing the
number of deadlock and IRQ usage checks. The number of skipped checks
exactly equals nr_redundant, which is actually not a small number.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4838c99..de088da 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2433,6 +2433,25 @@ static inline void inc_chains(void)
}
/*
+ * Is the <prev> -> <next> dependency already present?
+ *
+ * (this may occur even though this is a new chain: consider
+ * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
+ * chains - the second one will be new, but L1 already has
+ * L2 added to its dependency list, due to the first chain.)
+ */
+ list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+ if (entry->class == hlock_class(next)) {
+ debug_atomic_inc(nr_redundant);
+
+ if (distance == 1)
+ entry->distance = 1;
+
+ return 1;
+ }
+ }
+
+ /*
* Prove that the new <prev> -> <next> dependency would not
* create a deadlock scenario in the graph. (We do this by
* a breadth-first search into the graph starting at <next>,
@@ -2459,21 +2478,6 @@ static inline void inc_chains(void)
*/
if (next->read == 2 || prev->read == 2)
return 1;
- /*
- * Is the <prev> -> <next> dependency already present?
- *
- * (this may occur even though this is a new chain: consider
- * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
- * chains - the second one will be new, but L1 already has
- * L2 added to its dependency list, due to the first chain.)
- */
- list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
- if (entry->class == hlock_class(next)) {
- if (distance == 1)
- entry->distance = 1;
- return 1;
- }
- }
if (!*trace) {
*trace = save_trace();
--
1.8.3.1
The next lock to acquire already has its lock type set, so there is no
need to reassign it, regardless of whether it is a recursive read.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 7 -------
1 file changed, 7 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e8ebb64..acbd538 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2919,13 +2919,6 @@ static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
if (!ret)
return 0;
/*
- * Mark recursive read, as we jump over it when
- * building dependencies (just like we jump over
- * trylock entries):
- */
- if (ret == 2)
- hlock->read = 2;
- /*
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
--
1.8.3.1
We have a lockdep warning:
========================================================
WARNING: possible irq lock inversion dependency detected
5.1.0-rc7+ #141 Not tainted
--------------------------------------------------------
kworker/8:2/328 just changed the state of lock:
0000000007f1a95b (&(&host->lock)->rlock){-...}, at: ata_bmdma_interrupt+0x27/0x1c0 [libata]
but this lock took another, HARDIRQ-READ-unsafe lock in the past:
(&trig->leddev_list_lock){.+.?}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
Possible interrupt unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(&trig->leddev_list_lock);
                               local_irq_disable();
                               lock(&(&host->lock)->rlock);
                               lock(&trig->leddev_list_lock);
  <Interrupt>
    lock(&(&host->lock)->rlock);

 *** DEADLOCK ***
This splat is a false positive, enabled by the addition of recursive read
locks to the graph. Specifically, trig->leddev_list_lock is an rwlock_t,
which was not in the graph before recursive read lock support was added
to lockdep.
The false positive is caused by a check in the IRQ usage checking that
itself produces false positives. In mark_lock_irq(), the following checks
are currently performed:
 --------------------------------------
|    ->     |   unsafe   | read unsafe |
|--------------------------------------|
|   safe    |    F  B    |    F*  B*   |
|--------------------------------------|
| read safe |   F*  B*   |      -      |
 --------------------------------------
Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS
But the safe -> read-unsafe dependency does not actually create a deadlock
scenario.
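For instance, annotating the scenario from the splat above with the
read/write mode of each acquisition (my annotation; it assumes both
leddev_list_lock acquisitions are read_lock()s, as in the LED trigger
code):

	CPU0				CPU1
	----				----
	read_lock(&trig->leddev_list_lock);
					local_irq_disable();
					lock(&(&host->lock)->rlock);
					read_lock(&trig->leddev_list_lock);
	<Interrupt>
	lock(&(&host->lock)->rlock);

CPU1's read_lock() is not blocked by CPU0's reader, so CPU1 can make
progress and eventually release host->lock: the scenario cannot deadlock.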
Fix this by simply removing those two checks. And since safe-read ->
unsafe is indeed a problem, these checks are not actually "strict" per
se, so also remove the STRICT_READ_CHECKS macro. We are then left with
the following checks:
 --------------------------------------
|    ->     |   unsafe   | read unsafe |
|--------------------------------------|
|   safe    |    F  B    |      -      |
|--------------------------------------|
| read safe |    F  B    |      -      |
 --------------------------------------
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acbd538..1dda9de 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3221,8 +3221,6 @@ static int SOFTIRQ_verbose(struct lock_class *class)
return 0;
}
-#define STRICT_READ_CHECKS 1
-
static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
__STATE##_verbose,
@@ -3268,7 +3266,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
* Validate that the lock dependencies don't have conflicting usage
* states.
*/
- if ((!read || STRICT_READ_CHECKS) &&
+ if ((!read || !dir) &&
!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
@@ -3279,7 +3277,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
return 0;
- if (STRICT_READ_CHECKS &&
+ if (dir &&
!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
--
1.8.3.1
When looking up and adding a chain (i.e., in lookup_chain_cache_add() and
only there), explicitly pass the depth of the held lock stack that makes
up the chain. For now, the depth passed is always curr->lockdep_depth.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 48 ++++++++++++++++++++++++++----------------------
1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1dda9de..569d3c1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2600,12 +2600,12 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
* Returns the index of the first held_lock of the current chain
*/
static inline int get_first_held_lock(struct task_struct *curr,
- struct held_lock *hlock)
+ struct held_lock *hlock, int depth)
{
int i;
struct held_lock *hlock_curr;
- for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+ for (i = depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock->irq_context)
break;
@@ -2630,12 +2630,12 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
}
static void
-print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+print_chain_keys_held_locks(struct task_struct *curr,
+ struct held_lock *hlock_next, int depth)
{
struct held_lock *hlock;
u64 chain_key = INITIAL_CHAIN_KEY;
- int depth = curr->lockdep_depth;
- int i = get_first_held_lock(curr, hlock_next);
+ int i = get_first_held_lock(curr, hlock_next, depth);
printk("depth: %u (irq_context %u)\n", depth - i + 1,
hlock_next->irq_context);
@@ -2667,8 +2667,8 @@ static void print_chain_keys_chain(struct lock_chain *chain)
}
static void print_collision(struct task_struct *curr,
- struct held_lock *hlock_next,
- struct lock_chain *chain)
+ struct held_lock *hlock_next,
+ struct lock_chain *chain, int depth)
{
pr_warn("\n");
pr_warn("============================\n");
@@ -2679,7 +2679,7 @@ static void print_collision(struct task_struct *curr,
pr_warn("Hash chain already cached but the contents don't match!\n");
pr_warn("Held locks:");
- print_chain_keys_held_locks(curr, hlock_next);
+ print_chain_keys_held_locks(curr, hlock_next, depth);
pr_warn("Locks in cached chain:");
print_chain_keys_chain(chain);
@@ -2695,17 +2695,16 @@ static void print_collision(struct task_struct *curr,
* that there was a collision during the calculation of the chain_key.
* Returns: 0 not passed, 1 passed
*/
-static int check_no_collision(struct task_struct *curr,
- struct held_lock *hlock,
- struct lock_chain *chain)
+static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
+ struct lock_chain *chain, int depth)
{
#ifdef CONFIG_DEBUG_LOCKDEP
int i, j, id;
- i = get_first_held_lock(curr, hlock);
+ i = get_first_held_lock(curr, hlock, depth);
- if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
- print_collision(curr, hlock, chain);
+ if (DEBUG_LOCKS_WARN_ON(chain->depth != depth - (i - 1))) {
+ print_collision(curr, hlock, chain, depth);
return 0;
}
@@ -2713,7 +2712,7 @@ static int check_no_collision(struct task_struct *curr,
id = curr->held_locks[i].class_idx;
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
- print_collision(curr, hlock, chain);
+ print_collision(curr, hlock, chain, depth);
return 0;
}
}
@@ -2757,7 +2756,7 @@ static struct lock_chain *alloc_lock_chain(void)
*/
static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
- u64 chain_key)
+ u64 chain_key, int depth)
{
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2783,8 +2782,8 @@ static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
- i = get_first_held_lock(curr, hlock);
- chain->depth = curr->lockdep_depth + 1 - i;
+ i = get_first_held_lock(curr, hlock, depth);
+ chain->depth = depth + 1 - i;
BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
@@ -2837,17 +2836,21 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
* add it and return the chain - in this case the new dependency
* chain will be validated. If the key is already hashed, return
* NULL. (On return with the new chain graph_lock is held.)
+ *
+ * If the key is not hashed, the new chain is composed of @hlock
+ * and @depth worth of the current held lock stack, of which the
+ * held locks are in the same context as @hlock.
*/
static inline struct lock_chain *
lookup_chain_cache_add(struct task_struct *curr, struct held_lock *hlock,
- u64 chain_key)
+ u64 chain_key, int depth)
{
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
if (chain) {
cache_hit:
- if (!check_no_collision(curr, hlock, chain))
+ if (!check_no_collision(curr, hlock, chain, depth))
return NULL;
if (very_verbose(class)) {
@@ -2877,7 +2880,7 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
goto cache_hit;
}
- return add_chain_cache(curr, hlock, chain_key);
+ return add_chain_cache(curr, hlock, chain_key, depth);
}
static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
@@ -2895,7 +2898,8 @@ static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
* graph_lock for us)
*/
if (!hlock->trylock && hlock->check &&
- (chain = lookup_chain_cache_add(curr, hlock, chain_key))) {
+ (chain = lookup_chain_cache_add(curr, hlock, chain_key,
+ curr->lockdep_depth))) {
/*
* Check whether last held lock:
*
--
1.8.3.1
For a lock chain in which a lock (i.e., next) sits on top of a trylock or
multiple consecutive trylocks, if they are in the same context, we check
each prev-trylock -> next dependency as well as the first prev
non-trylock -> next dependency, illustrated as:
(The top is the latest lock.)
Lock1
Try Lock2
Try Lock3
Lock4
...
If the prev lock is not the lock directly preceding next (e.g., Lock3 or
Lock4), lockdep currently does not attempt to create a new lock chain for
that dependency. This patch treats each such case as if it came from a
new lock chain. If that chain already exists, it is a chain cache hit and
the check is not needed. As a result, compare:
Previous lock chains:
(1) ... -> Lock4 -> Lock3 -> Lock2 -> Lock1
Lock chains after this patch:
(1) ... -> Lock4 -> Lock3 -> Lock2 -> Lock1
(2) ... -> Lock4 -> Lock3 -> Lock1
After this, it is guaranteed that each dependency has at least one lock
chain associated with it.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 223 +++++++++++++++++++++++------------------------
1 file changed, 108 insertions(+), 115 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 569d3c1..21d9e99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1275,6 +1275,11 @@ static bool is_dynamic_key(const struct lock_class_key *key)
}
#ifdef CONFIG_PROVE_LOCKING
+static inline struct
+lock_chain *lookup_chain_cache_add(struct task_struct *curr,
+ struct held_lock *hlock,
+ u64 chain_key, int depth);
+
/*
* Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
@@ -2505,87 +2510,6 @@ static inline void inc_chains(void)
return 2;
}
-/*
- * Add the dependency to all directly-previous locks that are 'relevant'.
- * The ones that are relevant are (in increasing distance from curr):
- * all consecutive trylock entries and the final non-trylock entry - or
- * the end of this context's lock-chain - whichever comes first.
- */
-static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next,
- struct lock_chain *chain)
-{
- struct lock_trace *trace = NULL;
- int depth = curr->lockdep_depth;
- struct held_lock *hlock;
-
- /*
- * Debugging checks.
- *
- * Depth must not be zero for a non-head lock:
- */
- if (!depth)
- goto out_bug;
- /*
- * At least two relevant locks must exist for this
- * to be a head:
- */
- if (curr->held_locks[depth].irq_context !=
- curr->held_locks[depth-1].irq_context)
- goto out_bug;
-
- for (;;) {
- int distance = curr->lockdep_depth - depth + 1;
- hlock = curr->held_locks + depth - 1;
-
- /*
- * Only non-recursive-read entries get new dependencies
- * added:
- */
- if (hlock->read != 2 && hlock->check) {
- int ret = check_prev_add(curr, hlock, next, distance,
- &trace, chain);
- if (!ret)
- return 0;
-
- /*
- * Stop after the first non-trylock entry,
- * as non-trylock entries have added their
- * own direct dependencies already, so this
- * lock is connected to them indirectly:
- */
- if (!hlock->trylock)
- break;
- }
-
- depth--;
- /*
- * End of lock-stack?
- */
- if (!depth)
- break;
- /*
- * Stop the search if we cross into another context:
- */
- if (curr->held_locks[depth].irq_context !=
- curr->held_locks[depth-1].irq_context)
- break;
- }
- return 1;
-out_bug:
- if (!debug_locks_off_graph_unlock())
- return 0;
-
- /*
- * Clearly we all shouldn't be here, but since we made it we
- * can reliable say we messed up our state. See the above two
- * gotos for reasons why we could possibly end up here.
- */
- WARN_ON(1);
-
- return 0;
-}
-
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
@@ -2883,66 +2807,135 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
return add_chain_cache(curr, hlock, chain_key, depth);
}
-static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+/*
+ * Check whether last held lock:
+ *
+ * - is irq-safe, if this lock is irq-unsafe
+ * - is softirq-safe, if this lock is hardirq-unsafe
+ *
+ * And check whether the new lock's dependency graph could lead back to the
+ * previous lock:
+ *
+ * - within the current held-lock stack or
+ * - across our accumulated lock dependency graph
+ *
+ * any of these scenarios could lead to a deadlock.
+ */
+static int validate_chain(struct task_struct *curr, struct held_lock *next,
u64 chain_key)
{
struct lock_chain *chain;
+ struct lock_trace *trace = NULL;
+ struct held_lock *hlock;
+ int depth = curr->lockdep_depth;
+
/*
* Trylock needs to maintain the stack of held locks, but it
- * does not add new dependencies, because trylock can be done
- * in any order.
- *
+ * does not add new dependencies unless it is taken, because
+ * attempting to acquire a trylock does not block.
+ */
+ if (next->trylock || !next->check)
+ return 1;
+
+ /*
+ * Add the dependency to all previous locks that are 'relevant'. The
+ * ones that are relevant are (in increasing distance from next lock
+ * to acquire): all consecutive trylock entries and the final
+ * non-trylock entry - or the end of this context's lock-chain
+ * - whichever comes first.
+ */
+chain_again:
+ hlock = curr->held_locks + depth - 1;
+
+ /*
* We look up the chain_key and do the O(N^2) check and update of
- * the dependencies only if this is a new dependency chain.
- * (If lookup_chain_cache_add() return with 1 it acquires
- * graph_lock for us)
+ * the dependencies only if this is a new dependency chain. (If
+ * lookup_chain_cache_add() return with 1 it acquires graph_lock for
+ * us.)
*/
- if (!hlock->trylock && hlock->check &&
- (chain = lookup_chain_cache_add(curr, hlock, chain_key,
- curr->lockdep_depth))) {
- /*
- * Check whether last held lock:
- *
- * - is irq-safe, if this lock is irq-unsafe
- * - is softirq-safe, if this lock is hardirq-unsafe
- *
- * And check whether the new lock's dependency graph
- * could lead back to the previous lock:
- *
- * - within the current held-lock stack
- * - across our accumulated lock dependency records
- *
- * any of these scenarios could lead to a deadlock.
- */
+ chain = lookup_chain_cache_add(curr, next, chain_key, depth);
+ if (depth == curr->lockdep_depth) {
+ int ret;
+
+ if (!chain)
+ return 1;
/*
* The simple case: does the current hold the same lock
* already?
*/
- int ret = check_deadlock_current(curr, hlock);
+ ret = check_deadlock_current(curr, next);
if (!ret)
return 0;
/*
- * Add dependency only if this lock is not the head
- * of the chain, and if it's not a secondary read-lock:
+ * Add dependency only if this lock is not the head of the
+ * chain, and if it's not a second recursive-read lock. If
+ * not, there is no need to check further.
*/
- if (chain->depth > 1 && ret != 2) {
- if (!check_prevs_add(curr, hlock, chain))
+ if (!(chain->depth > 1 && ret != 2))
+ goto out_unlock;
+ }
+
+ /*
+ * Only non-recursive-read entries get new dependencies
+ * added:
+ */
+ if (chain) {
+ if (hlock->read != 2 && hlock->check) {
+ int distance = curr->lockdep_depth - depth + 1;
+
+ if (!check_prev_add(curr, hlock, next, distance,
+ &trace, chain))
return 0;
}
graph_unlock();
- } else {
- /* after lookup_chain_cache_add(): */
- if (unlikely(!debug_locks))
- return 0;
}
+ /*
+ * Stop after the first non-trylock entry, as non-trylock entries
+ * have added their own direct dependencies already, so this lock is
+ * connected to them indirectly:
+ */
+ if (!hlock->trylock)
+ goto out;
+
+ depth--;
+ /*
+ * End of lock-stack?
+ */
+ if (!depth)
+ goto out;
+ /*
+ * Stop the search if we cross into another context:
+ */
+ if (curr->held_locks[depth].irq_context !=
+ curr->held_locks[depth-1].irq_context)
+ goto out;
+
+ /*
+ * This is another direct dependency with a further previous lock
+ * that is separated by a trylock. We compose a lock chain out of
+ * this, then calculate the chain key, and look it up in the
+ * lock_chains. If it exists the check is actually not needed.
+ */
+ chain_key = iterate_chain_key(hlock->prev_chain_key,
+ hlock_class(next) - lock_classes);
+
+ goto chain_again;
+
+out_unlock:
+ graph_unlock();
+out:
+ /* after lookup_chain_cache_add(): */
+ if (unlikely(!debug_locks))
+ return 0;
+
return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
- struct held_lock *hlock,
+ struct held_lock *next,
u64 chain_key)
{
return 1;
--
1.8.3.1
Since a forward dependency has an exact 1:1 mapping to a backward
dependency, the forward and its counterpart backward lock_lists can be
combined into one lock_list entry. This is illustrated as:
Forward dependency: L1 -> L2
Backward dependency: L2 <- L1
So one lock_list can represent these two dependencies.
Besides the side benefit of saving memory, the direct reason for sharing
one lock_list between a forward dependency and its counterpart backward
dependency is that this later allows us to map lock chains to lock
dependencies in the graph, because a forward dependency and its
counterpart backward dependency have the same lock chains.
To make this possible, a lock_list struct has two-element arrays for its
classes and for its dependency list heads:
Lock_list representing L1 -> L2:
  class[0]: L1
  class[1]: L2
  entry[0]: linked into L2's dep_list[0] (the backward list)
  entry[1]: linked into L1's dep_list[1] (the forward list)
With this change, the rule for which class[] or entry[] element to use is
simple: whenever a forward graph search is performed, use class[1] and
entry[1]; whenever a backward graph search is performed, use class[0] and
entry[0].
There should be no functional change.
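For example (an illustration of the indexing rule, not part of the
patch), suppose the graph contains the two dependencies L1 -> L2 and
L3 -> L2. They are represented by two shared lock_list entries:

	entry A: class[0] = L1, class[1] = L2
	entry B: class[0] = L3, class[1] = L2

Entry A is linked into L1's dep_list[1] via entry[1] and into L2's
dep_list[0] via entry[0], and likewise entry B for L3 and L2. A forward
search from L1 walks L1's dep_list[1] and follows class[1] to reach L2;
a backward search from L2 walks L2's dep_list[0] and follows class[0] to
reach L1 and L3.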
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 22 +++--
kernel/locking/lockdep.c | 192 ++++++++++++++++++++++++------------------
kernel/locking/lockdep_proc.c | 6 +-
3 files changed, 131 insertions(+), 89 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06b686d..fcfc1dd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -183,14 +183,26 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
}
/*
- * Every lock has a list of other locks that were taken after it.
- * We only grow the list, never remove from it:
+ * Every lock has a list of other locks that were taken after or before
+ * it as lock dependencies. These dependencies constitute a graph, which
+ * depicts the locking behavior of the kernel and the workloads.
+ *
+ * Since forward dependencies and backward dependencies have an exact
+ * 1:1 mapping. A lock_list entry can be shared between them.
+ *
+ * For the locks after and before lists:
+ *
+ * @entry[0] is used to link to next backward lock, while @entry[1] is
+ * used for next forward lock.
+ *
+ * For the forward dependency L1 -> L2:
+ *
+ * @class[0] is used for L1 and @class[1] is for L2.
*/
struct lock_list {
- struct list_head entry;
+ struct list_head entry[2];
struct list_head chains;
- struct lock_class *class;
- struct lock_class *links_to;
+ struct lock_class *class[2];
const struct lock_trace *trace;
int distance;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8c53b59..33f8187 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -175,6 +175,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
return lock_classes + class_idx;
}
+static inline struct lock_class *fw_dep_class(struct lock_list *lock)
+{
+ return lock->class[1];
+}
+
+static inline struct lock_class *bw_dep_class(struct lock_list *lock)
+{
+ return lock->class[0];
+}
+
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
@@ -917,19 +927,21 @@ static bool in_any_class_list(struct list_head *e)
return false;
}
-static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
+static bool class_lock_list_valid(struct lock_class *c, int forward)
{
struct lock_list *e;
+ struct list_head *h = &c->dep_list[forward];
+ int other = 1 - forward;
- list_for_each_entry(e, h, entry) {
- if (e->links_to != c) {
+ list_for_each_entry(e, h, entry[forward]) {
+ if (e->class[other] != c) {
printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
c->name ? : "(?)",
(unsigned long)(e - list_entries),
- e->links_to && e->links_to->name ?
- e->links_to->name : "(?)",
- e->class && e->class->name ? e->class->name :
- "(?)");
+ e->class[other] && e->class[other]->name ?
+ e->class[other]->name : "(?)",
+ e->class[forward] && e->class[forward]->name ?
+ e->class[forward]->name : "(?)");
return false;
}
}
@@ -999,9 +1011,9 @@ static bool __check_data_structures(void)
/* Check whether all classes have valid lock lists. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
- if (!class_lock_list_valid(class, &class->dep_list[0]))
+ if (!class_lock_list_valid(class, 0))
return false;
- if (!class_lock_list_valid(class, &class->dep_list[1]))
+ if (!class_lock_list_valid(class, 1))
return false;
}
@@ -1020,11 +1032,18 @@ static bool __check_data_structures(void)
*/
for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
e = list_entries + i;
- if (!in_any_class_list(&e->entry)) {
+ if (!in_any_class_list(&e->entry[0])) {
printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
(unsigned int)(e - list_entries),
- e->class->name ? : "(?)",
- e->links_to->name ? : "(?)");
+ e->class[0]->name ? : "(?)",
+ e->class[1]->name ? : "(?)");
+ return false;
+ }
+ if (!in_any_class_list(&e->entry[1])) {
+ printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class[0]->name ? : "(?)",
+ e->class[1]->name ? : "(?)");
return false;
}
}
@@ -1035,13 +1054,22 @@ static bool __check_data_structures(void)
*/
for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
e = list_entries + i;
- if (in_any_class_list(&e->entry)) {
+ if (in_any_class_list(&e->entry[0])) {
+ printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class[0] && e->class[0]->name ? e->class[0]->name :
+ "(?)",
+ e->class[1] && e->class[1]->name ?
+ e->class[1]->name : "(?)");
+ return false;
+ }
+ if (in_any_class_list(&e->entry[1])) {
printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
(unsigned int)(e - list_entries),
- e->class && e->class->name ? e->class->name :
+ e->class[0] && e->class[0]->name ? e->class[0]->name :
"(?)",
- e->links_to && e->links_to->name ?
- e->links_to->name : "(?)");
+ e->class[1] && e->class[1]->name ?
+ e->class[1]->name : "(?)");
return false;
}
}
@@ -1304,8 +1332,8 @@ static struct lock_list *alloc_list_entry(void)
/*
* Add a new dependency to the head of the list:
*/
-static int add_lock_to_list(struct lock_class *this,
- struct lock_class *links_to, struct list_head *head,
+static int add_lock_to_list(struct lock_class *lock1,
+ struct lock_class *lock2,
unsigned long ip, int distance,
const struct lock_trace *trace)
{
@@ -1318,16 +1346,18 @@ static int add_lock_to_list(struct lock_class *this,
if (!entry)
return 0;
- entry->class = this;
- entry->links_to = links_to;
+ entry->class[0] = lock1;
+ entry->class[1] = lock2;
entry->distance = distance;
entry->trace = trace;
+
/*
* Both allocation and removal are done under the graph lock; but
* iteration is under RCU-sched; see look_up_lock_class() and
* lockdep_free_key_range().
*/
- list_add_tail_rcu(&entry->entry, head);
+ list_add_tail_rcu(&entry->entry[1], &lock1->dep_list[1]);
+ list_add_tail_rcu(&entry->entry[0], &lock2->dep_list[0]);
return 1;
}
@@ -1408,23 +1438,23 @@ static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
}
static inline void mark_lock_accessed(struct lock_list *lock,
- struct lock_list *parent)
+ struct lock_list *parent, int forward)
{
unsigned long nr;
nr = lock - list_entries;
WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
lock->parent = parent;
- lock->class->dep_gen_id = lockdep_dependency_gen_id;
+ lock->class[forward]->dep_gen_id = lockdep_dependency_gen_id;
}
-static inline unsigned long lock_accessed(struct lock_list *lock)
+static inline unsigned long lock_accessed(struct lock_list *lock, int forward)
{
unsigned long nr;
nr = lock - list_entries;
WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
- return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+ return lock->class[forward]->dep_gen_id == lockdep_dependency_gen_id;
}
static inline struct lock_list *get_lock_parent(struct lock_list *child)
@@ -1452,7 +1482,7 @@ static inline int get_lock_depth(struct lock_list *child)
*/
static inline struct list_head *get_dep_list(struct lock_list *lock, int forward)
{
- struct lock_class *class = lock->class;
+ struct lock_class *class = lock->class[forward];
return &class->dep_list[forward];
}
@@ -1487,7 +1517,7 @@ static int __bfs(struct lock_list *source_entry,
while ((lock = __cq_dequeue(cq))) {
- if (!lock->class) {
+ if (!lock->class[forward]) {
ret = -2;
goto exit;
}
@@ -1496,10 +1526,10 @@ static int __bfs(struct lock_list *source_entry,
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
- list_for_each_entry_rcu(entry, head, entry) {
- if (!lock_accessed(entry)) {
+ list_for_each_entry_rcu(entry, head, entry[forward]) {
+ if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
- mark_lock_accessed(entry, lock);
+ mark_lock_accessed(entry, lock, forward);
if (match(entry, data)) {
*target_entry = entry;
ret = 0;
@@ -1552,7 +1582,7 @@ static void print_lock_trace(const struct lock_trace *trace,
if (debug_locks_silent)
return;
printk("\n-> #%u", depth);
- print_lock_name(target->class);
+ print_lock_name(fw_dep_class(target));
printk(KERN_CONT ":\n");
print_lock_trace(target->trace, 6);
}
@@ -1564,7 +1594,7 @@ static void print_lock_trace(const struct lock_trace *trace,
{
struct lock_class *source = hlock_class(src);
struct lock_class *target = hlock_class(tgt);
- struct lock_class *parent = prt->class;
+ struct lock_class *parent = fw_dep_class(prt);
/*
* A direct locking problem where unsafe_class lock is taken
@@ -1641,7 +1671,7 @@ static void print_lock_trace(const struct lock_trace *trace,
static inline int class_equal(struct lock_list *entry, void *data)
{
- return entry->class == data;
+ return fw_dep_class(entry) == data;
}
static noinline void print_circular_bug(struct lock_list *this,
@@ -1715,7 +1745,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
struct lock_list this;
this.parent = NULL;
- this.class = class;
+ this.class[1] = class;
raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
@@ -1742,7 +1772,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
struct lock_list this;
this.parent = NULL;
- this.class = class;
+ this.class[0] = class;
raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
@@ -1786,7 +1816,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
int ret;
struct lock_list *uninitialized_var(target_entry);
struct lock_list src_entry = {
- .class = hlock_class(src),
+ .class[1] = hlock_class(src),
.parent = NULL,
};
@@ -1814,7 +1844,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
static inline int usage_accumulate(struct lock_list *entry, void *mask)
{
- *(unsigned long *)mask |= entry->class->usage_mask;
+ *(unsigned long *)mask |= bw_dep_class(entry)->usage_mask;
return 0;
}
@@ -1824,10 +1854,14 @@ static inline int usage_accumulate(struct lock_list *entry, void *mask)
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
+static inline int usage_match_forward(struct lock_list *entry, void *mask)
+{
+ return entry->class[1]->usage_mask & *(unsigned long *)mask;
+}
-static inline int usage_match(struct lock_list *entry, void *mask)
+static inline int usage_match_backward(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & *(unsigned long *)mask;
+ return entry->class[0]->usage_mask & *(unsigned long *)mask;
}
/*
@@ -1848,7 +1882,8 @@ static inline int usage_match(struct lock_list *entry, void *mask)
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match_forward,
+ target_entry);
return result;
}
@@ -1871,7 +1906,8 @@ static inline int usage_match(struct lock_list *entry, void *mask)
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match_backward,
+ target_entry);
return result;
}
@@ -1907,7 +1943,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
- struct lock_list *root)
+ struct lock_list *root,
+ int forward)
{
struct lock_list *entry = leaf;
int depth;
@@ -1916,7 +1953,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
depth = get_lock_depth(leaf);
do {
- print_lock_class_header(entry->class, depth);
+ print_lock_class_header(entry->class[forward], depth);
printk("%*s ... acquired at:\n", depth, "");
print_lock_trace(entry->trace, 2);
printk("\n");
@@ -1932,13 +1969,11 @@ static void print_lock_class_header(struct lock_class *class, int depth)
}
static void
-print_irq_lock_scenario(struct lock_list *safe_entry,
- struct lock_list *unsafe_entry,
+print_irq_lock_scenario(struct lock_class *safe_class,
+ struct lock_class *unsafe_class,
struct lock_class *prev_class,
struct lock_class *next_class)
{
- struct lock_class *safe_class = safe_entry->class;
- struct lock_class *unsafe_class = unsafe_entry->class;
struct lock_class *middle_class = prev_class;
if (middle_class == safe_class)
@@ -2026,20 +2061,21 @@ static void print_lock_class_header(struct lock_class *class, int depth)
pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
- print_lock_name(backwards_entry->class);
+ print_lock_name(bw_dep_class(backwards_entry));
pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
- print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
+ print_lock_trace(bw_dep_class(backwards_entry)->usage_traces[bit1], 1);
pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
- print_lock_name(forwards_entry->class);
+ print_lock_name(fw_dep_class(forwards_entry));
pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
pr_warn("...");
- print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
+ print_lock_trace(fw_dep_class(forwards_entry)->usage_traces[bit2], 1);
pr_warn("\nother info that might help us debug this:\n\n");
- print_irq_lock_scenario(backwards_entry, forwards_entry,
+ print_irq_lock_scenario(bw_dep_class(backwards_entry),
+ fw_dep_class(forwards_entry),
hlock_class(prev), hlock_class(next));
lockdep_print_held_locks(curr);
@@ -2048,14 +2084,14 @@ static void print_lock_class_header(struct lock_class *class, int depth)
prev_root->trace = save_trace();
if (!prev_root->trace)
return;
- print_shortest_lock_dependencies(backwards_entry, prev_root);
+ print_shortest_lock_dependencies(backwards_entry, prev_root, 0);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
next_root->trace = save_trace();
if (!next_root->trace)
return;
- print_shortest_lock_dependencies(forwards_entry, next_root);
+ print_shortest_lock_dependencies(forwards_entry, next_root, 1);
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -2202,7 +2238,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
* accumulated usage mask.
*/
this.parent = NULL;
- this.class = hlock_class(prev);
+ this.class[0] = hlock_class(prev);
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
if (ret < 0) {
@@ -2221,7 +2257,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
forward_mask = exclusive_mask(usage_mask);
that.parent = NULL;
- that.class = hlock_class(next);
+ that.class[1] = hlock_class(next);
ret = find_usage_forwards(&that, forward_mask, &target_entry1);
if (ret < 0) {
@@ -2236,7 +2272,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
* list whose usage mask matches the exclusive usage mask from the
* lock found on the forward list.
*/
- backward_mask = original_mask(target_entry1->class->usage_mask);
+ backward_mask = original_mask(fw_dep_class(target_entry1)->usage_mask);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
if (ret < 0) {
@@ -2250,8 +2286,8 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
* Step 4: narrow down to a pair of incompatible usage bits
* and report it.
*/
- ret = find_exclusive_match(target_entry->class->usage_mask,
- target_entry1->class->usage_mask,
+ ret = find_exclusive_match(bw_dep_class(target_entry)->usage_mask,
+ fw_dep_class(target_entry1)->usage_mask,
&backward_bit, &forward_bit);
if (DEBUG_LOCKS_WARN_ON(ret == -1))
return 1;
@@ -2438,8 +2474,8 @@ static inline void inc_chains(void)
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
- list_for_each_entry(entry, &hlock_class(prev)->dep_list[1], entry) {
- if (entry->class == hlock_class(next)) {
+ list_for_each_entry(entry, &hlock_class(prev)->dep_list[1], entry[1]) {
+ if (fw_dep_class(entry) == hlock_class(next)) {
debug_atomic_inc(nr_redundant);
if (distance == 1)
@@ -2484,19 +2520,12 @@ static inline void inc_chains(void)
}
/*
- * Ok, all validations passed, add the new lock
- * to the previous lock's dependency list:
+ * Ok, all validations passed, add the new lock <next> to the
+ * dependency list of the previous lock <prev>:
*/
- ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
- &hlock_class(prev)->dep_list[1],
- next->acquire_ip, distance, *trace);
-
- if (!ret)
- return 0;
-
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
- &hlock_class(next)->dep_list[0],
next->acquire_ip, distance, *trace);
+
if (!ret)
return 0;
@@ -3089,7 +3118,7 @@ static void print_usage_bug_scenario(struct held_lock *lock)
pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
- print_lock_name(other->class);
+ print_lock_name(other->class[forwards]);
pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
pr_warn("\nother info that might help us debug this:\n");
@@ -3106,11 +3135,11 @@ static void print_usage_bug_scenario(struct held_lock *lock)
depth--;
} while (entry && entry != root && (depth >= 0));
if (forwards)
- print_irq_lock_scenario(root, other,
- middle ? middle->class : root->class, other->class);
+ print_irq_lock_scenario(fw_dep_class(root), fw_dep_class(other),
+ middle ? middle->class[1] : root->class[1], other->class[1]);
else
- print_irq_lock_scenario(other, root,
- middle ? middle->class : other->class, root->class);
+ print_irq_lock_scenario(bw_dep_class(other), bw_dep_class(root),
+ middle ? middle->class[0] : other->class[0], root->class[0]);
lockdep_print_held_locks(curr);
@@ -3118,7 +3147,7 @@ static void print_usage_bug_scenario(struct held_lock *lock)
root->trace = save_trace();
if (!root->trace)
return;
- print_shortest_lock_dependencies(other, root);
+ print_shortest_lock_dependencies(other, root, forwards);
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -3137,7 +3166,7 @@ static void print_usage_bug_scenario(struct held_lock *lock)
struct lock_list *uninitialized_var(target_entry);
root.parent = NULL;
- root.class = hlock_class(this);
+ root.class[1] = hlock_class(this);
ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
if (ret < 0) {
print_bfs_bug(ret);
@@ -3164,7 +3193,7 @@ static void print_usage_bug_scenario(struct held_lock *lock)
struct lock_list *uninitialized_var(target_entry);
root.parent = NULL;
- root.class = hlock_class(this);
+ root.class[0] = hlock_class(this);
ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
if (ret < 0) {
print_bfs_bug(ret);
@@ -4798,11 +4827,12 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
*/
for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
entry = list_entries + i;
- if (entry->class != class && entry->links_to != class)
+ if (entry->class[0] != class && entry->class[1] != class)
continue;
__clear_bit(i, list_entries_in_use);
nr_list_entries--;
- list_del_rcu(&entry->entry);
+ list_del_rcu(&entry->entry[0]);
+ list_del_rcu(&entry->entry[1]);
}
if (list_empty(&class->dep_list[0]) &&
list_empty(&class->dep_list[1])) {
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 989c27e..80cef1f 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -82,10 +82,10 @@ static int l_show(struct seq_file *m, void *v)
print_name(m, class);
seq_puts(m, "\n");
- list_for_each_entry(entry, &class->dep_list[1], entry) {
+ list_for_each_entry(entry, &class->dep_list[1], entry[1]) {
if (entry->distance == 1) {
- seq_printf(m, " -> [%p] ", entry->class->key);
- print_name(m, entry->class);
+ seq_printf(m, " -> [%p] ", entry->class[1]->key);
+ print_name(m, entry->class[1]);
seq_puts(m, "\n");
}
}
--
1.8.3.1
Direct dependencies need to keep track of their read-write lock types.
Two bit fields, which share the distance field, are added to the
lock_list struct to store the types.
With a dependency lock1 -> lock2, lock_type1 holds the type of lock1 and
lock_type2 holds the type of lock2, where the values are taken from the
lock_type enum.
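As an illustration only (not the kernel code; a userspace sketch where
MAX_LOCK_DEPTH is an assumed value and the struct is a cut-down stand-in
for struct lock_list), the packing looks like this:

#include <assert.h>
#include <stdio.h>

#define LOCK_TYPE_BITS	2
#define MAX_LOCK_DEPTH	48	/* assumed here for illustration */

/* Cut-down stand-in: the two type fields share a 32-bit word with distance. */
struct dep {
	unsigned int lock_type1 : LOCK_TYPE_BITS,	/* type of lock1 in lock1 -> lock2 */
		     lock_type2 : LOCK_TYPE_BITS,	/* type of lock2 in lock1 -> lock2 */
		     distance   : 32 - 2*LOCK_TYPE_BITS;
};

int main(void)
{
	/* 28 bits remain for distance, far more than any lock depth needs */
	static_assert((1u << (32 - 2*LOCK_TYPE_BITS)) > MAX_LOCK_DEPTH,
		      "distance field too small");

	struct dep d = { .lock_type1 = 1, .lock_type2 = 0, .distance = 1 };
	printf("types %u -> %u, distance %u, sizeof %zu\n",
	       (unsigned)d.lock_type1, (unsigned)d.lock_type2,
	       (unsigned)d.distance, sizeof(d));
	return 0;
}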
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 15 ++++++++++++++-
kernel/locking/lockdep.c | 25 +++++++++++++++++++++++--
2 files changed, 37 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2335447..eab8a90 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -182,6 +182,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
to->class_cache[i] = NULL;
}
+#define LOCK_TYPE_BITS 2
+
/*
* Every lock has a list of other locks that were taken after or before
* it as lock dependencies. These dependencies constitute a graph, which
@@ -204,7 +206,17 @@ struct lock_list {
struct list_head chains;
struct lock_class *class[2];
const struct lock_trace *trace;
- int distance;
+
+ /*
+ * The lock_type fields keep track of the lock type of this
+ * dependency.
+ *
+ * With L1 -> L2, lock_type1 stores the lock type of L1, and
+ * lock_type2 stores that of L2.
+ */
+ unsigned int lock_type1 : LOCK_TYPE_BITS,
+ lock_type2 : LOCK_TYPE_BITS,
+ distance : 32 - 2*LOCK_TYPE_BITS;
/*
* The parent field is used to implement breadth-first search.
@@ -359,6 +371,7 @@ enum lock_type {
LOCK_TYPE_WRITE = 0,
LOCK_TYPE_READ,
LOCK_TYPE_RECURSIVE,
+ NR_LOCK_TYPE,
};
/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bdc7e94..18303ff 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1379,9 +1379,17 @@ static int add_lock_to_list(struct lock_class *lock1,
struct lock_class *lock2,
unsigned long ip, int distance,
const struct lock_trace *trace,
- struct lock_chain *chain)
+ struct lock_chain *chain,
+ int lock_type1, int lock_type2)
{
struct lock_list *entry;
+
+ /*
+ * The distance bit field in struct lock_list must be large
+ * enough to hold the maximum lock depth.
+ */
+ BUILD_BUG_ON((1 << (32 - 2*LOCK_TYPE_BITS)) < MAX_LOCK_DEPTH);
+
/*
* Lock not present yet - get a new dependency struct and
* add it to the list:
@@ -1394,6 +1402,8 @@ static int add_lock_to_list(struct lock_class *lock1,
entry->class[1] = lock2;
entry->distance = distance;
entry->trace = trace;
+ entry->lock_type1 = lock_type1;
+ entry->lock_type2 = lock_type2;
/*
* Both allocation and removal are done under the graph lock; but
@@ -1537,6 +1547,16 @@ static inline struct list_head *get_dep_list(struct lock_list *lock, int forward
return &class->dep_list[forward];
}
+static inline int get_lock_type1(struct lock_list *lock)
+{
+ return lock->lock_type1;
+}
+
+static inline int get_lock_type2(struct lock_list *lock)
+{
+ return lock->lock_type2;
+}
+
/*
* Forward- or backward-dependency search, used for both circular dependency
* checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2580,7 +2600,8 @@ static inline void inc_chains(void)
* dependency list of the previous lock <prev>:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
- next->acquire_ip, distance, *trace, chain);
+ next->acquire_ip, distance, *trace, chain,
+ prev->read, next->read);
if (!ret)
return 0;
--
1.8.3.1
- find_lock_in_path() tries to find whether a lock class is in the path.
- find_next_dep_in_path() returns the next dependency after a
given dependency in the path.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 18303ff..41cb735 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1535,6 +1535,37 @@ static inline int get_lock_depth(struct lock_list *child)
}
/*
+ * Return the dependency if @lock is in the path, otherwise NULL.
+ */
+static inline struct lock_list *
+find_lock_in_path(struct lock_class *lock, struct lock_list *target)
+{
+ while ((target = get_lock_parent(target)))
+ if (fw_dep_class(target) == lock)
+ return target;
+
+ return NULL;
+}
+
+/*
+ * Walk back to the next dependency after @source from @target. Note
+ * that @source must be in the path, and @source cannot be the same as
+ * @target, otherwise this is going to fail and return NULL.
+ */
+static inline struct lock_list *
+find_next_dep_in_path(struct lock_list *source, struct lock_list *target)
+{
+ while (get_lock_parent(target) != source) {
+ target = get_lock_parent(target);
+
+ if (!target)
+ break;
+ }
+
+ return target;
+}
+
+/*
* Return the forward or backward dependency list.
*
* @lock: the lock_list to get its class's dependency list
--
1.8.3.1
With recursive-read locks, a circle is not a sufficient condition for a
deadlock. As a result, we need to be able to continue the search after a
match when that match turns out not to be the one wanted. __bfs() is
adjusted to that end. The basic idea is to enqueue a node's children
before matching the node.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 51 ++++++++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d13b6b7..3ad97bc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1623,7 +1623,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry, int forward)
+ struct lock_list **target_entry, int forward, bool init)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1631,19 +1631,11 @@ static int __bfs(struct lock_list *source_entry,
struct circular_queue *cq = &lock_cq;
int ret = 1;
- if (match(source_entry, data)) {
- *target_entry = source_entry;
- ret = 0;
- goto exit;
+ if (init) {
+ __cq_init(cq);
+ __cq_enqueue(cq, source_entry);
}
- head = get_dep_list(source_entry, forward);
- if (list_empty(head))
- goto exit;
-
- __cq_init(cq);
- __cq_enqueue(cq, source_entry);
-
while ((lock = __cq_dequeue(cq))) {
if (!lock->class[forward]) {
@@ -1655,25 +1647,34 @@ static int __bfs(struct lock_list *source_entry,
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ /*
+ * Enqueue a node's children before match() so that even
+ * this node matches but is not wanted, we can continue
+ * the search.
+ */
list_for_each_entry_rcu(entry, head, entry[forward]) {
if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
+
mark_lock_accessed(entry, lock, forward);
- if (match(entry, data)) {
- *target_entry = entry;
- ret = 0;
- goto exit;
- }
if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
+
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
+
+ if (match(lock, data)) {
+ *target_entry = lock;
+ ret = 0;
+ goto exit;
+ }
+
}
exit:
return ret;
@@ -1682,9 +1683,9 @@ static int __bfs(struct lock_list *source_entry,
static inline int __bfs_forwards(struct lock_list *src_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
- return __bfs(src_entry, data, match, target_entry, 1);
+ return __bfs(src_entry, data, match, target_entry, 1, init);
}
static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1692,7 +1693,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 0);
+ return __bfs(src_entry, data, match, target_entry, 0, true);
}
static void print_lock_trace(const struct lock_trace *trace,
@@ -1864,7 +1865,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
unsigned long count = 0;
struct lock_list *uninitialized_var(target_entry);
- __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
return count;
}
@@ -1918,12 +1919,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
*/
static noinline int
check_path(struct lock_class *target, struct lock_list *src_entry,
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
int ret;
ret = __bfs_forwards(src_entry, (void *)target, class_equal,
- target_entry);
+ target_entry, init);
if (unlikely(ret < 0))
print_bfs_bug(ret);
@@ -1951,7 +1952,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
if (unlikely(!ret)) {
if (!*trace) {
@@ -2012,7 +2013,7 @@ static inline int usage_match_backward(struct lock_list *entry, void *mask)
debug_atomic_inc(nr_find_usage_forwards_checks);
result = __bfs_forwards(root, &usage_mask, usage_match_forward,
- target_entry);
+ target_entry, true);
return result;
}
--
1.8.3.1
Lock chains are derived from the current task's held lock stack. A lock
chain is a new sequence of locks taken by a task or by interrupt contexts.
It is not hard to notice that lockdep validates locking behavior on a lock
chain basis, hence the name of its main function, validate_chain(). With a
new lock chain, there may be a new direct dependency, and if so, the new
dependency is checked.
Every direct lock dependency is formed by the top two locks of some lock
chain, and one direct dependency is normally associated with a number
of lock chains.
With such relationship between lock dependencies and lock chains, this
patch maps lock chains to their corresponding lock dependencies, for
example:
Lock dependency: L1 -> L2:
|
|--> Lock chain 1: .... L1 -> L2
|
|--> Lock chain 2: .... L1 -> L2
|
.....
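As a rough userspace sketch of this mapping (the struct and helper names
below are invented for illustration and are not the kernel's
lock_list/lock_chain), a dependency simply keeps a list of the chains
whose top two locks form it:

#include <stdio.h>

/* Toy stand-ins: a chain records its lock sequence; a dependency
 * L1 -> L2 keeps a list of every chain whose top two locks are L1, L2. */
struct chain {
	const char *locks;	/* e.g. "A L1 L2" */
	struct chain *next;	/* next chain mapped to the same dependency */
};

struct dependency {
	const char *l1, *l2;
	struct chain *chains;	/* head of the chain list */
};

static void map_chain(struct dependency *dep, struct chain *c)
{
	c->next = dep->chains;
	dep->chains = c;
}

int main(void)
{
	struct dependency d = { "L1", "L2", NULL };
	struct chain c1 = { "A L1 L2", NULL }, c2 = { "B C L1 L2", NULL };

	map_chain(&d, &c1);
	map_chain(&d, &c2);

	printf("dependency %s -> %s is the top of:\n", d.l1, d.l2);
	for (struct chain *c = d.chains; c; c = c->next)
		printf("  lock chain: %s\n", c->locks);
	return 0;
}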
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 85 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 81 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 33f8187..754a718 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1046,6 +1046,35 @@ static bool __check_data_structures(void)
e->class[1]->name ? : "(?)");
return false;
}
+
+#ifdef CONFIG_PROVE_LOCKING
+ list_for_each_entry_rcu(chain, &e->chains, chain_entry) {
+ if (!check_lock_chain_key(chain))
+ return false;
+
+ /* <next> lock */
+ class = lock_classes +
+ chain_hlocks[chain->base + chain->depth - 1];
+ if (class != e->class[1]) {
+ printk(KERN_INFO "list entry %d has bad <next> lock; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ class->name ? : "(?)",
+ e->class[1]->name ? : "(?)");
+ return false;
+ }
+
+ /* <prev> lock */
+ class = lock_classes +
+ chain_hlocks[chain->base + chain->depth - 2];
+ if (class != e->class[0]) {
+ printk(KERN_INFO "list entry %d has bad <prev> lock; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ class->name ? : "(?)",
+ e->class[0]->name ? : "(?)");
+ return false;
+ }
+ }
+#endif
}
/*
@@ -1072,6 +1101,16 @@ static bool __check_data_structures(void)
e->class[1]->name : "(?)");
return false;
}
+
+ if (!list_empty(&e->chains)) {
+ printk(KERN_INFO "list entry %d has nonempty chain list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class[0] && e->class[0]->name ? e->class[0]->name :
+ "(?)",
+ e->class[1] && e->class[1]->name ?
+ e->class[1]->name : "(?)");
+ return false;
+ }
}
return true;
@@ -1128,6 +1167,9 @@ static void init_data_structures_once(void)
INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
+
+ for (i = 0; i < ARRAY_SIZE(list_entries); i++)
+ INIT_LIST_HEAD(&list_entries[i].chains);
}
static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
@@ -1302,6 +1344,7 @@ static bool is_dynamic_key(const struct lock_class_key *key)
}
#ifdef CONFIG_PROVE_LOCKING
+static DECLARE_BITMAP(lock_chains_in_dep, MAX_LOCKDEP_CHAINS);
static inline struct
lock_chain *lookup_chain_cache_add(struct task_struct *curr,
struct held_lock *hlock,
@@ -1335,7 +1378,8 @@ static struct lock_list *alloc_list_entry(void)
static int add_lock_to_list(struct lock_class *lock1,
struct lock_class *lock2,
unsigned long ip, int distance,
- const struct lock_trace *trace)
+ const struct lock_trace *trace,
+ struct lock_chain *chain)
{
struct lock_list *entry;
/*
@@ -1359,6 +1403,12 @@ static int add_lock_to_list(struct lock_class *lock1,
list_add_tail_rcu(&entry->entry[1], &lock1->dep_list[1]);
list_add_tail_rcu(&entry->entry[0], &lock2->dep_list[0]);
+ /*
+ * Add the corresponding lock chain to lock_list's chains.
+ */
+ list_add_tail_rcu(&chain->chain_entry, &entry->chains);
+ __set_bit(chain - lock_chains, lock_chains_in_dep);
+
return 1;
}
@@ -2478,6 +2528,9 @@ static inline void inc_chains(void)
if (fw_dep_class(entry) == hlock_class(next)) {
debug_atomic_inc(nr_redundant);
+ list_add_tail_rcu(&chain->chain_entry, &entry->chains);
+ __set_bit(chain - lock_chains, lock_chains_in_dep);
+
if (distance == 1)
entry->distance = 1;
@@ -2524,8 +2577,7 @@ static inline void inc_chains(void)
* dependency list of the previous lock <prev>:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
- next->acquire_ip, distance, *trace);
-
+ next->acquire_ip, distance, *trace, chain);
if (!ret)
return 0;
@@ -4783,8 +4835,11 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
* If the modified lock chain matches an existing lock chain, drop
* the modified lock chain.
*/
- if (lookup_chain_cache(chain_key))
+ if (lookup_chain_cache(chain_key)) {
+ if (test_bit(chain - lock_chains, lock_chains_in_dep))
+ list_del_rcu(&chain->chain_entry);
return;
+ }
new_chain = alloc_lock_chain();
if (WARN_ON_ONCE(!new_chain)) {
debug_locks_off();
@@ -4792,6 +4847,10 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
}
*new_chain = *chain;
hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+ if (test_bit(chain - lock_chains, lock_chains_in_dep)) {
+ list_replace_rcu(&chain->chain_entry, &new_chain->chain_entry);
+ __set_bit(new_chain - lock_chains, lock_chains_in_dep);
+ }
#endif
}
@@ -4817,6 +4876,7 @@ static void remove_class_from_lock_chains(struct pending_free *pf,
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
struct lock_list *entry;
+ struct lock_chain *chain;
int i;
WARN_ON_ONCE(!class->key);
@@ -4833,6 +4893,20 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
nr_list_entries--;
list_del_rcu(&entry->entry[0]);
list_del_rcu(&entry->entry[1]);
+
+#ifdef CONFIG_PROVE_LOCKING
+ /*
+ * Destroy the chain list by deleting every chain associated
+ * with this lock_list entry.
+ *
+ * The corresponding lock chains in this lock_list will
+ * be removed later by remove_class_from_lock_chains().
+ */
+ list_for_each_entry_rcu(chain, &entry->chains, chain_entry) {
+ __clear_bit(chain - lock_chains, lock_chains_in_dep);
+ list_del_rcu(&chain->chain_entry);
+ }
+#endif
}
if (list_empty(&class->dep_list[0]) &&
list_empty(&class->dep_list[1])) {
@@ -4919,6 +4993,8 @@ static void __free_zapped_classes(struct pending_free *pf)
#ifdef CONFIG_PROVE_LOCKING
bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
+ bitmap_andnot(lock_chains_in_dep, lock_chains_in_dep,
+ pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}
@@ -5197,6 +5273,7 @@ void __init lockdep_init(void)
+ sizeof(lock_cq)
+ sizeof(lock_chains)
+ sizeof(lock_chains_in_use)
+ + sizeof(lock_chains_in_dep)
+ sizeof(chain_hlocks)
#endif
) / 1024
--
1.8.3.1
Add a macro LOCK_TYPE_NEST for the nest lock type and mark nested locks in
check_deadlock_current(). Unlike the other LOCK_TYPE_* values, this lock type
is used only inside lockdep.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 7 +++++--
kernel/locking/lockdep_internals.h | 2 ++
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4cd844e..755b584 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2591,6 +2591,7 @@ static inline void inc_chains(void)
* 0: on deadlock detected;
* 1: on OK;
* LOCK_TYPE_RECURSIVE: on recursive read
+ * LOCK_TYPE_NEST: on nest lock
*/
static int
check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2620,7 +2621,7 @@ static inline void inc_chains(void)
* nesting behaviour.
*/
if (nest)
- return LOCK_TYPE_RECURSIVE;
+ return LOCK_TYPE_NEST;
print_deadlock_bug(curr, prev, next);
return 0;
@@ -3126,12 +3127,14 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
if (!ret)
return 0;
+
/*
* Add dependency only if this lock is not the head of the
* chain, and if it's not a second recursive-read lock. If
* not, there is no need to check further.
*/
- if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
+ if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE &&
+ ret != LOCK_TYPE_NEST))
goto out_unlock;
}
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index f499426..37f6b0d 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -26,6 +26,8 @@ enum lock_usage_bit {
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+#define LOCK_TYPE_NEST NR_LOCK_TYPE
+
/*
* Usage-state bitmasks:
*/
--
1.8.3.1
Since multiple matches may be needed in the graph search, a matched lock
needs to rejoin the search for another match; therefore introduce
mark_lock_unaccessed().
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 05c70be..4cd844e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1521,6 +1521,15 @@ static inline void mark_lock_accessed(struct lock_list *lock,
lock->class[forward]->dep_gen_id = lockdep_dependency_gen_id;
}
+static inline void mark_lock_unaccessed(struct lock_list *lock)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
+ fw_dep_class(lock)->dep_gen_id--;
+}
+
static inline unsigned long lock_accessed(struct lock_list *lock, int forward)
{
unsigned long nr;
--
1.8.3.1
Exclusive locks can be seen as a special use case of read-write locks: they
are read-write locks in which only the exclusive write locks are ever used.
In this sense, read-write locks are a general form of locks and should be
well supported in lockdep, which, however, is not currently the case. This
patch series designs and implements an algorithm to do just that.
In the following sections, we first give an introduction to the locks in
Linux, then state the problem, and after that we present the design of the
general read-write lock deadlock detection algorithm, along the way loosely
proving, with a series of lemmas, that the algorithm indeed solves the
deadlock detection problem.
Read-write locks
----------------
It is necessary to take a close look at read-write locks. Their design
subtleties can have a big impact on deadlock checking. To this end, two
elements are singled out: recursive readers and the order in which a lock is
granted to readers and writers.
Recursive readers are conceptually very simple: if a task is allowed to
recursively acquire the same read lock it already holds, the lock has
recursive readers. That is it, no more and no less.
Lock grant order determines, when readers and writers contend
simultaneously, who gets the lock before the others.
Let's briefly review the three read-write locks in the Linux kernel. To be
concise, in the following, R stands for read lock, W stands for write lock
or exclusive lock, RR stands for recursive-read lock, and X stands for any
of R, RR, or W.
-----
rwsem
-----
The famous rwsem is not recursive. The lock prioritizes writers over
readers: a waiting writer blocks later readers from taking the lock
concurrently with earlier readers. This grant order is necessary because
otherwise, when readers own the lock, incoming readers would immediately
join them, and as a result writers might never get a chance to take the
lock under a steady stream of readers.
Consequently, this case is not a deadlock scenario:
T1 T2
-- --
R1 W2
W2 R1
But this one is a deadlock scenario:
T1 T2 T3
-- -- --
R1 W2
W1
W2 R1
Since lockdep cannot differentiate the two, it has to assume the
worst-case scenario.
------
rwlock
------
rwlock is recursive and readers are prioritized. Note that this grant order
has to be implemented because otherwise it would be far too easy to end up
in a deadlock scenario:
T1 T2
-- --
RR1
W1
RR1 [ Deadlock ]
Consequently, this case for rwlock is not a deadlock scenario:
T1 T2 T3
-- -- --
RR1 W2
W1
W2 RR1 [ No deadlock ]
-------
qrwlock
-------
qrwlock is a combination of rwsem and rwlock depending on the locking
contexts. It is recursive in interrupt context like rwlock, but not in
process context like rwsem, so a qrwlock lock class can be used as both
read lock and recursive-read lock [1]. Consequently, this case is a deadlock
scenario:
T1 T2 T3
-- -- --
(In IRQ
context)
RR1 W2
W1
W2 R1 [ Deadlock ]
and this is not a deadlock scenario:
T1 T2 T3
-- -- --
(In IRQ
context)
W1 R2
W2
RR2 W1 [ No deadlock ]
It is important to note that, conceptually, the two locking elements - locks
being recursive and lock grant order (e.g., whether a waiting writer can
block later readers) - are by nature completely orthogonal to each other.
But because whether a waiting writer can block later readers is so critical
to determining deadlock scenarios, and because of the read-write locks Linux
currently has, we use the following terms to refer to read-write locks in
terms of these two locking elements:
+----------------------+-----------+-------------------------+
| | Recursive | Writer blocking readers |
+----------------------+-----------+-------------------------+
| Recursive-read locks | Yes | No |
+----------------------+-----------+-------------------------+
| Read locks | No | Yes |
+----------------------+-----------+-------------------------+
Problem statement
-----------------
Waiting relations in a circular fashion are at the heart of a deadlock.
Because of the waiting circle, no one gets what it waits for and thus
they all wait forever. A circle is universally *necessary* for deadlocks,
but it is not *sufficient* with read-write locks. Deadlock circles can
be arbitrarily long with any number of tasks, each contributing some
arcs. But no matter how long the circle is, it has to complete with a
final arc, so the deadlock detection problem is stated below as
a lemma:
Lemma #1
--------
A deadlock can be detected, and can be detected at the earliest, only at
the *final* arc that completes a waiting circle. In other words, at the
final arc, the problem is to find out whether the circle about to be
formed is a deadlock or not.
The following three facts are relevant to solving the problem:
(a) With a new arc, determining whether it completes a circle is an
easy task.
(b) A new direct arc (direct dependency) can introduce a number of
indirect arcs (indirect dependencies).
(c) Checking all the additional arcs (direct and indirect) efficiently
is not so easy since lock operations are frequent and lock graph
can be gigantic. Actually, if it is free to check any number of
arcs, deadlock detection even with read-write locks is fairly easy.
In solving the problem, performance is a real difficulty, so a good
algorithm should not only make it self-evident that it solves the problem
but also do so at low cost. We take a divide-and-conquer approach to the
solution: the entire problem is broken down into a comprehensive list of
simple and abstract problem cases to solve, and if each and every one of
them is solved, the entire problem is solved. These cases are labeled
Case #x in the rest of this document.
Ready? Here we go.
Solution
--------
Given a deadlock with a waiting circle, assume there are n tasks
contributing to that circle, denoted as T_1, T_2, ..., T_n. Depending on
n, there are two cases: (1) n = 1 and (2) n >= 2.
The first case corresponds to the recursion deadlock scenario, which can
be checked against the task's held lock stack when attempting to acquire a
lock. Therefore, this (n = 1) case is skipped and we focus on the
second one.
For n >= 2, the n tasks in the deadlock can be grouped as (T_1, ..., T_n-1)
and T_n. By virtually consolidating the former, we get one big imagined
T1 and a T2 (with task numbers adjusted). This essentially means:
Lemma #2
--------
Two tasks (T1 and T2) can virtually represent any situation with any
number of tasks to check for deadlock.
We previously proposed the two-task model based on this. The two-task
model has the workload's locking behavior divided into two tasks:
- T1: the entire previous locking behavior with the lock dependency
graph to depict it.
- T2: the current new locking behavior, i.e., the X1 -> X2 direct
dependency.
It is worth noting that we also assume there can be multiple concurrent
instances of T1 to address the worst-case yet quite probable scenarios. Also
note that there are nuances regarding the equivalence between T1 and the
real locking behavior, so the following cases should be correctly handled:
Case #1.1:
T1 T2
-- --
W1
RR2 W3
W3 W1 [Deadlock]
Case #1.2:
T1 T2
-- --
W1
RR2
(W1 RR2 released
in T1)
RR2 W3
W3 W1 [No deadlock]
Case #1.3:
T1a T1b T2
--- --- --
W1 RR2 W3
RR2 W3 W1 [No deadlock]
How long the actual circle is does not really matter and hence we have Lemma #3:
Lemma #3
--------
Any deadlock scenario can be converted to an ABBA deadlock.
where AB comes from T1 and BA from T2 (T1 and T2 are constructed per
Lemma #2). This says that any other associated locks in the graph are not
critical or necessary and thus may be ignored. For example:
T1 T2
-- --
X1 X7
X2 X2
A B
X3 X3
X4 X8
B A [Deadlock]
X5
X6
from a deadlock perspective is equivalent to an ABBA:
T1 T2
-- --
A B
B A [Deadlock]
Based on Lemma #1, the problem as stated is what difference the final
missing arc makes: deadlock or not. Based on Lemma #3, the goal is to find
an ABBA. We can then reason that, if a deadlock scenario is about to be
created with this arc, the final arc is definitely the BA or a critical
part of the BA (based on Lemma #2).
Now let's formalize an ABBA deadlock by defining the exclusiveness table
of lock acquiring operations for a read-write lock. Two locking operations
being exclusive means that they cannot be granted the lock at the same
time.
Table #1:
+---------------------+---------------------+-----------+------------+
| X.A vs. X.B | Recursive-read lock | Read lock | Write lock |
+---------------------+---------------------+-----------+------------+
| Recursive-read lock | No | Yes | Yes |
+---------------------+---------------------+-----------+------------+
| Read lock | No | Yes | Yes |
+---------------------+---------------------+-----------+------------+
| Write lock | Yes | Yes | Yes |
+---------------------+---------------------+-----------+------------+
Note that this table already accounts for the fact that a read lock can be
blocked by a waiting write lock. Also note that when using this table A is
above B (i.e., A is acquired or requested before B), which means that for
this case:
	T1	T2
	--	--
	X.A
		X.B
one should look up X.A vs. X.B in the table, instead of the other way
around, and for this one:
	T1	T2
	--	--
		X.A
	X.B
one should likewise look up X.A vs. X.B in the table, instead of the
other way around.
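For illustration, a minimal standalone sketch of Table #1 as a lookup
table (the enum ordering follows the series' lock_type enum; the other
names are invented):

#include <stdio.h>

/* Same ordering as the series' enum lock_type:
 * 0 = write, 1 = read, 2 = recursive read. */
enum { TYPE_WRITE, TYPE_READ, TYPE_RECURSIVE };

/* excl[A][B] == 1 means that X.A (acquired/requested first) and X.B
 * (requested later) are exclusive on the same lock, i.e. Table #1
 * with rows = X.A and columns = X.B. */
static const int excl[3][3] = {
	/* X.B:          W  R  RR */
	/* X.A = W  */ { 1, 1, 1 },
	/* X.A = R  */ { 1, 1, 0 },
	/* X.A = RR */ { 1, 1, 0 },
};

static const char *const name[] = { "W", "R", "RR" };

int main(void)
{
	for (int a = 0; a < 3; a++)
		for (int b = 0; b < 3; b++)
			printf("%-2s vs %-2s: %s\n", name[a], name[b],
			       excl[a][b] ? "exclusive" : "compatible");
	return 0;
}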
Depending on the read-write lock type of the final direct arc or
dependency in T2, there are nine contrived cases to solve:
- read lock and read lock
- read lock and recursive-read lock
- read lock and write lock
- write lock and read lock
- write lock and recursive-read lock
- write lock and write lock
- recursive read lock and read lock
- recursive read lock and recursive-read lock
- recursive read lock and write lock
---------------------------------------------------------------
When the final dependency is ended with read lock and read lock
---------------------------------------------------------------
Case #2.1:
T1 T2
-- --
X1 R2
W2 R1 [Deadlock]
Case #2.2:
T1 T2
X1 R2
R2 R1 [Deadlock]
Case #2.3:
T1 T2
X1 R2
RR2 R1 [No deadlock]
--------------------------------------------------------------
When the final dependency is read lock and recursive-read lock
--------------------------------------------------------------
Case #3.1:
T1 T2
W1 R2
X2 RR1 [Deadlock]
Case #3.2:
T1 T2
-- --
R1 R2
X2 RR1 [No deadlock]
Case #3.3:
T1 T2
RR1 R2
X2 RR1 [No deadlock]
-----------------------------------------------------
When the final dependency is read lock and write lock
-----------------------------------------------------
Case #4.1:
T1 T2
-- --
X1 R2
R2 W1 [Deadlock]
Case #4.2:
T1 T2
-- --
X1 R2
W2 W1 [Deadlock]
Case #4.3:
T1 T2
-- --
X1 R2
RR2 W1 [No deadlock]
--------------------------------------------------------------
When the final dependency is recursive-read lock and read lock
--------------------------------------------------------------
Case #5.1:
T1 T2
-- --
X1 RR2
R2 R1 [Deadlock]
Case #5.2:
T1 T2
X1 RR2
W2 R1 [Deadlock]
Case #5.3:
T1 T2
X1 RR2
RR2 R1 [No deadlock]
------------------------------------------------------------------------
When the final dependency is recursive-read lock and recursive-read lock
------------------------------------------------------------------------
Case #6.1:
T1 T2
W1 RR2
W2 RR1 [Deadlock]
Case #6.2:
T1 T2
W1 RR2
R2 RR1 [Deadlock]
Case #6.3:
T1 T2
R1 RR2
X2 RR1 [No deadlock]
Case #6.4:
T1 T2
RR1 RR2
X2 RR1 [No deadlock]
Case #6.5:
T1 T2
-- --
X1 RR2
RR2 RR1 [No deadlock]
---------------------------------------------------------------
When the final dependency is recursive-read lock and write lock
---------------------------------------------------------------
Case #7.1:
T1 T2
X1 RR2
W2 W1 [Deadlock]
Case #7.2:
T1 T2
X1 RR2
R2 W1 [Deadlock]
Case #7.3:
T1 T2
X1 RR2
RR2 W1 [No deadlock]
-----------------------------------------------------
When the final dependency is write lock and read lock
-----------------------------------------------------
Case #8.1:
T1 T2
X1 W2
X2 R1 [Deadlock]
---------------------------------------------------------------
When the final dependency is write lock and recursive-read lock
---------------------------------------------------------------
Case #9.1:
T1 T2
W1 W2
X2 RR1 [Deadlock]
Case #9.2:
T1 T2
R1 W2
X2 RR1 [No deadlock]
Case #9.3:
T1 T2
RR1 W2
X2 RR1 [No deadlock]
------------------------------------------------------
When the final dependency is write lock and write lock
------------------------------------------------------
Case #10:
T1 T2
-- --
X1 W2
X2 W1 [Deadlock]
Solving the above cases (with no false positives or false negatives) is
actually fairly easy; we therefore have our first *Simple
Algorithm*:
----------------
Simple Algorithm
----------------
Step 1
------
Keep track of each dependency's read or write ends. There are nine
combinations of types:
- read -> read
- read -> recursive read
- read -> write
- recursive read -> read
- recursive read -> recursive read
- recursive read -> write
- write -> read
- write -> recursive read
- write -> write
Step 2
------
The redundancy check (which decides whether a new dependency needs to be
added to the graph) for a direct dependency needs to be beefed up to
consider the dependency's read- or write-ended types. We do the redundancy
check only for direct dependencies. A direct dependency is redundant with
respect to an existing direct dependency only if their ends have the same
types. If a new dependency has read-write lock types different from an
existing dependency, the existing one will be "upgraded": its end types are
set towards the more exclusive (exclusiveness increases from recursive
read -> read -> write).
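A minimal sketch of this "upgrade" rule, assuming the series' convention
that a smaller type value means a more exclusive end (write < read <
recursive read); the struct and helper names are illustrative:

#include <stdio.h>

enum { TYPE_WRITE, TYPE_READ, TYPE_RECURSIVE };	/* smaller = more exclusive */

struct dep {
	int type1, type2;	/* read-write types of the two ends of L1 -> L2 */
};

/* Return 1 if the new (t1, t2) dependency is fully redundant, 0 if the
 * existing entry had to be upgraded towards the more exclusive type on
 * either end. */
static int merge_dep(struct dep *entry, int t1, int t2)
{
	int upgraded = 0;

	if (t1 < entry->type1) {	/* e.g. write beats read */
		entry->type1 = t1;
		upgraded = 1;
	}
	if (t2 < entry->type2) {
		entry->type2 = t2;
		upgraded = 1;
	}
	return !upgraded;
}

int main(void)
{
	struct dep d = { TYPE_RECURSIVE, TYPE_READ };	/* existing RR -> R */

	printf("redundant: %d\n", merge_dep(&d, TYPE_READ, TYPE_READ));	/* 0: upgraded to R -> R */
	printf("redundant: %d\n", merge_dep(&d, TYPE_RECURSIVE, TYPE_READ));	/* 1: nothing new */
	printf("now %d -> %d\n", d.type1, d.type2);
	return 0;
}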
Step 3
------
Traverse the entire dependency graph (there may be more than one circle)
to find whether a circle can be formed by adding the new direct
dependency. A circle may or may not be a deadlock. The deciding rule is
simple: it is a deadlock if the new dependency to be added has an inverse
dependency in the graph with exclusive lock types for both A and B (the
ABBA deadlock scenario according to Lemma #3). Lock exclusiveness
relations are listed in Table #1.
Note that the 9 cases above, classified by the types of a direct
dependency, can be reduced to two very simple cases for deciding whether
there is a deadlock scenario or not.
T1 T2
-- --
X1.A X2.A
X2.B X1.B
- If X1.A vs. X1.B are exclusive and X2.A vs. X2.B are exclusive, then
it is a deadlock (see the sketch below)
- Otherwise, no deadlock
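Putting this decision rule into a small self-contained sketch (Table #1
again in the series' enum ordering, with an invented helper name):

#include <stdio.h>

enum { TYPE_WRITE, TYPE_READ, TYPE_RECURSIVE };

/* Table #1: rows = the earlier operation (A), columns = the later one (B). */
static const int excl[3][3] = {
	{ 1, 1, 1 },	/* W  vs W, R, RR */
	{ 1, 1, 0 },	/* R  vs W, R, RR */
	{ 1, 1, 0 },	/* RR vs W, R, RR */
};

/*
 * T1 holds X1 (type x1a) and then waits for X2 (type x2b);
 * T2 holds X2 (type x2a) and then waits for X1 (type x1b).
 * Deadlock iff both locks are contended exclusively.
 */
static int is_abba_deadlock(int x1a, int x1b, int x2a, int x2b)
{
	return excl[x1a][x1b] && excl[x2a][x2b];
}

int main(void)
{
	/* Case #2.1: X1/W2 in T1 vs R2/R1 in T2 (X1 taken as write here) */
	printf("%d\n", is_abba_deadlock(TYPE_WRITE, TYPE_READ,
					TYPE_READ, TYPE_WRITE));	/* 1: deadlock */
	/* Case #6.5: X1/RR2 in T1 vs RR2/RR1 in T2 */
	printf("%d\n", is_abba_deadlock(TYPE_WRITE, TYPE_RECURSIVE,
					TYPE_RECURSIVE, TYPE_RECURSIVE));	/* 0: no deadlock */
	return 0;
}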
Note that the dependency graph is traversed until all dependency circles
are found and checked. This avoids the following false-negative case:
Case #11:
T1 T2
-- --
RR1
RR2
(RR1 RR2 released)
W1 RR2
W2 RR1 [Deadlock]
*Simple Algorithm* done loosely described.
I wish we lived in a fairy-tale world where the problem were solved so
easily, but reality is not so kind. There are huge false negatives owing
to indirect dependencies, illustrated by the following case that remains
to be solved:
Case #12:
T1 T2
-- --
X1 X3
RR2 RR2
X3 X1 [Deadlock]
where the X1's and X3's in the two tasks create a deadlock scenario (they
can be any one of the deadlock cases above). When checking the direct
dependency RR2 -> X1, there is no obvious deadlock according to our *Simple
Algorithm*; however, under the hood the actual deadlock is formed after
RR2 introduces an indirect dependency X3 -> X1, which can easily be
missed.
To detect deadlock scenarios like Case #12, a naive option is to check
all additional indirect dependencies, but that option would be far too
inefficient and is therefore dismissed. To find an efficient solution
instead, let's first contrive a no-deadlock Case #13 for comparison
(which is essentially rewritten from Case #5 or Case #7).
Case #13:
T1 T2
-- --
X1
X3 RR2
RR2 X1 [No deadlock]
Having considered Case #12 and Case #13, a final working algorithm can
be formulated:
---------------
Final Algorithm
---------------
This *Final Algorithm* is beefed up from Simple Algorithm using the
following lemmas:
Lemma #4
--------
The direct dependency RR2 -> X1 that serves in the path from X3 -> X1
is *critical*.
Although the actual deadlock in Case #12 cannot be easily found by our
*Simple Algorithm*, by strengthening the algorithm somewhat the deadlock
*definitely* can be found when adding the direct dependency (i.e.,
RR2 -> X1 in Case #12), see Lemma #1. In other words, a *critical* direct
dependency (i.e., the final arc) suffices to find any deadlock if there is
a deadlock, and otherwise there is no deadlock. As a matter of fact, after
a false deadlock (RR2 -> X1 -> RR2), if the search continues, the true
deadlock (RR2 -> X1 -> RR2 -> X3 -> RR2) will eventually be brought out
into the open.
Lemma #5
--------
The game changer in Case #12, compared with Case #13, is that Case #12
has X3 in T2 whereas Case #13 does not.
Having considered this, our *Final Algorithm* can be adjusted from the
*Simple Algorithm* by adding:
(a). Continue searching the graph to find a new circle.
(b). In the new circle, if any previous locks in T2's stack (or chain) are
in it, check whether the circle is indeed a deadlock. This is done by
checking each newly introduced indirect dependency (such as
X3 -> X1 in Case #12) according to our Simple Algorithm as before.
(c). If a deadlock is found, the algorithm is done; otherwise go to (a)
unless the entire graph has been traversed (a loose sketch of this
loop follows).
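In loose pseudo-C (purely an illustrative outline with stubbed helpers,
not the kernel implementation), the (a)-(c) loop is roughly:

#include <stdbool.h>
#include <stdio.h>

struct circle { int id; };	/* stand-in for one circle found in the graph */

/* Stubs standing in for the graph search and the Simple Algorithm checks. */
static bool find_next_circle(struct circle *c)
{ static int n; c->id = n++; return c->id < 2; }
static bool direct_arc_is_deadlock(const struct circle *c)   { return c->id < 0; }
static bool t2_lock_in_circle(const struct circle *c)        { return c->id == 1; }
static bool indirect_dep_is_deadlock(const struct circle *c) { return c->id == 1; }

static int report(const struct circle *c)
{ printf("deadlock found in circle %d\n", c->id); return 1; }

int main(void)
{
	struct circle c;

	/* (a) keep searching the graph for circles closed by the new arc */
	while (find_next_circle(&c)) {
		/* Simple Algorithm: is the new direct arc itself an ABBA? */
		if (direct_arc_is_deadlock(&c))
			return report(&c);
		/* (b) if a previous lock from T2's stack sits in this circle,
		 *     also check the indirect dependency it introduces        */
		if (t2_lock_in_circle(&c) && indirect_dep_is_deadlock(&c))
			return report(&c);
		/* (c) otherwise continue with (a) until the graph is exhausted */
	}
	printf("no deadlock\n");
	return 0;
}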
Why does Lemma #5 work?
Lemma #6
--------
Lemma #5 nicely raises the question of whether an indirect dependency
involving a previous lock in T2 is *necessary* to consider. The answer is
yes; otherwise our *Simple Algorithm* would already have solved the
problem.
Lemma #5 and Lemma #6 extend our two-task model which divides the workload's
locking behavior into two virtual tasks:
- T1: the entire previous locking behavior with the lock dependency
graph to depict it.
- T2: the current task's held lock stack (or lock chain) worth of
direct and indirect dependencies (previously T2 was just one additional
new direct dependency).
Lemma #7
--------
It is also natural to ask whether considering only indirect dependencies
whose starting lock is in T2 is *sufficient*: what if an indirect
dependency has its starting lock in T1? The answer is yes, it is
sufficient.
Because Lemma #2 and Lemma #3 say that any deadlock is an ABBA, T1 can
only contribute an AB and T2 must have a BA, and the final direct
dependency is the BA or the ending part of the BA in T2.
Finally, since we assumed T1 has no deadlock and Lemma #4 says the new
dependency is *critical*, then any deadlock formed by the new direct or
indirect dependencies introduced in T2 (which is the BA part) will
definitely be found with *Simple Algorithm* or *Final Algorithm*
respectively.
This is perhaps the most subtle and difficult part of this algorithm. To
test that Lemma #7 holds true, one may try to contrive a case based on
Case #13, or freely construct a deadlock case if possible.
Anyway, we welcome any new cases. Cases matter in this algorithm because,
as stated before, this algorithm solves the read-write lock deadlock
detection problem by having solved all the contrived cases (be they
deadlock or no deadlock). If anyone comes up with a new case that is not
covered here, it may well defeat this algorithm; otherwise the algorithm
works, sound and complete.
*Final Algorithm* done loosely described.
Now that the bulk of the implementation of the read-write lock deadlock
detection algorithm is done, the lockdep internal performance statistics can
be collected:
(The workload used as usual is: make clean; reboot; make vmlinux -j8.)
Before this series
------------------
direct dependencies: 9284 [max: 32768]
indirect dependencies: 41804
all direct dependencies: 215468
dependency chains: 12109 [max: 65536]
dependency chain hlocks: 45588 [max: 327680]
in-hardirq chains: 87
in-softirq chains: 398
in-process chains: 10757
stack-trace entries: 125652 [max: 524288]
combined max dependencies: 377734896
max bfs queue depth: 253
chain lookup misses: 13218
chain lookup hits: 7016143232
cyclic checks: 11906
redundant checks: 0
redundant links: 0
find-mask forwards checks: 2082
find-mask backwards checks: 5481
After this series
-----------------
direct dependencies: 4579 [max: 32768]
indirect dependencies: 39614
all direct dependencies: 211089
dependency chains: 12893 [max: 65536]
dependency chain hlocks: 47623 [max: 327680]
in-hardirq chains: 87
in-softirq chains: 401
in-process chains: 11083
stack-trace entries: 127520 [max: 524288]
combined max dependencies: 392107584
max bfs queue depth: 230
chain lookup misses: 14258
chain lookup hits: 909929196
cyclic checks: 5178
redundant links: 5988
find-mask forwards checks: 526
find-mask backwards checks: 5128
Noticeably, we have slightly more chains and chain lookup misses, but
neither raises concerns. More noticeably, we have half as many direct
dependencies due to the consolidation of forward and backward lock_lists.
And thanks to the added cached trylock chains and to skipping checks when
a direct dependency is already present, the chain lookup hits are
significantly fewer, the cyclic checks are halved, and the find-mask
forwards checks drop to about a quarter, all of which translates into
better performance with this patch series.
Reference:
[1]: Recursive read deadlocks and Where to find them by Boqun Feng at
Linux Plumbers Conference 2018.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 227 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 178 insertions(+), 49 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5c97dbf..f1c083c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -86,6 +86,7 @@
*/
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;
+static bool inside_selftest(void);
static int graph_lock(void)
{
@@ -2040,38 +2041,159 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
}
/*
- * Prove that the dependency graph starting at <src> can not
- * lead to <target>. If it can, there is a circle when adding
- * <target> -> <src> dependency.
+ * Prove that when adding <target> -> <src> dependency into the dependency
+ * graph, there is no deadlock. This is done by:
+ *
+ * 1. Determine whether the graph starting at <src> can lead to <target>.
+ * If it can, there is a circle.
+ *
+ * 2. If there is a circle, there may or may not be a deadlock, which is
+ * then comprehensively checked according to the general read-write
+ * lock deadlock detection algorithm.
*
* Print an error and return 0 if it does.
*/
static noinline int
-check_deadlock_graph(struct held_lock *src, struct held_lock *target,
- struct lock_trace **const trace)
+check_deadlock_graph(struct task_struct *curr, struct held_lock *src,
+ struct held_lock *target, struct lock_trace **const trace)
{
- int ret;
+ int ret, i;
+ bool init = true;
struct lock_list *uninitialized_var(target_entry);
struct lock_list src_entry = {
.class[1] = hlock_class(src),
.parent = NULL,
};
+ if (DEBUG_LOCKS_WARN_ON(hlock_class(src) == hlock_class(target)))
+ return 0;
+
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
+ while (true) {
+ ret = check_path(hlock_class(target), &src_entry,
+ &target_entry, init);
+ init = false;
+
+ /* Found a circle, is it deadlock? */
+ if (unlikely(!ret)) {
+ struct lock_list *parent;
- if (unlikely(!ret)) {
- if (!*trace) {
/*
- * If save_trace fails here, the printing might
- * trigger a WARN but because of the !nr_entries it
- * should not do bad things.
+ * Check this direct dependency.
+ *
+ * Step 1: next's lock type and source dependency's
+ * lock type are exclusive, no?
+ *
+ * Find the first dependency after source dependency.
*/
- *trace = save_trace();
- }
+ parent = find_next_dep_in_path(&src_entry, target_entry);
+ if (!parent) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return -3;
+ }
+
+ if (!lock_excl_table[get_lock_type1(parent)]
+ [hlock_type(src)])
+ goto cont;
+
+ /*
+ * Step 2: prev's lock type and target dependency's
+ * lock type are exclusive, yes?
+ */
+ if (lock_excl_table[hlock_type(target)]
+ [get_lock_type2(target_entry)])
+ goto print;
- print_circular_bug(&src_entry, target_entry, src, target);
+ /*
+ * Check indirect dependency from even further
+ * previous lock.
+ */
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *prev = curr->held_locks + i;
+
+ if (prev->irq_context != src->irq_context)
+ continue;
+
+ /*
+ * We arrived at the same prev lock in this
+ * direct dependency checking.
+ */
+ if (prev == target)
+ break;
+
+ /*
+ * This previous lock has the same class as
+ * the src (the next lock to acquire); this
+ * must be a recursive read case. Skip.
+ */
+ if (hlock_class(prev) == hlock_class(src))
+ continue;
+
+ /*
+ * Since the src lock (the next lock to
+ * acquire) is not nested lock, so up to
+ * now this prev class cannot be the src
+ * class, then does the path have this
+ * previous lock?
+ *
+ * With read locks it would be possible a
+ * lock can reoccur in a path. For example:
+ *
+ * -> RL1 -> RL2 -> RL3 -> RL1 -> ...
+ *
+ * and for another three examples:
+ *
+ * Ex1: -> RL1 -> WL2 -> RL3 -> RL1
+ * Ex2: -> WL1 -> RL2 -> RL3 -> WL1
+ * Ex3: -> RL1 -> RL2 -> RL3 -> WL1
+ *
+ * This means a path may encounter a lock
+ * twice or more, but we use the
+ * breadth-first search algorithm, which
+ * finds the shortest path, so this path
+ * cannot have the same (middle) lock
+ * multiple times.
+ * However, is Ex3 a problem?
+ */
+ parent = find_lock_in_path(hlock_class(prev),
+ target_entry);
+ if (parent) {
+ /*
+ * Yes, we have a candidate indirect
+ * dependency to check.
+ *
+ * Again step 2: new prev's lock
+ * type and its dependency in graph
+ * are exclusive, yes?
+ *
+ * Note that we don't need step 1
+ * again.
+ */
+ if (lock_excl_table[hlock_type(prev)]
+ [get_lock_type2(parent)])
+ goto print;
+ }
+ }
+cont:
+ mark_lock_unaccessed(target_entry);
+ continue;
+print:
+ if (!*trace) {
+ /*
+ * If save_trace fails here, the printing
+ * might trigger a WARN but because of the
+ * !nr_entries it should not do bad things.
+ */
+ save_trace();
+ }
+
+ print_circular_bug(&src_entry, target_entry,
+ src, target);
+ break;
+ } else
+ /* The graph is all traversed or an error occurred */
+ break;
}
return ret;
@@ -2687,7 +2809,7 @@ static inline void inc_chains(void)
struct lock_trace **const trace, struct lock_chain *chain)
{
struct lock_list *entry;
- int ret;
+ int ret, upgraded = 0;
if (!hlock_class(prev)->key || !hlock_class(next)->key) {
/*
@@ -2708,6 +2830,17 @@ static inline void inc_chains(void)
}
/*
+ * Filter out the direct dependency with the same lock class, which
+ * is legitimate only if the next lock is the recursive-read type,
+ * otherwise we should not have been here in the first place.
+ */
+ if (hlock_class(prev) == hlock_class(next)) {
+ WARN_ON_ONCE(next->read != LOCK_TYPE_RECURSIVE);
+ WARN_ON_ONCE(prev->read == LOCK_TYPE_WRITE);
+ return 2;
+ }
+
+ /*
* Is the <prev> -> <next> dependency already present?
*
* (this may occur even though this is a new chain: consider
@@ -2719,24 +2852,31 @@ static inline void inc_chains(void)
if (fw_dep_class(entry) == hlock_class(next)) {
debug_atomic_inc(nr_redundant);
- list_add_tail_rcu(&chain->chain_entry, &entry->chains);
- __set_bit(chain - lock_chains, lock_chains_in_dep);
-
/*
* For a direct dependency, smaller type value
* generally means more lock exclusiveness; we
* keep the more exclusive one, in other words,
* we "upgrade" the dependency if we can.
*/
- if (prev->read < get_lock_type1(entry))
+ if (prev->read < get_lock_type1(entry)) {
set_lock_type1(entry, prev->read);
- if (next->read < get_lock_type2(entry))
+ upgraded = 1;
+ }
+ if (next->read < get_lock_type2(entry)) {
set_lock_type2(entry, next->read);
+ upgraded = 1;
+ }
if (distance == 1)
entry->distance = 1;
- return 1;
+ if (!upgraded) {
+ list_add_tail_rcu(&chain->chain_entry,
+ &entry->chains);
+ __set_bit(chain - lock_chains,
+ lock_chains_in_dep);
+ return 1;
+ }
}
}
@@ -2750,24 +2890,13 @@ static inline void inc_chains(void)
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- ret = check_deadlock_graph(next, prev, trace);
+ ret = check_deadlock_graph(curr, next, prev, trace);
if (unlikely(ret <= 0))
return 0;
if (!check_irq_usage(curr, prev, next))
return 0;
- /*
- * For recursive read-locks we do all the dependency checks,
- * but we dont store read-triggered dependencies (only
- * write-triggered dependencies). This ensures that only the
- * write-side dependencies matter, and that if for example a
- * write-lock never takes any other locks, then the reads are
- * equivalent to a NOP.
- */
- if (next->read == LOCK_TYPE_RECURSIVE || prev->read == LOCK_TYPE_RECURSIVE)
- return 1;
-
if (!*trace) {
*trace = save_trace();
if (!*trace)
@@ -3160,20 +3289,15 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
/*
* Add dependency only if this lock is not the head of the
- * chain, and if it's not a second recursive-read lock. If
- * not, there is no need to check further.
+ * chain, and if it's not a nest lock, otherwise there is no
+ * need to check further.
*/
- if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE &&
- ret != LOCK_TYPE_NEST))
+ if (!(chain->depth > 1 && ret != LOCK_TYPE_NEST))
goto out_unlock;
}
- /*
- * Only non-recursive-read entries get new dependencies
- * added:
- */
if (chain) {
- if (hlock->read != LOCK_TYPE_RECURSIVE && hlock->check) {
+ if (hlock->check) {
int distance = curr->lockdep_depth - depth + 1;
if (!check_prev_add(curr, hlock, next, distance,
@@ -5015,7 +5139,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
#ifdef CONFIG_PROVE_LOCKING
struct lock_chain *new_chain;
u64 chain_key;
- int i;
+ int i, found = 0;
for (i = chain->base; i < chain->base + chain->depth; i++) {
if (chain_hlocks[i] != class - lock_classes)
@@ -5030,21 +5154,26 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
sizeof(chain_hlocks_type[0]));
}
/*
- * Each lock class occurs at most once in a lock chain so once
- * we found a match we can break out of this loop.
+ * A recursive-read lock class may occur more than once in a
+ * lock chain.
*/
- goto recalc;
+ found = 1;
}
/* Since the chain has not been modified, return. */
- return;
+ if (!found)
+ return;
-recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
chain_hlocks_type[i]);
+ /*
+ * It would be weird if the new key remained the same, but we
+ * can't say it's impossible.
+ */
if (chain->depth && chain->chain_key == chain_key)
return;
+
/* Overwrite the chain key for concurrent RCU readers. */
WRITE_ONCE(chain->chain_key, chain_key);
/*
--
1.8.3.1
Lock exclusiveness table gathers the information about whether two lock
acquisitions for the same lock instance can be granted concurrently.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 755b584..5c97dbf 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -962,6 +962,36 @@ static bool class_lock_list_valid(struct lock_class *c, int forward)
#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+/*
+ * Lock exclusiveness table.
+ *
+ * With lock X.A and X.B (X.A is on top and X.B is on bottom):
+ *
+ *     T1        TB        T1        TB
+ *     --        --        --        --
+ *
+ *     X.A             or            X.A
+ *               X.B        X.B
+ *
+ * in the table Yes means the two locks are exclusive and No otherwise.
+ *
+ * +---------------------+------------+-----------+---------------------+
+ * | X.A vs. X.B | Write lock | Read lock | Recursive-read lock |
+ * +---------------------+------------+-----------+---------------------+
+ * | Write lock | Yes | Yes | Yes |
+ * +---------------------+------------+-----------+---------------------+
+ * | Read lock | Yes | Yes | No |
+ * +---------------------+------------+-----------+---------------------+
+ * | Recursive-read lock | Yes | Yes | No |
+ * +---------------------+------------+-----------+---------------------+
+ *
+ */
+static int lock_excl_table[3][3] = {
+ {1, 1, 1}, /* Write lock vs. X.B */
+ {1, 1, 0}, /* Read lock vs. X.B */
+ {1, 1, 0}, /* Recursive-read lock vs. X.B */
+};
#endif
static bool check_lock_chain_key(struct lock_chain *chain)
--
1.8.3.1
Now that we support recursive read locks, a previously failed case:
----------------------------------------------------------------------------
| spin |wlock |rlock |mutex | wsem | rsem |
--------------------------------------------------------------------------
mixed read-lock/lock-write ABBA: |FAILED| | ok |
can be added back. Now we have:
Good, all 262 testcases passed!
See the case in: e91498589746065e3a ("Add mixed read-write ABBA tests")
It is worth noting that, previously, for the lock inversion deadlock checks,
the SUCCESSes of the _rlock cases were only because dependencies involving
recursive-read locks (rlock) were not included in the graph.
Signed-off-by: Yuyang Du <[email protected]>
---
lib/locking-selftest.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a170554..7d14d87 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -462,12 +462,13 @@ static void rwsem_ABBA3(void)
/*
* ABBA deadlock:
+ *
+ * Should fail unless either A or B is an rlock.
*/
-
#define E() \
\
LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(B, A); /* fail */
+ LOCK_UNLOCK_2(B, A);
/*
* 6 testcases:
@@ -494,13 +495,15 @@ static void rwsem_ABBA3(void)
/*
* AB BC CA deadlock:
+ *
+ * Should fail for all lock types except rlock.
*/
#define E() \
\
LOCK_UNLOCK_2(A, B); \
LOCK_UNLOCK_2(B, C); \
- LOCK_UNLOCK_2(C, A); /* fail */
+ LOCK_UNLOCK_2(C, A);
/*
* 6 testcases:
@@ -527,13 +530,15 @@ static void rwsem_ABBA3(void)
/*
* AB CA BC deadlock:
+ *
+ * Should fail for all lock types except rlock.
*/
#define E() \
\
LOCK_UNLOCK_2(A, B); \
LOCK_UNLOCK_2(C, A); \
- LOCK_UNLOCK_2(B, C); /* fail */
+ LOCK_UNLOCK_2(B, C);
/*
* 6 testcases:
@@ -560,6 +565,8 @@ static void rwsem_ABBA3(void)
/*
* AB BC CD DA deadlock:
+ *
+ * Should fail for all lock types except rlock.
*/
#define E() \
@@ -567,7 +574,7 @@ static void rwsem_ABBA3(void)
LOCK_UNLOCK_2(A, B); \
LOCK_UNLOCK_2(B, C); \
LOCK_UNLOCK_2(C, D); \
- LOCK_UNLOCK_2(D, A); /* fail */
+ LOCK_UNLOCK_2(D, A);
/*
* 6 testcases:
@@ -594,13 +601,15 @@ static void rwsem_ABBA3(void)
/*
* AB CD BD DA deadlock:
+ *
+ * Should fail for all lock types except rlock.
*/
#define E() \
\
LOCK_UNLOCK_2(A, B); \
LOCK_UNLOCK_2(C, D); \
LOCK_UNLOCK_2(B, D); \
- LOCK_UNLOCK_2(D, A); /* fail */
+ LOCK_UNLOCK_2(D, A);
/*
* 6 testcases:
@@ -627,13 +636,15 @@ static void rwsem_ABBA3(void)
/*
* AB CD BC DA deadlock:
+ *
+ * Should fail for all lock types except rlock.
*/
#define E() \
\
LOCK_UNLOCK_2(A, B); \
LOCK_UNLOCK_2(C, D); \
LOCK_UNLOCK_2(B, C); \
- LOCK_UNLOCK_2(D, A); /* fail */
+ LOCK_UNLOCK_2(D, A);
/*
* 6 testcases:
@@ -2033,13 +2044,6 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
-#ifdef CONFIG_PROVE_LOCKING
- /*
- * Lockdep does indeed fail here, but there's nothing we can do about
- * that now. Don't kill lockdep for it.
- */
- unexpected_testcase_failures--;
-#endif
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
--
1.8.3.1
Let's make sure our 13 cases can be correctly handled. Some cases are
missing because we currently don't have such lock types, or they cannot
be annotated correctly to implement the cases.
----------------------------------------------------------------------------
| spin |wlock |rlock |mutex | wsem | rsem |
--------------------------------------------------------------------------
read-write lock ABBA Case #1.1: | ok |
read-write lock ABBA Case #1.2: | ok |
read-write lock ABBA Case #2.1a: | ok |
read-write lock ABBA Case #2.1b: | ok |
read-write lock ABBA Case #2.2a: | ok |
read-write lock ABBA Case #2.2b: | ok |
read-write lock ABBA Case #4.1a: | ok |
read-write lock ABBA Case #4.1b: | ok |
read-write lock ABBA Case #4.2a: | ok |
read-write lock ABBA Case #4.2b: | ok |
read-write lock ABBA Case #6.1: | ok |
read-write lock ABBA Case #6.2a: | ok |
read-write lock ABBA Case #6.2b: | ok |
read-write lock ABBA Case #6.3: | ok |
read-write lock ABBA Case #7.1a: | ok |
read-write lock ABBA Case #7.1b: | ok |
read-write lock ABBA Case #7.2a: | ok |
read-write lock ABBA Case #7.2b: | ok |
read-write lock ABBA Case #8a: | ok |
read-write lock ABBA Case #8b: | ok |
read-write lock ABBA Case #8c: | ok |
read-write lock ABBA Case #8d: | ok |
read-write lock ABBA Case #9.1a: | ok |
read-write lock ABBA Case #9.1b: | ok |
read-write lock ABBA Case #9.2a: | ok |
read-write lock ABBA Case #9.2b: | ok |
read-write lock ABBA Case #10a: | ok | | ok |
read-write lock ABBA Case #10b: | ok | | ok |
read-write lock ABBA Case #10c: | ok | | ok |
read-write lock ABBA Case #10d: | ok | | ok |
read-write lock ABBA Case #11: | ok |
read-write lock ABBA Case #12a: | ok |
read-write lock ABBA Case #12b: | ok |
read-write lock ABBA Case #12c: | ok |
read-write lock ABBA Case #12d: | ok |
read-write lock ABBA Case #12e: | ok |
read-write lock ABBA Case #12f: | ok |
read-write lock ABBA Case #13.1: | ok |
read-write lock ABBA Case #13.2: | ok |
--------------------------------------------------------------------------
This patch marks the completion of the read-write lock deadlock
detection algorithm. With recursive-read lock dependencies in the graph,
there may be new deadlock scenarios that could never be discovered
before. Admittedly, they include both true positives and possibly false
positives. Have fun and brace for impact!
Signed-off-by: Yuyang Du <[email protected]>
---
lib/locking-selftest.c | 1077 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 1077 insertions(+)
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7d14d87..4fb6ab6 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -461,6 +461,919 @@ static void rwsem_ABBA3(void)
}
/*
+ * Read-write lock ABBA cases.
+ *
+ * Notation:
+ * W: write lock
+ * R: read lock
+ * RR: recursive-read lock
+ * X: either write, read, or recursive-read lock
+ */
+
+/*
+ * Lock dependencies in one chain vs. in different chains
+ * ------------------------------------------------------
+ *
+ * Case #1.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * W1
+ * RR2 W3
+ * W3 W1 [Deadlock]
+ *
+ * Case #1.2:
+ *
+ * T1 T2
+ * -- --
+ *
+ * W1
+ * RR2
+ *
+ * (W1 RR2 released
+ * in T1)
+ *
+ * RR2 W3
+ * W3 W1 [No deadlock]
+ *
+ * Case #1.3:
+ *
+ * T1a T1b T2
+ * --- --- --
+ *
+ * W1 RR2 W3
+ * RR2 W3 W1 [No deadlock]
+ */
+static void rlock_ABBA_case1_1(void)
+{
+ WL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Z1);
+ RL(X1);
+ RU(X1);
+ RU(Z1);
+}
+
+static void rlock_ABBA_case1_2(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+
+ RL(Z1);
+ RL(X1);
+ RU(X1);
+ RU(Z1);
+}
+
+/*
+ * When the final dependency is read lock and read lock
+ * ---------------------------------------------------------------
+ *
+ * Case #2.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * W2 R1 [Deadlock]
+ *
+ * Case #2.2:
+ *
+ * T1 T2
+ *
+ * X1 R2
+ * R2 R1 [Deadlock]
+ *
+ * Case #2.3:
+ *
+ * T1 T2
+ *
+ * X1 R2
+ * RR2 R1 [No deadlock]
+ */
+
+static void rwsem_ABBA_case2_1a(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case2_1b(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case2_2a(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case2_2b(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * When the final dependency is read lock and recursive-read lock
+ * --------------------------------------------------------------
+ *
+ * Case #3.1:
+ *
+ * T1 T2
+ *
+ * W1 R2
+ * X2 RR1 [Deadlock]
+ *
+ * Case #3.2:
+ *
+ * T1 T2
+ * -- --
+ *
+ * R1 R2
+ * X2 RR1 [No deadlock]
+ *
+ * Case #3.3:
+ *
+ * T1 T2
+ *
+ * RR1 R2
+ * X2 RR1 [No deadlock]
+ */
+
+/*
+ * When the final dependency is read lock and write lock
+ * -----------------------------------------------------
+ *
+ * Case #4.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * R2 W1 [Deadlock]
+ *
+ * Case #4.2:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * W2 W1 [Deadlock]
+ *
+ * Case #4.3:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * RR2 W1 [No deadlock]
+ */
+
+static void rwsem_ABBA_case4_1a(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case4_1b(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case4_2a(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rwsem_ABBA_case4_2b(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * When the final dependency is recursive-read lock and read lock
+ * --------------------------------------------------------------
+ *
+ * Case #5.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 RR2
+ * R2 R1 [Deadlock]
+ *
+ * Case #5.2:
+ *
+ * T1 T2
+ *
+ * X1 RR2
+ * W2 R1 [Deadlock]
+ *
+ * Case #5.3:
+ *
+ * T1 T2
+ *
+ * X1 RR2
+ * RR2 R1 [No deadlock]
+ */
+
+/*
+ * When the final dependency is recursive-read lock and recursive-read lock
+ * ------------------------------------------------------------------------
+ *
+ * Case #6.1:
+ *
+ * T1 T2
+ *
+ * W1 RR2
+ * W2 RR1 [Deadlock]
+ *
+ * Case #6.2:
+ *
+ * T1 T2
+ *
+ * RR1 RR2
+ * X2 RR1 [No deadlock]
+ *
+ * Case #6.3:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 RR2
+ * RR2 RR1 [No deadlock]
+ *
+ * Case #6.4:
+ *
+ * T1 T2
+ *
+ * W1 RR2
+ * R2 RR1 [Deadlock]
+ *
+ * Case #6.5:
+ *
+ * T1 T2
+ *
+ * R1 RR2
+ * X2 RR1 [No deadlock]
+ */
+
+static void rlock_ABBA_case6_1(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case6_2a(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case6_2b(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case6_3(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+/*
+ * When the final dependency is recursive-read lock and write lock
+ * ---------------------------------------------------------------
+ *
+ * Case #7.1:
+ *
+ * T1 T2
+ *
+ * X1 RR2
+ * W2 W1 [Deadlock]
+ *
+ * Case #7.2:
+ *
+ * T1 T2
+ *
+ * X1 RR2
+ * RR2 W1 [No deadlock]
+ *
+ * Case #7.3:
+ *
+ * T1 T2
+ *
+ * X1 RR2
+ * R2 W1 [Deadlock]
+ */
+
+static void rlock_ABBA_case7_1a(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case7_1b(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case7_2a(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case7_2b(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+/*
+ * When the final dependency is write lock and read lock
+ * -----------------------------------------------------
+ *
+ * Case #8:
+ *
+ * T1 T2
+ *
+ * X1 W2
+ * X2 R1 [Deadlock]
+ */
+
+static void rwsem_ABBA_case8a(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case8b(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case8c(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case8d(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ WSU(Y1);
+}
+
+/*
+ * When the final dependency is write lock and recursive-read lock
+ * ---------------------------------------------------------------
+ *
+ * Case #9.1:
+ *
+ * T1 T2
+ *
+ * W1 W2
+ * X2 RR1 [Deadlock]
+ *
+ * Case #9.2:
+ *
+ * T1 T2
+ *
+ * RR1 W2
+ * X2 RR1 [No deadlock]
+ *
+ * Case #9.3:
+ *
+ * T1 T2
+ *
+ * R1 W2
+ * X2 RR1 [No deadlock]
+ */
+static void rlock_ABBA_case9_1a(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case9_1b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case9_2a(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case9_2b(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+/*
+ * When the final dependency is write lock and write lock
+ * ------------------------------------------------------
+ *
+ * Case #10:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 W2
+ * X2 W1 [Deadlock]
+ */
+static void rlock_ABBA_case10a(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rlock_ABBA_case10b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rlock_ABBA_case10c(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rlock_ABBA_case10d(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rwsem_ABBA_case10a(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case10b(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case10c(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rwsem_ABBA_case10d(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+/*
+ * Case #11:
+ *
+ * T1 T2
+ * -- --
+ *
+ * RR1
+ * RR2
+ *
+ * (RR1 RR2 released)
+ *
+ * W1 RR2
+ * W2 RR1 [Deadlock]
+ */
+static void rlock_ABBA_case11(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+/*
+ * Case #12:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 X3
+ * RR2 RR2
+ * X3 X1 [Deadlock]
+ */
+static void rlock_ABBA_case12a(void)
+{
+ WL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rlock_ABBA_case12b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rlock_ABBA_case12c(void)
+{
+ RL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rlock_ABBA_case12d(void)
+{
+ RL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rlock_ABBA_case12e(void)
+{
+ RL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ RU(Z1);
+}
+
+static void rlock_ABBA_case12f(void)
+{
+ WL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+/*
+ * Case #13.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1
+ * X3 RR2
+ * RR2 X1 [No deadlock]
+ *
+ * Case #13.2:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1
+ * RR2 RR2
+ * X3 X1 [No deadlock]
+ */
+static void rlock_ABBA_case13_1(void)
+{
+ WL(X1);
+ WL(Z1);
+ RL(Y1);
+ RU(Y1);
+ WU(Z1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rlock_ABBA_case13_2(void)
+{
+ WL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+/*
* ABBA deadlock:
*
 * Should fail unless either A or B is an rlock.
@@ -2060,6 +2973,170 @@ void locking_selftest(void)
pr_cont(" |");
dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+ print_testname("read-write lock ABBA Case #1.1");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case1_1, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #1.2");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case1_2, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #2.1a");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2_1a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #2.1b");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2_1b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #2.2a");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2_2a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #2.2b");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2_2b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4.1a");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4_1a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4.1b");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4_1b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4.2a");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4_2a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4.2b");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4_2b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #6.1");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6_1, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #6.2a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6_2a, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #6.2b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6_2b, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #6.3");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6_3, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #7.1a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7_1a, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #7.1b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7_1b, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #7.2a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7_2a, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #7.2b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7_2b, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #8a");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case8a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8b");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case8b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8c");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case8c, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8d");
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case8d, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #9.1a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case9_1a, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #9.1b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case9_1b, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #9.2a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case9_2a, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #9.2b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case9_2b, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #10a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case10a, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case10a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #10b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case10b, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case10b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #10c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case10c, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case10c, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #10d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case10d, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case10d, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #11");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case11, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12a, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12b, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12c, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12d, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12e");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12e, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #12f");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case12f, FAILURE, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #13.1");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case13_1, SUCCESS, LOCKTYPE_RWLOCK);
+
+ print_testname("read-write lock ABBA Case #13.2");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case13_2, SUCCESS, LOCKTYPE_RWLOCK);
+
printk(" --------------------------------------------------------------------------\n");
/*
--
1.8.3.1
Lockdep effectively uses a two-task model to track a workload's locking
behavior and then does the checking to find lock inversion deadlock
scenarios based on this model. Let's explain it in detail.
When there is a new lock dependency L1 -> L2 (i.e., the current task
attempts to acquire L2 while holding L1), taken from the latest two locks
of a new lock chain, lockdep's view of the world is composed of two
virtual tasks:
- Task1: the entire previous locking behavior depicted by the forward
lock dependency graph.
- Task2: the current task's new locking behavior, i.e., the L1 -> L2 dependency.
Whenever a Task2 comes, lockdep checks whether Task1 contains the inverse
locking order of L1 -> L2, namely L2 -> L1. If this inverse locking order
exists, then lockdep has found the typical lock inversion deadlock scenario,
a.k.a. the ABBA deadlock. If not, Task2 is merged into Task1 to become a new,
bigger Task1 with a bigger graph. Then this track-and-check cycle goes on
and on.
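In rough terms, the cycle can be sketched as follows (a simplified
illustration, not the lockdep implementation; graph_has_path(),
report_abba_deadlock() and add_edge() are hypothetical stand-ins for the real
BFS search, the report path and the graph update):

/* A new dependency L1 -> L2 arrives from the current task (Task2). */
static void check_new_dependency(struct lock_class *l1, struct lock_class *l2)
{
	if (graph_has_path(l2, l1))		/* Task1 already orders L2 before L1? */
		report_abba_deadlock(l1, l2);	/* inversion found: ABBA deadlock */
	else
		add_edge(l1, l2);		/* merge Task2 into Task1's graph */
}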
There are some nuances between this two-task model and real workload
locking behavior. Some examples:
(In the following, Tx denotes a concrete task)
T1
--
L1
L2
(L1 and L2 released)
L2
L3
T2
--
L1
L2
L3
T1 and T2 produce the same Task1 from the perspective of lockdep's
two-task model. However, this model does not fully reflect reality: in T1,
L1 -> L3 is not a "real" dependency, while in T2 it is (a real X -> Y lock
dependency is defined as a task attempting to acquire Y while holding X).
This may result in false positive deadlock scenarios. For example:
Case #1.1:
T1 T2
-- --
L1
L2 L3
L3 L1 [Deadlock]
Case #1.2 (T1 is a singular task):
T1 T2
-- --
L1
L2
(L1 L2 released)
L2 L3
L3 L1 [No deadlock]
Case #1.3:
T1a T1b T2
--- --- --
L1 L1
L2 L2
(L1 L2 released
in T1a and T1b)
L2 L2 L3
L3 L3 L1 [Deadlock]
From Case #1.2 (no deadlock) to Case #1.3 (deadlock), we can see that
lockdep also assumes there can be multiple Task1's on top of the two-task
model, and from a pragmatic point of view, this assumption is not
unrealistic to make.
However, with read locks that can be fully concurrent with other read
locks and are not blocked by write locks (such as the recursive read locks
of rwlock), this model is flawed. For example (the following read locks,
denoted as RR, and write locks are of type rwlock):
Case #2.1:
T1 T2
-- --
W1
RR2 W3
W3 W1 [Deadlock]
Case #2.2:
T1a T1b T2
--- --- --
W1 RR2 W3
RR2 W3 W1 [No deadlock]
Case #2.3:
T1a T1b T2
--- --- --
W1 W1
RR2 RR2
(W1 RR2 released
in T1a and T1b)
RR2 RR2 W3
W3 W3 W1 [No deadlock]
Lockdep cannot differentiate Case #2.1 from Cases #2.2 and #2.3, or vice
versa. This is because, when modeling Task1, it cannot tell whether two
neighboring direct dependencies in the graph are from the same real task
and hence form a "real" dependency.
To make up for this, the two-task model needs to be strengthened. We
previously mapped lock chains to the lock dependency graph and added
read-write lock types into dependencies. This patch finally modifies the
graph traversal algorithm (__bfs()) to stop when two neighboring direct
dependencies are not in the same lock chain and the middle lock is a
recursive-read lock (rwlock).
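In plain terms, the condition the traversal now applies can be sketched like
this (illustration only; middle_is_recursive_read() and in_same_lock_chain()
are made-up stand-ins for the lock-type and lock-chain comparisons that
read_lock_stopper() below performs):

/* Stop the BFS path from being extended from <lock> through <entry>. */
static bool path_stopped_here(struct lock_list *lock, struct lock_list *entry)
{
	return middle_is_recursive_read(lock, entry) &&	/* middle lock is RR in both deps */
	       !in_same_lock_chain(lock, entry);	/* and the two deps never occurred in one chain */
}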
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 68 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3ad97bc..05c70be 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1617,6 +1617,71 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
}
/*
+ * A lock stopper in the dependency graph is a read lock that stops the
+ * graph traversal from passing through it even if the two dependencies
+ * are linked in a path. A stopper sits in two lock dependencies of the form:
+ *
+ * X -> RR (stopper) -> X (where RR is recursive-read lock)
+ *
+ * and these two dependencies are NOT from one lock chain.
+ */
+static inline bool
+read_lock_stopper(struct lock_list *parent, struct lock_list *child, int forward)
+{
+ struct lock_chain *chain1, *chain2;
+ struct lock_list *list[2] = { child, parent };
+ u64 key1, key2;
+ int distance, other = 1 - forward;
+
+ /* This is the source entry */
+ if (!get_lock_parent(parent))
+ return false;
+
+ if (!(get_lock_type1(list[other]) == LOCK_TYPE_RECURSIVE &&
+ get_lock_type2(list[forward]) == LOCK_TYPE_RECURSIVE))
+ return false;
+
+ distance = list[forward]->distance;
+
+ list_for_each_entry_rcu(chain1, &list[forward]->chains, chain_entry) {
+ key1 = chain1->chain_key;
+
+ list_for_each_entry_rcu(chain2, &list[other]->chains, chain_entry) {
+ /*
+ * If the two chains are in the same task lock stack,
+ * we should be able to calculate key2 from key1 if
+ * distance is 1, or calculate key1 from key2 if
+ * distance is larger than 1.
+ */
+ if (distance == 1) {
+ int class_idx = fw_dep_class(list[other]) - lock_classes;
+ key1 = iterate_chain_key(key1, class_idx,
+ get_lock_type2(list[other]));
+ key2 = chain2->chain_key;
+
+ if (key1 == key2)
+ return false;
+ }
+ else {
+ int i = chain1->base, j = chain2->base;
+
+ if (chain1->depth != chain2->depth - distance)
+ continue;
+
+				for (; i < chain1->base + chain1->depth - 1; i++, j++)
+					if (chain_hlocks[i] != chain_hlocks[j] ||
+					    chain_hlocks_type[i] != chain_hlocks_type[j])
+						break;
+
+				if (i == chain1->base + chain1->depth - 1)
+					return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/*
* Forward- or backward-dependency search, used for both circular dependency
* checking and hardirq-unsafe/softirq-unsafe checking.
*/
@@ -1656,6 +1721,9 @@ static int __bfs(struct lock_list *source_entry,
if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
+ if (read_lock_stopper(lock, entry, forward))
+ continue;
+
mark_lock_accessed(entry, lock, forward);
if (__cq_enqueue(cq, entry)) {
--
1.8.3.1
We are going to consolidate the forward-dependency and backward-dependency
lock_lists. To prepare for that, combine the locks_before and locks_after
lists into one array, which also makes the code a bit clearer.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 6 ++---
kernel/locking/lockdep.c | 63 +++++++++++++++++++------------------------
kernel/locking/lockdep_proc.c | 2 +-
3 files changed, 32 insertions(+), 39 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0246a70..06b686d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -89,10 +89,10 @@ struct lock_class {
/*
* These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
+ * to every node we attach a list of "forward" graph nodes
+ * @dep_list[1] and a list of "backward" graph nodes @dep_list[0].
*/
- struct list_head locks_after, locks_before;
+ struct list_head dep_list[2];
const struct lockdep_subclass_key *key;
unsigned int subclass;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 21d9e99..8c53b59 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -901,8 +901,7 @@ static bool in_list(struct list_head *e, struct list_head *h)
}
/*
- * Check whether entry @e occurs in any of the locks_after or locks_before
- * lists.
+ * Check whether entry @e occurs in any of the dep lists.
*/
static bool in_any_class_list(struct list_head *e)
{
@@ -911,8 +910,8 @@ static bool in_any_class_list(struct list_head *e)
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
- if (in_list(e, &class->locks_after) ||
- in_list(e, &class->locks_before))
+ if (in_list(e, &class->dep_list[0]) ||
+ in_list(e, &class->dep_list[1]))
return true;
}
return false;
@@ -1000,9 +999,9 @@ static bool __check_data_structures(void)
/* Check whether all classes have valid lock lists. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
- if (!class_lock_list_valid(class, &class->locks_before))
+ if (!class_lock_list_valid(class, &class->dep_list[0]))
return false;
- if (!class_lock_list_valid(class, &class->locks_after))
+ if (!class_lock_list_valid(class, &class->dep_list[1]))
return false;
}
@@ -1098,8 +1097,8 @@ static void init_data_structures_once(void)
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
- INIT_LIST_HEAD(&lock_classes[i].locks_after);
- INIT_LIST_HEAD(&lock_classes[i].locks_before);
+ INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
+ INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
}
@@ -1228,8 +1227,8 @@ static bool is_dynamic_key(const struct lock_class_key *key)
class->key = key;
class->name = lock->name;
class->subclass = subclass;
- WARN_ON_ONCE(!list_empty(&class->locks_before));
- WARN_ON_ONCE(!list_empty(&class->locks_after));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[0]));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[1]));
class->name_version = count_matching_names(class);
/*
* We use RCU's safe list-add method to make
@@ -1448,15 +1447,14 @@ static inline int get_lock_depth(struct lock_list *child)
/*
* Return the forward or backward dependency list.
*
- * @lock: the lock_list to get its class's dependency list
- * @offset: the offset to struct lock_class to determine whether it is
- * locks_after or locks_before
+ * @lock: the lock_list to get its class's dependency list
+ * @forward: the forward dep list or backward dep list
*/
-static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
+static inline struct list_head *get_dep_list(struct lock_list *lock, int forward)
{
- void *lock_class = lock->class;
+ struct lock_class *class = lock->class;
- return lock_class + offset;
+ return &class->dep_list[forward];
}
/*
@@ -1466,8 +1464,7 @@ static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry,
- int offset)
+ struct lock_list **target_entry, int forward)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1481,7 +1478,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- head = get_dep_list(source_entry, offset);
+ head = get_dep_list(source_entry, forward);
if (list_empty(head))
goto exit;
@@ -1495,7 +1492,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- head = get_dep_list(lock, offset);
+ head = get_dep_list(lock, forward);
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -1528,9 +1525,7 @@ static inline int __bfs_forwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry,
- offsetof(struct lock_class, locks_after));
-
+ return __bfs(src_entry, data, match, target_entry, 1);
}
static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1538,9 +1533,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry,
- offsetof(struct lock_class, locks_before));
-
+ return __bfs(src_entry, data, match, target_entry, 0);
}
static void print_lock_trace(const struct lock_trace *trace,
@@ -2445,7 +2438,7 @@ static inline void inc_chains(void)
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
- list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+ list_for_each_entry(entry, &hlock_class(prev)->dep_list[1], entry) {
if (entry->class == hlock_class(next)) {
debug_atomic_inc(nr_redundant);
@@ -2495,14 +2488,14 @@ static inline void inc_chains(void)
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
- &hlock_class(prev)->locks_after,
+ &hlock_class(prev)->dep_list[1],
next->acquire_ip, distance, *trace);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
- &hlock_class(next)->locks_before,
+ &hlock_class(next)->dep_list[0],
next->acquire_ip, distance, *trace);
if (!ret)
return 0;
@@ -4811,8 +4804,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
nr_list_entries--;
list_del_rcu(&entry->entry);
}
- if (list_empty(&class->locks_after) &&
- list_empty(&class->locks_before)) {
+ if (list_empty(&class->dep_list[0]) &&
+ list_empty(&class->dep_list[1])) {
list_move_tail(&class->lock_entry, &pf->zapped);
hlist_del_rcu(&class->hash_entry);
WRITE_ONCE(class->key, NULL);
@@ -4833,12 +4826,12 @@ static void reinit_class(struct lock_class *class)
const unsigned int offset = offsetof(struct lock_class, key);
WARN_ON_ONCE(!class->lock_entry.next);
- WARN_ON_ONCE(!list_empty(&class->locks_after));
- WARN_ON_ONCE(!list_empty(&class->locks_before));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[0]));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[1]));
memset(p + offset, 0, sizeof(*class) - offset);
WARN_ON_ONCE(!class->lock_entry.next);
- WARN_ON_ONCE(!list_empty(&class->locks_after));
- WARN_ON_ONCE(!list_empty(&class->locks_before));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[0]));
+ WARN_ON_ONCE(!list_empty(&class->dep_list[1]));
}
static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index edc4a7b..989c27e 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -82,7 +82,7 @@ static int l_show(struct seq_file *m, void *v)
print_name(m, class);
seq_puts(m, "\n");
- list_for_each_entry(entry, &class->locks_after, entry) {
+ list_for_each_entry(entry, &class->dep_list[1], entry) {
if (entry->distance == 1) {
seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
--
1.8.3.1
Add an enum to formally define the lock types. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 27 ++++++++++++++++++++-------
include/linux/rcupdate.h | 2 +-
kernel/locking/lockdep.c | 19 +++++++++++--------
3 files changed, 32 insertions(+), 16 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fcfc1dd..2335447 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -350,10 +350,18 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
*
* Values for "read":
*
- * 0: exclusive (write) acquire
- * 1: read-acquire (no recursion allowed)
- * 2: read-acquire with same-instance recursion allowed
- *
+ * LOCK_TYPE_EXCLUSIVE (LOCK_TYPE_WRITE): exclusive (write) acquire
+ * LOCK_TYPE_READ: read-acquire (no recursion allowed)
+ * LOCK_TYPE_RECURSIVE: read-acquire with same-instance recursion allowed
+ */
+enum lock_type {
+ LOCK_TYPE_EXCLUSIVE = 0,
+ LOCK_TYPE_WRITE = 0,
+ LOCK_TYPE_READ,
+ LOCK_TYPE_RECURSIVE,
+};
+
+/*
* Values for check:
*
* 0: simple checks (freeing, held-at-exit-time, etc.)
@@ -599,9 +607,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
* on the per lock-class debug mode:
*/
-#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
+#define lock_acquire_exclusive(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, n, i)
+
+#define lock_acquire_shared(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_READ, 1, n, i)
+
+#define lock_acquire_shared_recursive(l, s, t, n, i) \
+ lock_acquire(l, s, t, LOCK_TYPE_RECURSIVE, 1, n, i)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c4f76a3..09128d5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -205,7 +205,7 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
- lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+ lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
}
static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 754a718..bdc7e94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2430,7 +2430,10 @@ static inline void inc_chains(void)
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns:
+ * 0: on deadlock detected;
+ * 1: on OK;
+ * LOCK_TYPE_RECURSIVE: on recursive read
*/
static int
check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2452,15 +2455,15 @@ static inline void inc_chains(void)
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
- if ((next->read == 2) && prev->read)
- return 2;
+ if ((next->read == LOCK_TYPE_RECURSIVE) && prev->read)
+ return LOCK_TYPE_RECURSIVE;
/*
* We're holding the nest_lock, which serializes this lock's
* nesting behaviour.
*/
if (nest)
- return 2;
+ return LOCK_TYPE_RECURSIVE;
print_deadlock_bug(curr, prev, next);
return 0;
@@ -2563,7 +2566,7 @@ static inline void inc_chains(void)
* write-lock never takes any other locks, then the reads are
* equivalent to a NOP.
*/
- if (next->read == 2 || prev->read == 2)
+ if (next->read == LOCK_TYPE_RECURSIVE || prev->read == LOCK_TYPE_RECURSIVE)
return 1;
if (!*trace) {
@@ -2946,7 +2949,7 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
* chain, and if it's not a second recursive-read lock. If
* not, there is no need to check further.
*/
- if (!(chain->depth > 1 && ret != 2))
+ if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
goto out_unlock;
}
@@ -2955,7 +2958,7 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
* added:
*/
if (chain) {
- if (hlock->read != 2 && hlock->check) {
+ if (hlock->read != LOCK_TYPE_RECURSIVE && hlock->check) {
int distance = curr->lockdep_depth - depth + 1;
if (!check_prev_add(curr, hlock, next, distance,
@@ -4208,7 +4211,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
curr->curr_chain_key = hlock->prev_chain_key;
WARN(hlock->read, "downgrading a read lock");
- hlock->read = 1;
+ hlock->read = LOCK_TYPE_READ;
hlock->acquire_ip = ip;
if (reacquire_held_locks(curr, depth, i, &merged))
--
1.8.3.1
When adding a dependency, if the dependency already exists, its read-write
type will be "upgraded" if the new dependency has a more exclusive lock
type. The order of increasing exclusiveness is:
recursive read -> read -> write.
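Since smaller lock_type values mean more exclusiveness (write = 0, read = 1,
recursive-read = 2), the upgrade amounts to keeping the numerically smaller
value at each end of an existing dependency; a minimal sketch of the rule
(illustrative helper, not the patch code):

/* Keep the more exclusive (numerically smaller) type for a dependency end. */
static inline int upgraded_lock_type(int existing, int new_type)
{
	return new_type < existing ? new_type : existing;
}

For example, a dependency whose first lock was recorded as recursive-read and
is later taken again as write is upgraded to write on that end.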
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 41cb735..eb88314 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1588,6 +1588,21 @@ static inline int get_lock_type2(struct lock_list *lock)
return lock->lock_type2;
}
+static inline int hlock_type(struct held_lock *hlock)
+{
+ return hlock->read;
+}
+
+static inline void set_lock_type1(struct lock_list *lock, int read)
+{
+ lock->lock_type1 = read;
+}
+
+static inline void set_lock_type2(struct lock_list *lock, int read)
+{
+ lock->lock_type2 = read;
+}
+
/*
* Forward- or backward-dependency search, used for both circular dependency
* checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2585,6 +2600,17 @@ static inline void inc_chains(void)
list_add_tail_rcu(&chain->chain_entry, &entry->chains);
__set_bit(chain - lock_chains, lock_chains_in_dep);
+ /*
+ * For a direct dependency, smaller type value
+			 * generally means more lock exclusiveness; we
+			 * keep the more exclusive one, in other words,
+ * we "upgrade" the dependency if we can.
+ */
+ if (prev->read < get_lock_type1(entry))
+ set_lock_type1(entry, prev->read);
+ if (next->read < get_lock_type2(entry))
+ set_lock_type2(entry, next->read);
+
if (distance == 1)
entry->distance = 1;
--
1.8.3.1
When computing a chain's hash key, we need to consider a held lock's
read-write type, so we hash the held lock's read-write type into the
chain key. The additional data fed to the Jenkins hash algorithm is thus
a composite of the new held lock's lock class index (lower 16 bits) and
its read-write type (upper 16 bits), as opposed to just the class index
before:
held lock type (16 bits) : lock class index (16 bits)
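For example, with LOCK_TYPE_SHIFT being 16, a held lock of class index 42
acquired as a read lock (LOCK_TYPE_READ == 1) contributes the following
32-bit word to the hash (a sketch of the composition only, not the kernel
code):

	u32 idx = 42;			/* lock class index, lower 16 bits */
	idx += LOCK_TYPE_READ << 16;	/* read-write type, upper 16 bits */
	/* idx == 0x0001002a, which is then mixed into the 64-bit chain key by __jhash_mix() */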
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 1 +
kernel/locking/lockdep.c | 55 ++++++++++++++++++++++++++++++++++--------------
2 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eab8a90..3de4b37 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -183,6 +183,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
}
#define LOCK_TYPE_BITS 2
+#define LOCK_TYPE_SHIFT 16
/*
* Every lock has a list of other locks that were taken after or before
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1166262..d13b6b7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -370,11 +370,22 @@ struct pending_free {
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
+ *
+ * The additional u32 data to hash is a composite of the new held lock's
+ * lock class index (lower 16 bits) and its read-write type (higher 16
+ * bits):
+ *
+ * hlock type (16 bits) : lock class index (16 bits)
+ *
+ * N.B. The bits taken for lock type and index are specified by
+ * LOCK_TYPE_SHIFT.
*/
-static inline u64 iterate_chain_key(u64 key, u32 idx)
+static inline u64 iterate_chain_key(u64 key, u32 idx, int hlock_type)
{
u32 k0 = key, k1 = key >> 32;
+ idx += hlock_type << LOCK_TYPE_SHIFT;
+
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
return k0 | (u64)k1 << 32;
@@ -960,7 +971,8 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
/*
* The 'unsigned long long' casts avoid that a compiler warning
* is reported when building tools/lib/lockdep.
@@ -2700,12 +2712,13 @@ static inline int get_first_held_lock(struct task_struct *curr,
/*
* Returns the next chain_key iteration
*/
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key, int lock_type)
{
- u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+ u64 new_chain_key = iterate_chain_key(chain_key, class_idx, lock_type);
- printk(" class_idx:%d -> chain_key:%016Lx",
+ printk(" class_idx:%d (lock_type %d) -> chain_key:%016Lx",
class_idx,
+ lock_type,
(unsigned long long)new_chain_key);
return new_chain_key;
}
@@ -2722,12 +2735,15 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
- chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+ chain_key = print_chain_key_iteration(hlock->class_idx,
+ chain_key,
+ hlock->read);
print_lock(hlock);
}
- print_chain_key_iteration(hlock_next->class_idx, chain_key);
+ print_chain_key_iteration(hlock_next->class_idx, chain_key,
+ hlock_next->read);
print_lock(hlock_next);
}
@@ -2735,12 +2751,14 @@ static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
- int class_id;
+ int class_id, lock_type;
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
- chain_key = print_chain_key_iteration(class_id, chain_key);
+ lock_type = chain_hlocks_type[chain->base + i];
+ chain_key = print_chain_key_iteration(class_id, chain_key,
+ lock_type);
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2780,7 +2798,7 @@ static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
struct lock_chain *chain, int depth)
{
#ifdef CONFIG_DEBUG_LOCKDEP
- int i, j, id;
+ int i, j, id, type;
i = get_first_held_lock(curr, hlock, depth);
@@ -2789,10 +2807,12 @@ static int check_no_collision(struct task_struct *curr, struct held_lock *hlock,
return 0;
}
- for (j = 0; j < chain->depth - 1; j++, i++) {
+ for (j = chain->base; j < chain->base + chain->depth - 1; j++, i++) {
id = curr->held_locks[i].class_idx;
+ type = curr->held_locks[i].read;
- if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+ if (DEBUG_LOCKS_WARN_ON((chain_hlocks[j] != id) ||
+ (chain_hlocks_type[j] != type))) {
print_collision(curr, hlock, chain, depth);
return 0;
}
@@ -3081,7 +3101,8 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
* lock_chains. If it exists the check is actually not needed.
*/
chain_key = iterate_chain_key(hlock->prev_chain_key,
- hlock_class(next) - lock_classes);
+ hlock_class(next) - lock_classes,
+ next->read);
goto chain_again;
@@ -3139,7 +3160,8 @@ static void check_chain_key(struct task_struct *curr)
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = INITIAL_CHAIN_KEY;
- chain_key = iterate_chain_key(chain_key, hlock->class_idx);
+ chain_key = iterate_chain_key(chain_key, hlock->class_idx,
+ hlock->read);
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
@@ -4048,7 +4070,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (separate_irq_context(curr, hlock))
chain_key = INITIAL_CHAIN_KEY;
- chain_key = iterate_chain_key(chain_key, class_idx);
+ chain_key = iterate_chain_key(chain_key, class_idx, read);
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
print_lock_nested_lock_not_held(curr, hlock, ip);
@@ -4908,7 +4930,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
if (chain->depth && chain->chain_key == chain_key)
return;
/* Overwrite the chain key for concurrent RCU readers. */
--
1.8.3.1
A lock chain needs to include information about the read-write lock type. To
that end, introduce:
chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS]
in tandem with:
chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eb88314..1166262 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -950,6 +950,7 @@ static bool class_lock_list_valid(struct lock_class *c, int forward)
#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif
static bool check_lock_chain_key(struct lock_chain *chain)
@@ -2669,6 +2670,7 @@ static inline void inc_chains(void)
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
@@ -2872,9 +2874,13 @@ static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
chain->base = nr_chain_hlocks;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx;
+ int lock_type = curr->held_locks[i].read;
+
chain_hlocks[chain->base + j] = lock_id;
+ chain_hlocks_type[chain->base + j] = lock_type;
}
chain_hlocks[chain->base + j] = class - lock_classes;
+ chain_hlocks_type[chain->base + j] = hlock->read;
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
@@ -4886,6 +4892,9 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
(chain->base + chain->depth - i) *
sizeof(chain_hlocks[0]));
+ memmove(&chain_hlocks_type[i], &chain_hlocks_type[i + 1],
+ (chain->base + chain->depth - i) *
+ sizeof(chain_hlocks_type[0]));
}
/*
* Each lock class occurs at most once in a lock chain so once
@@ -5356,6 +5365,7 @@ void __init lockdep_init(void)
+ sizeof(lock_chains_in_use)
+ sizeof(lock_chains_in_dep)
+ sizeof(chain_hlocks)
+ + sizeof(chain_hlocks_type)
#endif
) / 1024
);
--
1.8.3.1