Hi Peter and Ingo,
Historically, read-write locks (recursive-read locks included) have not been
well supported in lockdep. This patchset attempts to solve this problem
soundly and completely.
The bulk of the algorithm is in patch #10, which is actually not complex at
all. Hopefully, it simply works.
Now that read-write locks are supported, all 262 selftest cases pass,
though I had to flip some cases which, I think, were wrong.
P.S. To Boqun, I haven't had time to read your patchset, but I did
carefully read your design doc and learned a lot from it. It is helpful.
Please give this patchset at least a look.
Thanks,
Yuyang
--
Yuyang Du (17):
locking/lockdep: Add lock type enum to explicitly specify read or
write locks
locking/lockdep: Add read-write type for dependency
locking/lockdep: Add helper functions to operate on the searched path
locking/lockdep: Update direct dependency's read-write type if it
exists
locking/lockdep: Rename deadlock check functions
locking/lockdep: Adjust BFS algorithm to support multiple matches
locking/lockdep: Introduce mark_lock_unaccessed()
locking/lockdep: Introduce chain_hlocks_type for held lock's
read-write type
locking/lockdep: Hash held lock's read-write type into chain key
locking/lockdep: Support read-write lock's deadlock detection
locking/lockdep: Adjust lockdep selftest cases
locking/lockdep: Remove useless lock type assignment
locking/lockdep: Add nest lock type
locking/lockdep: Support recursive read locks
locking/lockdep: Adjust selftest case for recursive read lock
locking/lockdep: Add more lockdep selftest cases
locking/lockdep: Remove irq-safe to irq-unsafe read check
include/linux/lockdep.h | 40 +-
kernel/locking/lockdep.c | 454 +++++++++++----
kernel/locking/lockdep_internals.h | 4 +
lib/locking-selftest.c | 1099 +++++++++++++++++++++++++++++++++++-
4 files changed, 1464 insertions(+), 133 deletions(-)
--
1.8.3.1
A direct dependency needs to keep track of its locks' read-write types. A
union field is added to the lock_list struct so the types are stored there
like this:
lock_type[1] (u16), lock_type[0] (u16)
or:
dep_type (int)
where value:
0: exclusive / write
1: read
2: recursive read
Note that the (int) dep_type value may vary with architectural
endianness, so use helpers to operate on these types.
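For illustration only, here is a standalone userspace sketch (not part of
the patch) of why the raw dep_type integer is endianness-dependent while
the lock_type[] halves are not; the union mirrors the one added to struct
lock_list below:

  #include <stdio.h>
  #include <stdint.h>

  /* Mirrors the union added to struct lock_list in this patch. */
  union dep {
          int dep_type;
          uint16_t lock_type[2];
  };

  int main(void)
  {
          union dep d = { .lock_type = { 1 /* read */, 0 /* write */ } };

          /* Always prints 1 and 0, regardless of endianness. */
          printf("lock_type[0]=%u lock_type[1]=%u\n",
                 d.lock_type[0], d.lock_type[1]);

          /* 0x00000001 on little endian, 0x00010000 on big endian. */
          printf("dep_type=0x%08x\n", (unsigned int)d.dep_type);
          return 0;
  }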
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 12 ++++++++++++
kernel/locking/lockdep.c | 34 +++++++++++++++++++++++++++++++---
kernel/locking/lockdep_internals.h | 3 +++
3 files changed, 46 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 441288c..6aa9af2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -195,6 +195,18 @@ struct lock_list {
struct lock_class *links_to;
struct lock_trace trace;
int distance;
+ /*
+ * This field keeps track of the read-write type of this dependency.
+ *
+ * With L1 -> L2:
+ *
+ * lock_type[0] stores the type of L1, while lock_type[1] stores the
+ * type of L2.
+ */
+ union {
+ int dep_type;
+ u16 lock_type[2];
+ };
/*
* The parent field is used to implement breadth-first search, and the
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e9eafcf..4091002 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1225,7 +1225,7 @@ static struct lock_list *alloc_list_entry(void)
static int add_lock_to_list(struct lock_class *this,
struct lock_class *links_to, struct list_head *head,
unsigned long ip, int distance,
- struct lock_trace *trace)
+ struct lock_trace *trace, int dep_type)
{
struct lock_list *entry;
/*
@@ -1240,6 +1240,8 @@ static int add_lock_to_list(struct lock_class *this,
entry->links_to = links_to;
entry->distance = distance;
entry->trace = *trace;
+ entry->dep_type = dep_type;
+
/*
* Both allocation and removal are done under the graph lock; but
* iteration is under RCU-sched; see look_up_lock_class() and
@@ -1677,6 +1679,30 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return ret;
}
+static inline int get_dep_type(struct held_lock *lock1, struct held_lock *lock2)
+{
+ /*
+ * With dependency lock1 -> lock2:
+ *
+ * lock_type[0] is lock1, while lock_type[1] is lock2.
+ *
+ * Avoid architectural endianness differences when composing dep_type.
+ */
+ u16 type[2] = { lock1->read, lock2->read };
+
+ return *(int *)type;
+}
+
+static inline int get_lock_type1(struct lock_list *lock)
+{
+ return lock->lock_type[0];
+}
+
+static inline int get_lock_type2(struct lock_list *lock)
+{
+ return lock->lock_type[1];
+}
+
/*
* Check that the dependency graph starting at <src> can lead to
* <target> or not. Print an error and return 0 if it does.
@@ -2446,14 +2472,16 @@ static inline void inc_chains(void)
*/
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(prev)->locks_after,
- next->acquire_ip, distance, trace);
+ next->acquire_ip, distance, trace,
+ get_dep_type(prev, next));
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(next)->locks_before,
- next->acquire_ip, distance, trace);
+ next->acquire_ip, distance, trace,
+ get_dep_type(next, prev));
if (!ret)
return 0;
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 150ec3f..c287bcb 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -26,6 +26,9 @@ enum lock_usage_bit {
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+#define LOCK_TYPE_BITS 16
+#define LOCK_TYPE_MASK 0xFFFF
+
/*
* Usage-state bitmasks:
*/
--
1.8.3.1
Since multiple matches may be needed in graph search, a matched lock
needs to rejoin the search for another match; therefore, introduce
mark_lock_unaccessed().
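As an aside, a minimal standalone sketch (not lockdep's code) of the
generation-counter idea behind mark_lock_accessed()/mark_lock_unaccessed():
an entry counts as accessed when its own generation id equals the global
one, so rewinding its id by one lets the search match it again:

  #include <stdio.h>
  #include <stdbool.h>

  static unsigned long graph_gen_id = 1;

  struct node { unsigned long gen_id; };

  static void mark_accessed(struct node *n)   { n->gen_id = graph_gen_id; }
  static void mark_unaccessed(struct node *n) { n->gen_id--; }
  static bool accessed(struct node *n)        { return n->gen_id == graph_gen_id; }

  int main(void)
  {
          struct node n = { 0 };

          mark_accessed(&n);
          printf("accessed: %d\n", accessed(&n));   /* 1 */
          mark_unaccessed(&n);
          printf("accessed: %d\n", accessed(&n));   /* 0: may be matched again */
          return 0;
  }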
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4d96bdd..a2d5148 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1338,6 +1338,15 @@ static inline void mark_lock_accessed(struct lock_list *lock,
lock->class->dep_gen_id = lockdep_dependency_gen_id;
}
+static inline void mark_lock_unaccessed(struct lock_list *lock)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
+ lock->class->dep_gen_id--;
+}
+
static inline unsigned long lock_accessed(struct lock_list *lock)
{
unsigned long nr;
--
1.8.3.1
When computing a chain's hash key, we need to consider a held lock's
read-write type, so the additional data fed to the Jenkins hash algorithm
is a composite of the new held lock's lock class index (lower 16 bits) and
its read-write type (higher 16 bits), as opposed to just the class index
before:
held lock type (u16) : lock class index (u16)
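For illustration, a standalone sketch (not the kernel code) of how such a
composite value can be formed, with the class index in the lower 16 bits
and the held lock's read-write type in the upper 16 bits:

  #include <stdio.h>
  #include <stdint.h>

  #define LOCK_TYPE_BITS 16

  static uint32_t chain_hash_input(uint32_t class_idx, uint16_t hlock_type)
  {
          /* hlock type (u16) : lock class index (u16) */
          return class_idx + ((uint32_t)hlock_type << LOCK_TYPE_BITS);
  }

  int main(void)
  {
          /* class index 42 (0x2a), read lock (type 1) -> 0x0001002a */
          printf("0x%08x\n", chain_hash_input(42, 1));
          return 0;
  }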
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 46 ++++++++++++++++++++++++++++++++--------------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0456f75..fed5d11 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -359,11 +359,19 @@ struct pending_free {
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
+ *
+ * The additional u32 data to hash is a composite of the new held lock's
+ * lock class index (lower 16 bits) and its read-write type (higher 16
+ * bits):
+ *
+ * hlock type (u16) : lock class index (u16)
*/
-static inline u64 iterate_chain_key(u64 key, u32 idx)
+static inline u64 iterate_chain_key(u64 key, u32 idx, u16 hlock_type)
{
u32 k0 = key, k1 = key >> 32;
+ idx += hlock_type << LOCK_TYPE_BITS;
+
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
return k0 | (u64)k1 << 32;
@@ -871,7 +879,8 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
/*
* The 'unsigned long long' casts avoid that a compiler warning
* is reported when building tools/lib/lockdep.
@@ -2699,9 +2708,9 @@ static inline int get_first_held_lock(struct task_struct *curr,
/*
* Returns the next chain_key iteration
*/
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key, int lock_type)
{
- u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+ u64 new_chain_key = iterate_chain_key(chain_key, class_idx, lock_type);
printk(" class_idx:%d -> chain_key:%016Lx",
class_idx,
@@ -2721,12 +2730,15 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
- chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+ chain_key = print_chain_key_iteration(hlock->class_idx,
+ chain_key,
+ hlock->read);
print_lock(hlock);
}
- print_chain_key_iteration(hlock_next->class_idx, chain_key);
+ print_chain_key_iteration(hlock_next->class_idx, chain_key,
+ hlock_next->read);
print_lock(hlock_next);
}
@@ -2734,12 +2746,14 @@ static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
- int class_id;
+ int class_id, lock_type;
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
- chain_key = print_chain_key_iteration(class_id, chain_key);
+ lock_type = chain_hlocks_type[chain->base + i];
+ chain_key = print_chain_key_iteration(class_id, chain_key,
+ lock_type);
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2780,7 +2794,7 @@ static int check_no_collision(struct task_struct *curr,
struct lock_chain *chain)
{
#ifdef CONFIG_DEBUG_LOCKDEP
- int i, j, id;
+ int i, j, id, type;
i = get_first_held_lock(curr, hlock);
@@ -2789,10 +2803,12 @@ static int check_no_collision(struct task_struct *curr,
return 0;
}
- for (j = 0; j < chain->depth - 1; j++, i++) {
+ for (j = chain->base; j < chain->base + chain->depth - 1; j++, i++) {
id = curr->held_locks[i].class_idx;
+ type = curr->held_locks[i].read;
- if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+ if (DEBUG_LOCKS_WARN_ON((chain_hlocks[j] != id) ||
+ (chain_hlocks_type[j] != type))) {
print_collision(curr, hlock, chain);
return 0;
}
@@ -3078,7 +3094,8 @@ static void check_chain_key(struct task_struct *curr)
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = INITIAL_CHAIN_KEY;
- chain_key = iterate_chain_key(chain_key, hlock->class_idx);
+ chain_key = iterate_chain_key(chain_key, hlock->class_idx,
+ hlock->read);
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
@@ -4001,7 +4018,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
chain_key = INITIAL_CHAIN_KEY;
chain_head = 1;
}
- chain_key = iterate_chain_key(chain_key, class_idx);
+ chain_key = iterate_chain_key(chain_key, class_idx, read);
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
print_lock_nested_lock_not_held(curr, hlock, ip);
@@ -4845,7 +4862,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
if (chain->depth && chain->chain_key == chain_key)
return;
/* Overwrite the chain key for concurrent RCU readers. */
--
1.8.3.1
Deadlock checks are performed at two places:
- Within the current task's held lock stack, check for lock recursion deadlock.
- Within the dependency graph, check for lock inversion deadlock.
Rename the two relevant functions for later use. In addition, with read
locks, a dependency circle in the graph is no longer a sufficient condition
for lock inversion deadlock, so the name check_noncircular() is not
entirely accurate anymore.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 27ca55f..4adaf27 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1771,8 +1771,8 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
* Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct held_lock *src, struct held_lock *target,
- struct lock_trace *trace)
+check_deadlock_graph(struct held_lock *src, struct held_lock *target,
+ struct lock_trace *trace)
{
int ret;
struct lock_list *uninitialized_var(target_entry);
@@ -2385,7 +2385,8 @@ static inline void inc_chains(void)
}
/*
- * Check whether we are holding such a class already.
+ * Check whether we are holding such a class already in current
+ * context's held lock stack.
*
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
@@ -2396,7 +2397,7 @@ static inline void inc_chains(void)
* 2: LOCK_TYPE_RECURSIVE on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next)
+check_deadlock_current(struct task_struct *curr, struct held_lock *next)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2480,7 +2481,7 @@ static inline void inc_chains(void)
/*
* Prove that the new <prev> -> <next> dependency would not
- * create a circular dependency in the graph. (We do this by
+ * create a deadlock scenario in the graph. (We do this by
* a breadth-first search into the graph starting at <next>,
* and check whether we can reach <prev>.)
*
@@ -2488,7 +2489,7 @@ static inline void inc_chains(void)
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- ret = check_noncircular(next, prev, trace);
+ ret = check_deadlock_graph(next, prev, trace);
if (unlikely(ret <= 0))
return 0;
@@ -2983,7 +2984,7 @@ static int validate_chain(struct task_struct *curr,
* The simple case: does the current hold the same lock
* already?
*/
- int ret = check_deadlock(curr, hlock);
+ int ret = check_deadlock_current(curr, hlock);
if (!ret)
return 0;
--
1.8.3.1
- find_lock_in_path() checks whether a lock class is in the path and
  returns the dependency if so.
- find_next_dep_in_path() returns the next dependency after a
given dependency in the path.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4091002..0617375 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1365,6 +1365,37 @@ static inline int get_lock_depth(struct lock_list *child)
}
/*
+ * Return the dependency if @lock is in the path, otherwise NULL.
+ */
+static inline struct lock_list *
+find_lock_in_path(struct lock_class *lock, struct lock_list *target)
+{
+ while ((target = get_lock_parent(target)))
+ if (target->class == lock)
+ return target;
+
+ return NULL;
+}
+
+/*
+ * Walk back to the next dependency after @source from @target. Note
+ * that @source must be in the path, and @source cannot be the same as
+ * @target, otherwise this is going to fail and return NULL.
+ */
+static inline struct lock_list *
+find_next_dep_in_path(struct lock_list *source, struct lock_list *target)
+{
+ while (get_lock_parent(target) != source) {
+ target = get_lock_parent(target);
+
+ if (!target)
+ break;
+ }
+
+ return target;
+}
+
+/*
* Return the forward or backward dependency list.
*
* @lock: the lock_list to get its class's dependency list
--
1.8.3.1
Sorry, I forgot to mention that the patchset is based on my previous small
improvements:
[PATCH v2 00/23] locking/lockdep: Small improvements
(https://lkml.org/lkml/2019/5/6/106).
A read-write lock is different from an exclusive lock only in that there
can be concurrent read locks, while a write lock is essentially the same
as an exclusive lock.
Read-write locks have not been well supported in deadlock detection so
far. This patch series designs and implements an algorithm to add this
important missing feature to lockdep.
To articulate the algorithm plainly, let's first give an abstract problem
statement. To make the problem simple and easy to describe, recursive-read
locks are not considered; they will be covered later, and in fact
recursive-read locks are easy once the read-lock problem is solved.
Waiting relations in a circular fashion are at the heart of a deadlock:
a circle is universally *necessary* for any deadlock, albeit not
*sufficient* with read-write locks. A deadlock circle can be arbitrarily
long with many tasks contributing some arcs. But no matter how long the
circle is, it has to complete with a final arc, so the problem to solve
can be stated as:
Lemma #1
--------
Detecting deadlock is to find when such a circle is going to complete
at the *final* arc.
Assume n tasks contribute to that circle, denoted as T_1, T_2, ..., T_n.
These tasks can be grouped as (T_1, ..., T_n-1) and T_n, and by virtually
combining the former group we get a big T1 and T2 (with task numbers
adjusted). This essentially means:
Lemma #2
--------
Two tasks can virtually represent any situation with any number of
tasks to check for deadlock.
Actually, how long the circle is does not really matter, since the problem
as stated is what difference the final missing arc can make: deadlock or
not. Therefore, we need just a few locks that are enough to represent all
the possibilities, which leads to:
Lemma #3
--------
Any deadlock scenario can be converted to an ABBA deadlock.
where AB comes from T1 and BA from T2 (T1 and T2 are made by Lemma #2),
which says any other associated locks in the graph are not critical or
necessary and thus may be ignored. For example:
T1 T2
-- --
L1 L7
L2 L2
A B
L3 L3
L4 L8
B A [Deadlock]
L5
L6
from a deadlock perspective is equivalent to an ABBA:
T1 T2
-- --
A B
B A [Deadlock]
Besides the lemmas, three facts are relevant to the problem: (a) with a
new arc, determining whether it completes a circle is an easy task, (b)
a new direct arc (a direct dependency) can introduce a number of
indirect arcs (indirect dependencies), and (c) checking all the
additional arcs (direct and indirect) efficiently is not so easy, since
lock operations are frequent and the lock graph can be gigantic.
Actually, if it were free to check any number of arcs, deadlock detection
even with read-write locks would be fairly easy. That said, performance
is the real difficulty, so a good algorithm should not only self-evidently
solve the problem but also do so at low cost.
Here we make a start at solving it!
Having grasped the problem statement, we are good to proceed to a
divide-and-conquer approach to the solution: the entire problem is
broken down into a comprehensive list of simple and abstract problem
cases to solve, and if each and every one of them is solved, the entire
problem is solved.
The division is based on the type of the final arc or dependency in T2.
Based on Lemma #2, we use only two tasks in the following discussion, and
based on Lemma #3, these cases are all ABBAs. To be concise, in the
following the symbol R stands for a read lock, W stands for a write lock
or exclusive lock, and X stands for either R or W.
---------------------------------------------------------------
When the final dependency ends with read lock and read lock
---------------------------------------------------------------
Case #1:
T1 T2
-- --
W1 R2
W2 R1 [Deadlock]
Case #2:
T1 T2
X1 R2
R2 R1 [No deadlock]
------------------------------------------------------
When the final dependency ends with write lock and write lock
------------------------------------------------------
Case #3:
T1 T2
-- --
X1 W2
X2 W1 [Deadlock]
-----------------------------------------------------
When the final dependency ends with read lock and write lock
-----------------------------------------------------
Note that the case where the final dependency ends with write lock and
read lock is essentially the same as this one, so that case is omitted.
Case #4:
T1 T2
-- --
X1 R2
W2 W1 [Deadlock]
Case #5:
T1 T2
-- --
X1 R2
R2 W1 [No deadlock]
Solving the above cases (no false positive or false negative) is
actually fairly easy to do; we therefore have our first *Simple
Algorithm*:
----------------
Simple Algorithm
----------------
Step 1
------
Keep track of each dependency's read or write ends. There is a
combination of four types:
- read -> read
- read -> write
- write -> read
- write -> write
Step 2
------
The redundancy check (for whether to add a dependency into the graph) for a
direct dependency needs to be beefed up to consider the dependency's read-
or write-ended types: a direct dependency is redundant to an indirect
dependency only if their ends have the same types. However, for
simplicity, direct dependencies can be added right away, and if the
dependency to add already exists, it can simply be "upgraded": update the
end type towards more exclusive (exclusiveness increases from recursive
read -> read -> write).
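For illustration, a minimal standalone sketch (not the patch's code) of
what such an upgrade could look like, assuming the lock_type[] encoding
introduced earlier in the series (0 = write/exclusive, 1 = read,
2 = recursive read); the constant names and the upgrade_dep() helper are
illustrative only. A smaller value is more exclusive, so upgrading means
taking the minimum:

  #include <stdio.h>
  #include <stdint.h>

  /* Illustrative names; the values follow the encoding in this series. */
  enum { TYPE_WRITE = 0, TYPE_READ = 1, TYPE_RECURSIVE = 2 };

  /* Mirrors the lock_type[] pair kept per direct dependency. */
  struct dep { uint16_t lock_type[2]; };

  /* Keep the more exclusive (smaller) type for each end. */
  static void upgrade_dep(struct dep *d, uint16_t t0, uint16_t t1)
  {
          if (t0 < d->lock_type[0])
                  d->lock_type[0] = t0;
          if (t1 < d->lock_type[1])
                  d->lock_type[1] = t1;
  }

  int main(void)
  {
          struct dep d = { { TYPE_READ, TYPE_RECURSIVE } };

          /* The same dependency is seen again as write -> read. */
          upgrade_dep(&d, TYPE_WRITE, TYPE_READ);
          printf("%u %u\n", d.lock_type[0], d.lock_type[1]);   /* 0 1 */
          return 0;
  }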
Step 3
------
Traverse the dependency graph to find whether a deadlock circle can be
formed by adding a new direct dependency. There can be circles that are
not deadlocks. In order to find a deadlock circle efficiently, the new
dependency's read lock or read locks, if present, can *start* from and/or
*end* at write-ended dependencies correspondingly. As a result, Step 3
can easily avoid false negatives such as the following case:
Case #6:
T1 T2
-- --
R1
R2
(R1 R2 released)
W1 R2
W2 R1 [Deadlock]
This concludes a loose description of the *Simple Algorithm*.
I wish we lived in a fairy-tale world where the problem could be solved
that easily, but the reality is not so. Huge false negatives owing to
indirect dependencies can appear, as illustrated by the following case,
which we still have to solve:
Case #7:
T1 T2
-- --
X1 X3
R2 R2
X3 X1 [Deadlock]
where the X1's and X3's in the two tasks create a deadlock scenario (each
may be one of the deadlock cases above). When checking the direct
dependency R2 -> X1, there is no obvious deadlock according to our *Simple
Algorithm*; however, under the hood the actual deadlock is formed after R2
introduces an indirect dependency X3 -> X1, which could comfortably be
missed.
To detect deadlock scenarios like Case #7, a naive option is to check all
additional indirect dependencies, but this would be far too inefficient
and thus is simply passed on. To find an efficient solution instead, let's
first contrive a no-deadlock Case #8 for comparison (which is essentially
rewritten from Case #5).
Case #8:
T1 T2
-- --
X1
X3 R2
R2 X1 [No deadlock]
Having considered Case #7 and Case #8, a final working algorithm can be
formulated:
---------------
Final Algorithm
---------------
This *Final Algorithm* is beefed up from Simple Algorithm using the
following lemmas:
Lemma #4
--------
The direct dependency R2 -> X1 that serves in the path from X3 -> X1 is
*critical*.
Although the actual deadlock in Case #7 cannot easily be found by our
Simple Algorithm, by strengthening the algorithm somewhat the deadlock
*definitely* can be found from the direct dependency (i.e., R2 -> X1 in
Case #7). In other words, the critical direct dependency (a final arc)
suffices to find any deadlock if there is one; otherwise there is no
deadlock. As a matter of fact, after a false deadlock (R2 -> X1 -> R2),
if the search continues, the true deadlock (R2 -> X1 -> R2 -> X3 -> R2)
will eventually be brought out from under the hood.
Lemma #5
--------
The game changer in Case #7, which Case #8 misses, is that Case #7 has X3
in T2 whereas Case #8 does not.
Having considered this, our *Final Algorithm* can be adjusted from the
*Simple Algorithm* by adding:
(a). Continue searching the graph to find a new circle.
(b). In the new circle, if previous locks in T2's stack (or chain) are in
it, then check whether the circle is indeed a deadlock. This is
done by checking each newly introduced indirect dependency (such as
X3 -> X1 in Case #7) according to our Simple Algorithm as before.
(c). If a deadlock is found then the algorithm is done; otherwise go to
(a) unless the entire graph has been traversed.
Lemma #6
--------
Lemma #5 nicely raises the question whether an indirect dependency
involving a previous lock in T2 is *necessary* to consider. The answer is
yes; otherwise our *Simple Algorithm* would already have solved the problem.
Lemma #7
--------
It is also natural to ask whether considering only the indirect
dependencies in T2 is *sufficient*: what if the indirect dependency
(partly) has dependencies from T1? The answer is yes, it is sufficient.
Lemma #2 and Lemma #3 say that any deadlock is an ABBA, so T1 can only
contribute an AB and T2 must have a BA. Since we assumed T1 has no
deadlock and Lemma #4 says the new dependency is *critical*, any deadlock
formed by the new direct or indirect dependencies introduced in T2 (which
is the BA part) will definitely be found by the *Simple Algorithm* or the
*Final Algorithm* respectively.
This is perhaps the most subtle and difficult part of this algorithm. To
test that Lemma #7 holds true, one may try to contrive a case based on
Case #8, or freely generate a deadlock case if possible.
Anyway, any new cases are welcome. Cases matter for this algorithm
because, as stated before, it solves the read-write lock deadlock
detection problem by having solved all the contrived cases (be they
deadlock or no deadlock). If a case is not covered here, it may well
defeat this algorithm; otherwise the algorithm works soundly and
completely.
This concludes a loose description of the *Final Algorithm*.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 133 ++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 120 insertions(+), 13 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fed5d11..26690f88 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -85,6 +85,7 @@
*/
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;
+static bool inside_selftest(void);
static int graph_lock(void)
{
@@ -1788,13 +1789,16 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
* lead to <target>. If it can, there is a circle when adding
* <target> -> <src> dependency.
*
+ * If there is a circle, there may be a deadlock.
+ *
* Print an error and return 0 if it does.
*/
static noinline int
-check_deadlock_graph(struct held_lock *src, struct held_lock *target,
- struct lock_trace *trace)
+check_deadlock_graph(struct task_struct *curr, struct held_lock *src,
+ struct held_lock *target, struct lock_trace *trace)
{
- int ret;
+ int ret, i;
+ bool init = true;
struct lock_list *uninitialized_var(target_entry);
struct lock_list src_entry = {
.class = hlock_class(src),
@@ -1803,19 +1807,122 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
+ while (true) {
+ ret = check_path(hlock_class(target), &src_entry,
+ &target_entry, init);
+ init = false;
+
+ /* Found a circle, is it deadlock? */
+ if (unlikely(!ret)) {
+ struct lock_list *parent;
- if (unlikely(!ret)) {
- if (!trace->nr_entries) {
/*
- * If save_trace fails here, the printing might
- * trigger a WARN but because of the !nr_entries it
- * should not do bad things.
+ * Note that we have an assumption that no lock
+ * class can be both read and recursive-read.
+ *
+ * Check this direct dependency.
+ *
+ * Step 1: next's lock type and source dependency's
+ * lock type are exclusive, no?
+ *
+ * Find the first dependency after source dependency.
*/
- save_trace(trace);
- }
+ parent = find_next_dep_in_path(&src_entry, target_entry);
+ if (!parent) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return -3;
+ }
+
+ if (src->read & get_lock_type1(parent))
+ goto cont;
+
+ /*
+ * Step 2: prev's lock type and target dependency's
+ * lock type are exclusive, yes?
+ */
+ if (!(target->read & get_lock_type2(target_entry)))
+ goto print;
+
+ /*
+ * Check indirect dependency from even further
+ * previous lock.
+ */
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *prev = curr->held_locks + i;
+
+ if (prev->irq_context != src->irq_context)
+ continue;
- print_circular_bug(&src_entry, target_entry, src, target);
+ /*
+ * We arrived at the same prev lock in this
+ * direct dependency checking.
+ */
+ if (prev == target)
+ break;
+
+ /*
+ * Since the src lock (the next lock to
+ * acquire) is neither a recursive nor a
+ * nested lock, this prev class cannot be
+ * the src class. Then, does the path
+ * have this previous lock?
+ *
+ * With read locks it is possible that a
+ * lock reoccurs in a path. For example:
+ *
+ * -> RL1 -> RL2 -> RL3 -> RL1 -> ...
+ *
+ * and for another three examples:
+ *
+ * Ex1: -> RL1 -> WL2 -> RL3 -> RL1
+ * Ex2: -> WL1 -> RL2 -> RL3 -> WL1
+ * Ex3: -> RL1 -> RL2 -> RL3 -> WL1
+ *
+ * This means that a path may encounter a
+ * lock twice or more; however, we use the
+ * breadth-first search algorithm, which
+ * finds the shortest path, and this means
+ * the path cannot have the same (middle)
+ * lock multiple times.
+ * However, is Ex3 a problem?
+ */
+ parent = find_lock_in_path(hlock_class(prev),
+ target_entry);
+ if (parent) {
+ /*
+ * Yes, we have a candidate indirect
+ * dependency to check.
+ *
+ * Again step 2: new prev's lock
+ * type and its dependency in graph
+ * are exclusive, yes?
+ *
+ * Note that we don't need step 1
+ * again.
+ */
+ if (!(prev->read & get_lock_type2(parent)))
+ goto print;
+ }
+ }
+cont:
+ mark_lock_unaccessed(target_entry);
+ continue;
+print:
+ if (!trace->nr_entries) {
+ /*
+ * If save_trace fails here, the printing
+ * might trigger a WARN but because of the
+ * !nr_entries it should not do bad things.
+ */
+ save_trace(trace);
+ }
+
+ print_circular_bug(&src_entry, target_entry,
+ src, target);
+ break;
+ } else
+ /* The graph is all traversed or an error occurred */
+ break;
}
return ret;
@@ -2510,7 +2617,7 @@ static inline void inc_chains(void)
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- ret = check_deadlock_graph(next, prev, trace);
+ ret = check_deadlock_graph(curr, next, prev, trace);
if (unlikely(ret <= 0))
return 0;
--
1.8.3.1
The next lock to acquire has its lock type set already, so there is no
need to reassign it when it is a recursive read. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 7 -------
1 file changed, 7 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 26690f88..c94c105 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3128,13 +3128,6 @@ static int validate_chain(struct task_struct *curr,
if (!ret)
return 0;
/*
- * Mark recursive read, as we jump over it when
- * building dependencies (just like we jump over
- * trylock entries):
- */
- if (ret == LOCK_TYPE_RECURSIVE)
- hlock->read = LOCK_TYPE_RECURSIVE;
- /*
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
--
1.8.3.1
Let's make sure our 8 cases can be correctly handled. In contrast, before
this patchset, these 8 cases had 24 failures:
----------------------------------------------------------------------------
| spin |wlock |rlock |mutex | wsem | rsem |
--------------------------------------------------------------------------
read-write lock ABBA Case #1: |FAILED| | ok |
read-write lock ABBA Case #2a: | ok | |FAILED|
read-write lock ABBA Case #2b: | ok | |FAILED|
read-write lock ABBA Case #3a: |FAILED| | ok |
read-write lock ABBA Case #3b: |FAILED| | ok |
read-write lock ABBA Case #3c: |FAILED| | ok |
read-write lock ABBA Case #3d: | ok | | ok |
read-write lock ABBA Case #4a: |FAILED| | ok |
read-write lock ABBA Case #4b: |FAILED| | ok |
read-write lock ABBA Case #5a: | ok | |FAILED|
read-write lock ABBA Case #5b: | ok | |FAILED|
read-write lock ABBA Case #6a: |FAILED| | ok |
read-write lock ABBA Case #6b: |FAILED| | ok |
read-write lock ABBA Case #6c: |FAILED| | ok |
read-write lock ABBA Case #7a: | ok | | ok |
read-write lock ABBA Case #7b: |FAILED| | ok |
read-write lock ABBA Case #7c: |FAILED| | ok |
read-write lock ABBA Case #7d: |FAILED| | ok |
read-write lock ABBA Case #8.1a: | ok | |FAILED|
read-write lock ABBA Case #8.1b: | ok | |FAILED|
read-write lock ABBA Case #8.1c: | ok | |FAILED|
read-write lock ABBA Case #8.1d: | ok | |FAILED|
read-write lock ABBA Case #8.2a: | ok | |FAILED|
read-write lock ABBA Case #8.2b: | ok | |FAILED|
read-write lock ABBA Case #8.2c: | ok | |FAILED|
read-write lock ABBA Case #8.2d: | ok | |FAILED|
--------------------------------------------------------------------------
Note that even though many of the cases passed, that is simply because
recursive-read locks are *not* considered.
This patch marks the finish of the implementation of the read-write lock
deadlock detection algorithm. Looking forward, there may be some
ramifications:
(1) Some previous false positives involving read locks should (hopefully)
    not be false positives anymore, so however such a false positive was
    resolved, it is worth a second look.
(2) With recursive-read lock dependencies in the graph, there may be new
    deadlock scenarios that could never be discovered before. Admittedly,
    they include both true and possibly false positives.
Have fun and brace for impact!
Signed-off-by: Yuyang Du <[email protected]>
---
lib/locking-selftest.c | 1022 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 1022 insertions(+)
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 4c6dd8a..52d5494 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -461,6 +461,872 @@ static void rwsem_ABBA3(void)
}
/*
+ * Read-write lock ABBA cases.
+ *
+ * Notation:
+ * R: read lock
+ * W: write lock
+ * X: either read or write lock
+ *
+ * Case #1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * W1 R2
+ * W2 R1 [Deadlock]
+ */
+static void rlock_ABBA_case1(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case1(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * Case #2:
+ *
+ * T1 T2
+ *
+ * X1 R2
+ * R2 R1 [No deadlock]
+ */
+static void rlock_ABBA_case2a(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case2a(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case2b(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case2b(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * Case #3:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 W2
+ * X2 W1 [Deadlock]
+ */
+static void rlock_ABBA_case3a(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rwsem_ABBA_case3a(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rlock_ABBA_case3b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rwsem_ABBA_case3b(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rlock_ABBA_case3c(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rwsem_ABBA_case3c(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+static void rlock_ABBA_case3d(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ WL(Y1);
+ WL(X1);
+ WU(X1);
+ WU(Y1);
+}
+
+static void rwsem_ABBA_case3d(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ WSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ WSU(Y1);
+}
+
+/*
+ * Case #4:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * W2 W1 [Deadlock]
+ */
+static void rlock_ABBA_case4a(void)
+{
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case4a(void)
+{
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case4b(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case4b(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * Case #5:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 R2
+ * R2 W1 [No deadlock]
+ */
+static void rlock_ABBA_case5a(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case5a(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case5b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case5b(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * Case #6:
+ *
+ * T1 T2
+ * -- --
+ *
+ * R1
+ * R2
+ *
+ * (R1 R2 released)
+ *
+ * W1 R2
+ * W2 R1 [Deadlock]
+ */
+static void rlock_ABBA_case6a(void)
+{
+ RL(X1);
+ RL(Y1);
+ RU(Y1);
+ RU(X1);
+
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case6a(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case6b(void)
+{
+ WL(X1);
+ RL(Y1);
+ RU(Y1);
+ WU(X1);
+
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case6b(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case6c(void)
+{
+ RL(X1);
+ WL(Y1);
+ WU(Y1);
+ RU(X1);
+
+ WL(X1);
+ WL(Y1);
+ WU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ RL(X1);
+ RU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case6c(void)
+{
+ RSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ RSU(X1);
+
+ WSL(X1);
+ WSL(Y1);
+ WSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ RSL(X1);
+ RSU(X1);
+ RSU(Y1);
+}
+
+/*
+ * Case #7:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1 X3
+ * R2 R2
+ * X3 X1 [Deadlock]
+ */
+static void rlock_ABBA_case7a(void)
+{
+ WL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rwsem_ABBA_case7a(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ WSL(Z1);
+ WSU(Z1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(Z1);
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+ WSU(Z1);
+}
+
+static void rlock_ABBA_case7b(void)
+{
+ RL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rwsem_ABBA_case7b(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ WSL(Z1);
+ WSU(Z1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(Z1);
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+ WSU(Z1);
+}
+
+static void rlock_ABBA_case7c(void)
+{
+ WL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rwsem_ABBA_case7c(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSL(Z1);
+ RSU(Z1);
+ RSU(Y1);
+ WSU(X1);
+
+ WSL(Z1);
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+ WSU(Z1);
+}
+
+static void rlock_ABBA_case7d(void)
+{
+ RL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ WL(Z1);
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+ WU(Z1);
+}
+
+static void rwsem_ABBA_case7d(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSL(Z1);
+ RSU(Z1);
+ RSU(Y1);
+ RSU(X1);
+
+ WSL(Z1);
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+ WSU(Z1);
+}
+
+/*
+ * Case #8.1:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1
+ * X3 R2
+ * R2 X1 [No deadlock]
+ */
+static void rlock_ABBA_case81a(void)
+{
+ WL(X1);
+ WL(Z1);
+ RL(Y1);
+ RU(Y1);
+ WU(Z1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case81a(void)
+{
+ WSL(X1);
+ WSL(Z1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(Z1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case81b(void)
+{
+ RL(X1);
+ WL(Z1);
+ RL(Y1);
+ RU(Y1);
+ WU(Z1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case81b(void)
+{
+ RSL(X1);
+ WSL(Z1);
+ RSL(Y1);
+ RSU(Y1);
+ WSU(Z1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case81c(void)
+{
+ WL(X1);
+ RL(Z1);
+ RL(Y1);
+ RU(Y1);
+ RU(Z1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case81c(void)
+{
+ WSL(X1);
+ RSL(Z1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(Z1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case81d(void)
+{
+ RL(X1);
+ RL(Z1);
+ RL(Y1);
+ RU(Y1);
+ RU(Z1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case81d(void)
+{
+ RSL(X1);
+ RSL(Z1);
+ RSL(Y1);
+ RSU(Y1);
+ RSU(Z1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+
+/*
+ * Case #8.2:
+ *
+ * T1 T2
+ * -- --
+ *
+ * X1
+ * R2 R2
+ * X3 X1 [No deadlock]
+ */
+static void rlock_ABBA_case82a(void)
+{
+ WL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case82a(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ WSL(Z1);
+ WSU(Z1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case82b(void)
+{
+ RL(X1);
+ RL(Y1);
+ WL(Z1);
+ WU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case82b(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ WSL(Z1);
+ WSU(Z1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case82c(void)
+{
+ WL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ WU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case82c(void)
+{
+ WSL(X1);
+ RSL(Y1);
+ RSL(Z1);
+ RSU(Z1);
+ RSU(Y1);
+ WSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+static void rlock_ABBA_case82d(void)
+{
+ RL(X1);
+ RL(Y1);
+ RL(Z1);
+ RU(Z1);
+ RU(Y1);
+ RU(X1);
+
+ RL(Y1);
+ WL(X1);
+ WU(X1);
+ RU(Y1);
+}
+
+static void rwsem_ABBA_case82d(void)
+{
+ RSL(X1);
+ RSL(Y1);
+ RSL(Z1);
+ RSU(Z1);
+ RSU(Y1);
+ RSU(X1);
+
+ RSL(Y1);
+ WSL(X1);
+ WSU(X1);
+ RSU(Y1);
+}
+
+/*
* ABBA deadlock:
*
* Should fail except for either A or B is read lock.
@@ -2071,6 +2937,162 @@ void locking_selftest(void)
pr_cont(" |");
dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+ print_testname("read-write lock ABBA Case #1");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case1, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case1, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #2a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case2a, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2a, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #2b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case2b, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case2b, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #3a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case3a, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case3a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #3b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case3b, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case3b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #3c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case3c, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case3c, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #3d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case3d, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case3d, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case4a, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #4b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case4b, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case4b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #5a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case5a, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case5a, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #5b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case5b, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case5b, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #6a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6a, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case6a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #6b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6b, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case6b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #6c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case6c, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case6c, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #7a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7a, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case7a, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #7b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7b, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case7b, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #7c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7c, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case7c, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #7d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case7d, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case7d, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.1a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case81a, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case81a, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.1b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case81b, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case81b, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.1c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case81c, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case81c, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.1d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case81d, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case81d, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.2a");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case82a, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case82a, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.2b");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case82b, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case82b, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.2c");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case82c, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case82c, SUCCESS, LOCKTYPE_RWSEM);
+
+ print_testname("read-write lock ABBA Case #8.2d");
+ pr_cont(" |");
+ dotest(rlock_ABBA_case82d, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA_case82d, SUCCESS, LOCKTYPE_RWSEM);
+
printk(" --------------------------------------------------------------------------\n");
/*
--
1.8.3.1