Hi Peter,
Let me post these small bits first while waiting for Frederic's patches
to be merged.
This version should have addressed the comments you made. Thanks so much
for the review.
Thanks,
Yuyang
--
Yuyang Du (23):
locking/lockdep: Change all print_*() return type to void
locking/lockdep: Add description and explanation in lockdep design doc
locking/lockdep: Adjust lock usage bit character checks
locking/lockdep: Remove useless conditional macro
locking/lockdep: Print the right depth for chain key collision
locking/lockdep: Update obsolete struct field description
locking/lockdep: Use lockdep_init_task for task initialization
consistently
locking/lockdep: Define INITIAL_CHAIN_KEY for chain keys to start with
locking/lockdep: Change the range of class_idx in held_lock struct
locking/lockdep: Remove unused argument in validate_chain() and
check_deadlock()
locking/lockdep: Update comment
locking/lockdep: Change type of the element field in circular_queue
locking/lockdep: Change the return type of __cq_dequeue()
locking/lockdep: Avoid constant checks in __bfs by using offset
reference
locking/lockdep: Update comments on dependency search
locking/lockdep: Add explanation to lock usage rules in lockdep design
doc
locking/lockdep: Remove redundant argument in check_deadlock
locking/lockdep: Remove unused argument in __lock_release
locking/lockdep: Refactor check_noncircular and check_redundant
locking/lockdep: Check redundant dependency only when
CONFIG_LOCKDEP_SMALL
locking/lockdep: Consolidate lock usage bit initialization
locking/lockdep: Adjust new bit cases in mark_lock
locking/lockdep: Remove !dir in lock irq usage check
Documentation/locking/lockdep-design.txt | 112 ++++--
include/linux/lockdep.h | 32 +-
init/init_task.c | 2 +
kernel/fork.c | 3 -
kernel/locking/lockdep.c | 607 ++++++++++++++++++-------------
5 files changed, 461 insertions(+), 295 deletions(-)
--
1.8.3.1
Since none of the print_*() functions' return values is used, change
their return type to void. No functional change.
In cases where a constant return value is used, this change slightly
improves readability, i.e.:
print_x();
return 0;
is definitely better than:
return print_x(); /* where print_x() always returns 0 */
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 209 ++++++++++++++++++++++++-----------------------
1 file changed, 108 insertions(+), 101 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 27b992f..a019330 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1422,16 +1422,15 @@ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
-static noinline int
+static noinline void
print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
- return 0;
+ return;
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(KERN_CONT ":\n");
print_lock_trace(&target->trace, 6);
- return 0;
}
static void
@@ -1488,7 +1487,7 @@ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
* When a circular dependency is detected, print the
* header first:
*/
-static noinline int
+static noinline void
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct held_lock *check_src,
struct held_lock *check_tgt)
@@ -1496,7 +1495,7 @@ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
struct task_struct *curr = current;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("======================================================\n");
@@ -1514,8 +1513,6 @@ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
print_circular_bug_entry(entry, depth);
-
- return 0;
}
static inline int class_equal(struct lock_list *entry, void *data)
@@ -1523,10 +1520,10 @@ static inline int class_equal(struct lock_list *entry, void *data)
return entry->class == data;
}
-static noinline int print_circular_bug(struct lock_list *this,
- struct lock_list *target,
- struct held_lock *check_src,
- struct held_lock *check_tgt)
+static noinline void print_circular_bug(struct lock_list *this,
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
struct lock_list *parent;
@@ -1534,10 +1531,10 @@ static noinline int print_circular_bug(struct lock_list *this,
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
if (!save_trace(&this->trace))
- return 0;
+ return;
depth = get_lock_depth(target);
@@ -1559,21 +1556,17 @@ static noinline int print_circular_bug(struct lock_list *this,
printk("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
-static noinline int print_bfs_bug(int ret)
+static noinline void print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
- return 0;
+ return;
/*
* Breadth-first-search failed, graph got corrupted?
*/
WARN(1, "lockdep bfs error:%d\n", ret);
-
- return 0;
}
static int noop_count(struct lock_list *entry, void *data)
@@ -1762,7 +1755,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
- struct lock_list *root)
+ struct lock_list *root)
{
struct lock_list *entry = leaf;
int depth;
@@ -1784,8 +1777,6 @@ static void print_lock_class_header(struct lock_class *class, int depth)
entry = get_lock_parent(entry);
depth--;
} while (entry && (depth >= 0));
-
- return;
}
static void
@@ -1844,7 +1835,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
printk("\n *** DEADLOCK ***\n\n");
}
-static int
+static void
print_bad_irq_dependency(struct task_struct *curr,
struct lock_list *prev_root,
struct lock_list *next_root,
@@ -1857,7 +1848,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
const char *irqclass)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=====================================================\n");
@@ -1903,19 +1894,17 @@ static void print_lock_class_header(struct lock_class *class, int depth)
pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
if (!save_trace(&prev_root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(backwards_entry, prev_root);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
if (!save_trace(&next_root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(forwards_entry, next_root);
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static const char *state_names[] = {
@@ -2062,8 +2051,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
this.class = hlock_class(prev);
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
usage_mask &= LOCKF_USED_IN_IRQ_ALL;
if (!usage_mask)
@@ -2079,8 +2070,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
that.class = hlock_class(next);
ret = find_usage_forwards(&that, forward_mask, &target_entry1);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
@@ -2092,8 +2085,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
backward_mask = original_mask(target_entry1->class->usage_mask);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (DEBUG_LOCKS_WARN_ON(ret == 1))
return 1;
@@ -2107,11 +2102,13 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
if (DEBUG_LOCKS_WARN_ON(ret == -1))
return 1;
- return print_bad_irq_dependency(curr, &this, &that,
- target_entry, target_entry1,
- prev, next,
- backward_bit, forward_bit,
- state_name(backward_bit));
+ print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
+ backward_bit, forward_bit,
+ state_name(backward_bit));
+
+ return 0;
}
static void inc_chains(void)
@@ -2142,8 +2139,7 @@ static inline void inc_chains(void)
#endif
static void
-print_deadlock_scenario(struct held_lock *nxt,
- struct held_lock *prv)
+print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
{
struct lock_class *next = hlock_class(nxt);
struct lock_class *prev = hlock_class(prv);
@@ -2161,12 +2157,12 @@ static inline void inc_chains(void)
printk(" May be due to missing lock nesting notation\n\n");
}
-static int
+static void
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("============================================\n");
@@ -2185,8 +2181,6 @@ static inline void inc_chains(void)
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -2228,7 +2222,8 @@ static inline void inc_chains(void)
if (nest)
return 2;
- return print_deadlock_bug(curr, prev, next);
+ print_deadlock_bug(curr, prev, next);
+ return 0;
}
return 1;
}
@@ -2303,10 +2298,13 @@ static inline void inc_chains(void)
*/
save_trace(trace);
}
- return print_circular_bug(&this, target_entry, next, prev);
+ print_circular_bug(&this, target_entry, next, prev);
+ return 0;
+ }
+ else if (unlikely(ret < 0)) {
+ print_bfs_bug(ret);
+ return 0;
}
- else if (unlikely(ret < 0))
- return print_bfs_bug(ret);
if (!check_irq_usage(curr, prev, next))
return 0;
@@ -2347,8 +2345,10 @@ static inline void inc_chains(void)
debug_atomic_inc(nr_redundant);
return 2;
}
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (!trace->nr_entries && !save_trace(trace))
@@ -2876,8 +2876,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
-static void
-print_usage_bug_scenario(struct held_lock *lock)
+static void print_usage_bug_scenario(struct held_lock *lock)
{
struct lock_class *class = hlock_class(lock);
@@ -2894,12 +2893,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
printk("\n *** DEADLOCK ***\n\n");
}
-static int
+static void
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("================================\n");
@@ -2929,8 +2928,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -2940,8 +2937,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
- if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
- return print_usage_bug(curr, this, bad_bit, new_bit);
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
+ print_usage_bug(curr, this, bad_bit, new_bit);
+ return 0;
+ }
return 1;
}
@@ -2949,7 +2948,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* print irq inversion bug:
*/
-static int
+static void
print_irq_inversion_bug(struct task_struct *curr,
struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
@@ -2960,7 +2959,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("========================================================\n");
@@ -3001,13 +3000,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
if (!save_trace(&root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(other, root);
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -3025,13 +3022,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, &root, target_entry,
- this, 1, irqclass);
+ print_irq_inversion_bug(curr, &root, target_entry,
+ this, 1, irqclass);
+ return 0;
}
/*
@@ -3049,13 +3049,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, &root, target_entry,
- this, 0, irqclass);
+ print_irq_inversion_bug(curr, &root, target_entry,
+ this, 0, irqclass);
+ return 0;
}
void print_irqtrace_events(struct task_struct *curr)
@@ -3598,15 +3601,15 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
-static int
+static void
print_lock_nested_lock_not_held(struct task_struct *curr,
struct held_lock *hlock,
unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("==================================\n");
@@ -3628,8 +3631,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -3778,8 +3779,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
chain_key = iterate_chain_key(chain_key, class_idx);
- if (nest_lock && !__lock_is_held(nest_lock, -1))
- return print_lock_nested_lock_not_held(curr, hlock, ip);
+ if (nest_lock && !__lock_is_held(nest_lock, -1)) {
+ print_lock_nested_lock_not_held(curr, hlock, ip);
+ return 0;
+ }
if (!debug_locks_silent) {
WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
@@ -3815,14 +3818,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
return 1;
}
-static int
-print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
- unsigned long ip)
+static void print_unlock_imbalance_bug(struct task_struct *curr,
+ struct lockdep_map *lock,
+ unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=====================================\n");
@@ -3840,8 +3843,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static int match_held_lock(const struct held_lock *hlock,
@@ -3960,8 +3961,10 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
@@ -4001,8 +4004,10 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -4046,16 +4051,20 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
* So we're all set to release this lock.. wait what lock? We don't
* own any locks, you've been drinking again?
*/
- if (DEBUG_LOCKS_WARN_ON(depth <= 0))
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (DEBUG_LOCKS_WARN_ON(depth <= 0)) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
/*
* Check whether the lock exists in the current stack
* of held locks:
*/
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
if (hlock->instance == lock)
lock_release_holdtime(hlock);
@@ -4398,14 +4407,14 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
EXPORT_SYMBOL_GPL(lock_unpin_lock);
#ifdef CONFIG_LOCK_STAT
-static int
-print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
- unsigned long ip)
+static void print_lock_contention_bug(struct task_struct *curr,
+ struct lockdep_map *lock,
+ unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=================================\n");
@@ -4423,8 +4432,6 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static void
--
1.8.3.1
The lock usage bit characters are defined and determined with some
tricks. Add an explanation to make them a bit clearer, then adjust the
logic that checks the usage, which also optimizes the code a bit.
No functional change.
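As an illustration, the decision reduces to two usage bits per
direction; a minimal standalone sketch of the equivalent logic (the
helper below is hypothetical, not the kernel code):

    /*
     * used_in_irq  - the lock was ever used in an irq context
     * irqs_enabled - the lock was ever acquired with irqs enabled
     */
    static char usage_char(bool used_in_irq, bool irqs_enabled)
    {
            if (irqs_enabled)
                    return used_in_irq ? '?' : '+';

            return used_in_irq ? '-' : '.';
    }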
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a019330..720d195 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -499,15 +499,26 @@ static inline unsigned long lock_flag(enum lock_usage_bit bit)
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
+ /*
+ * The usage character defaults to '.' (i.e., irqs disabled and not in
+ * irq context), which is the safest usage category.
+ */
char c = '.';
- if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+ /*
+ * The order of the following usage checks matters, which will
+ * result in the outcome character as follows:
+ *
+ * - '+': irq is enabled and not in irq context
+ * - '-': in irq context and irq is disabled
+ * - '?': in irq context and irq is enabled
+ */
+ if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
c = '+';
- if (class->usage_mask & lock_flag(bit)) {
- c = '-';
- if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+ if (class->usage_mask & lock_flag(bit))
c = '?';
- }
+ } else if (class->usage_mask & lock_flag(bit))
+ c = '-';
return c;
}
--
1.8.3.1
Since defined(CONFIG_PROVE_LOCKING) is only tested within the scope of
#ifdef CONFIG_PROVE_LOCKING, it can be removed.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 720d195..a0837a0 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1669,7 +1669,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return result;
}
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_TRACE_IRQFLAGS
static inline int usage_accumulate(struct lock_list *entry, void *mask)
{
@@ -2147,7 +2147,7 @@ static inline void inc_chains(void)
nr_process_chains++;
}
-#endif
+#endif /* CONFIG_TRACE_IRQFLAGS */
static void
print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
@@ -2828,7 +2828,7 @@ static inline int validate_chain(struct task_struct *curr,
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
}
-#endif
+#endif /* CONFIG_PROVE_LOCKING */
/*
* We are building curr_chain_key incrementally, so double-check
--
1.8.3.1
Chains are separated by irq context, so when printing a chain, the
depth should be consistent with that.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0837a0..222ee91 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2514,10 +2514,11 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
struct held_lock *hlock;
u64 chain_key = 0;
int depth = curr->lockdep_depth;
- int i;
+ int i = get_first_held_lock(curr, hlock_next);
- printk("depth: %u\n", depth + 1);
- for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+ printk("depth: %u (irq_context %u)\n", depth - i + 1,
+ hlock_next->irq_context);
+ for (; i < depth; i++) {
hlock = curr->held_locks + i;
chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
--
1.8.3.1
The lock_chain struct definition has an outdated comment; update it and
add descriptions for the struct members.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6e2377e..851d44f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -203,11 +203,17 @@ struct lock_list {
struct lock_list *parent;
};
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth: the number of held locks in this chain
+ * @base: the index in chain_hlocks for this chain
+ * @entry: the collided lock chains in lock_chain hash list
+ * @chain_key: the hash key of this lock_chain
*/
struct lock_chain {
- /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ /* see BUILD_BUG_ON()s in add_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
--
1.8.3.1
Although there is a lockdep_init_task() that does nothing, lockdep
initializes a task by assigning its lockdep fields directly, and does
so inconsistently. Fix this by doing all the initialization in
lockdep_init_task().
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 7 ++++++-
init/init_task.c | 2 ++
kernel/fork.c | 3 ---
kernel/locking/lockdep.c | 11 ++++++++---
4 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 851d44f..751a2cc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -287,6 +287,8 @@ struct held_lock {
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);
+extern inline void lockdep_init_task(struct task_struct *task);
+
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -411,6 +413,10 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -503,7 +509,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }
static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index c70ef65..1b15cb9 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -166,6 +166,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
+ .lockdep_depth = 0, /* no locks held yet */
+ .curr_chain_key = 0,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 9dcd18a..8ec9e78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1868,9 +1868,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;
#ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
lockdep_init_task(p);
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 222ee91..f02f7dc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -358,6 +358,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
}
+inline void lockdep_init_task(struct task_struct *task)
+{
+ task->lockdep_depth = 0; /* no locks held yet */
+ task->curr_chain_key = 0;
+ task->lockdep_recursion = 0;
+}
+
void lockdep_off(void)
{
current->lockdep_recursion++;
@@ -4588,9 +4595,7 @@ void lockdep_reset(void)
int i;
raw_local_irq_save(flags);
- current->curr_chain_key = 0;
- current->lockdep_depth = 0;
- current->lockdep_recursion = 0;
+ lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
--
1.8.3.1
Chain keys are computed using the Jenkins hash function, which needs an
initial hash to start with. Dedicate a macro to make this clear and
configurable. A later patch changes this initial chain key.
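For reference, a chain key is folded iteratively starting from this
macro; a condensed sketch of the recomputation loop, mirroring the
recalc loop in the patch below:

    u64 chain_key = INITIAL_CHAIN_KEY;
    int i;

    for (i = chain->base; i < chain->base + chain->depth; i++)
            chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);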
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 1 +
init/init_task.c | 2 +-
kernel/locking/lockdep.c | 18 +++++++++---------
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 751a2cc..bb47d7c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -229,6 +229,7 @@ struct lock_chain {
* bitfield and hitting the BUG in hlock_class().
*/
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+#define INITIAL_CHAIN_KEY 0
struct held_lock {
/*
diff --git a/init/init_task.c b/init/init_task.c
index 1b15cb9..afa6ad7 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,7 +167,7 @@ struct task_struct init_task
#endif
#ifdef CONFIG_LOCKDEP
.lockdep_depth = 0, /* no locks held yet */
- .curr_chain_key = 0,
+ .curr_chain_key = INITIAL_CHAIN_KEY,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f02f7dc..7d16fcc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -361,7 +361,7 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
inline void lockdep_init_task(struct task_struct *task)
{
task->lockdep_depth = 0; /* no locks held yet */
- task->curr_chain_key = 0;
+ task->curr_chain_key = INITIAL_CHAIN_KEY;
task->lockdep_recursion = 0;
}
@@ -852,7 +852,7 @@ static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
@@ -2519,7 +2519,7 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
struct held_lock *hlock;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int depth = curr->lockdep_depth;
int i = get_first_held_lock(curr, hlock_next);
@@ -2539,7 +2539,7 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int class_id;
printk("depth: %u\n", chain->depth);
@@ -2847,7 +2847,7 @@ static void check_chain_key(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -2871,7 +2871,7 @@ static void check_chain_key(struct task_struct *curr)
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
chain_key = iterate_chain_key(chain_key, hlock->class_idx);
prev_hlock = hlock;
}
@@ -3786,14 +3786,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
/*
* How can we have a chain hash when we ain't got no keys?!
*/
- if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
+ if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
chain_head = 1;
}
hlock->prev_chain_key = chain_key;
if (separate_irq_context(curr, hlock)) {
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, class_idx);
@@ -4635,7 +4635,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
return;
recalc:
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
if (chain->depth && chain->chain_key == chain_key)
--
1.8.3.1
The lockdep_map argument in validate_chain() and check_deadlock() is
not used; remove it.
Signed-off-by: Yuyang Du <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
---
kernel/locking/lockdep.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 76152cc..aa36502 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2225,8 +2225,7 @@ static inline void inc_chains(void)
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next,
- struct lockdep_map *next_instance, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2784,8 +2783,9 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
return 1;
}
-static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
- struct held_lock *hlock, int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr,
+ struct held_lock *hlock,
+ int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
@@ -2811,7 +2811,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* any of these scenarios could lead to a deadlock. If
* All validations
*/
- int ret = check_deadlock(curr, hlock, lock, hlock->read);
+ int ret = check_deadlock(curr, hlock, hlock->read);
if (!ret)
return 0;
@@ -2842,8 +2842,8 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
}
#else
static inline int validate_chain(struct task_struct *curr,
- struct lockdep_map *lock, struct held_lock *hlock,
- int chain_head, u64 chain_key)
+ struct held_lock *hlock,
+ int chain_head, u64 chain_key)
{
return 1;
}
@@ -3825,7 +3825,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
- if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
+ if (!validate_chain(curr, hlock, chain_head, chain_key))
return 0;
curr->curr_chain_key = chain_key;
--
1.8.3.1
Remove a leftover comment and, while at it, add more explanatory
comments. Such a trivial patch!
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index aa36502..b8e6ba3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2806,10 +2806,16 @@ static int validate_chain(struct task_struct *curr,
* - is softirq-safe, if this lock is hardirq-unsafe
*
* And check whether the new lock's dependency graph
- * could lead back to the previous lock.
+ * could lead back to the previous lock:
*
- * any of these scenarios could lead to a deadlock. If
- * All validations
+ * - within the current held-lock stack
+ * - across our accumulated lock dependency records
+ *
+ * any of these scenarios could lead to a deadlock.
+ */
+ /*
+ * The simple case: does the current hold the same lock
+ * already?
*/
int ret = check_deadlock(curr, hlock, hlock->read);
--
1.8.3.1
With __cq_dequeue() returning the dequeued lock_list directly, we can
slightly adjust the loop that iterates the queue in the BFS search,
which simplifies the code. No functional change.
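With a pointer (or NULL) coming back, the BFS loop collapses into the
familiar dequeue-and-test idiom, as in the hunk below:

    struct lock_list *lock;

    while ((lock = __cq_dequeue(cq))) {
            /* process lock and enqueue its children ... */
    }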
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eb8b190..5a0c908f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1303,14 +1303,21 @@ static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem
return 0;
}
-static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
+/*
+ * Dequeue an element from the circular_queue, return a lock_list if
+ * the queue is not empty, or NULL if otherwise.
+ */
+static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
{
+ struct lock_list * lock;
+
if (__cq_empty(cq))
- return -1;
+ return NULL;
- *elem = cq->element[cq->front];
+ lock = cq->element[cq->front];
cq->front = (cq->front + 1) & CQ_MASK;
- return 0;
+
+ return lock;
}
static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
@@ -1362,6 +1369,7 @@ static int __bfs(struct lock_list *source_entry,
int forward)
{
struct lock_list *entry;
+ struct lock_list *lock;
struct list_head *head;
struct circular_queue *cq = &lock_cq;
int ret = 1;
@@ -1383,10 +1391,7 @@ static int __bfs(struct lock_list *source_entry,
__cq_init(cq);
__cq_enqueue(cq, source_entry);
- while (!__cq_empty(cq)) {
- struct lock_list *lock;
-
- __cq_dequeue(cq, &lock);
+ while ((lock = __cq_dequeue(cq))) {
if (!lock->class) {
ret = -2;
--
1.8.3.1
In mark_lock_irq(), the following checks are performed:
  ----------------------------------
 |   ->      | unsafe | read unsafe |
 |----------------------------------|
 | safe      |  F  B  |    F*  B*   |
 |----------------------------------|
 | read safe |  F? B* |      -      |
  ----------------------------------
Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS
?: check enabled by the !dir condition
From a checking point of view, the special F? case does not make sense;
it was perhaps made out of performance concern. As a later patch will
address this issue, remove this exception, which makes the checks
consistent later on.
With STRICT_READ_CHECKS = 1, which is the default, there is no
functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 24e3bca..7275d6c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3234,7 +3234,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
* Validate that the lock dependencies don't have conflicting usage
* states.
*/
- if ((!read || !dir || STRICT_READ_CHECKS) &&
+ if ((!read || STRICT_READ_CHECKS) &&
!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
--
1.8.3.1
As Peter has put the rationale soundly and completely, I simply quote:
"It (check_redundant) was added for cross-release (which has since been
reverted) which would generate a lot of redundant links (IIRC) but
having it makes the reports more convoluted -- basically, if we had an
A-B-C relation, then A-C will not be added to the graph because it is
already covered. This then means any report will include B, even though
a shorter cycle might have been possible."
Skipping the redundancy check increases the number of direct
dependencies. For a simple workload (make clean; reboot; make vmlinux
-j8), the data look like this:
CONFIG_LOCKDEP_SMALL: direct dependencies: 6926
!CONFIG_LOCKDEP_SMALL: direct dependencies: 9052 (+30.7%)
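To make the quoted A-B-C point concrete, consider this hypothetical
sequence (locks A, B and C are illustrative only):

    spin_lock(&A); spin_lock(&B);     /* records A -> B */
    spin_unlock(&B); spin_unlock(&A);
    spin_lock(&B); spin_lock(&C);     /* records B -> C */
    spin_unlock(&C); spin_unlock(&B);
    spin_lock(&A); spin_lock(&C);     /* A -> C is skipped as redundant:
                                       * A -> B -> C already covers it, so
                                       * a later report involving A and C
                                       * must detour through B.
                                       */
    spin_unlock(&C); spin_unlock(&A);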
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2502ea4..9d2728c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1734,6 +1734,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return ret;
}
+#ifdef CONFIG_LOCKDEP_SMALL
/*
* Check that the dependency graph starting at <src> can lead to
* <target> or not. If it can, <src> -> <target> dependency is already
@@ -1763,6 +1764,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
return ret;
}
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2423,12 +2425,14 @@ static inline void inc_chains(void)
}
}
+#ifdef CONFIG_LOCKDEP_SMALL
/*
* Is the <prev> -> <next> link redundant?
*/
ret = check_redundant(prev, next);
if (ret != 1)
return ret;
+#endif
if (!trace->nr_entries && !save_trace(trace))
return 0;
--
1.8.3.1
check_noncircular() and check_redundant() now handle their different
check results themselves. A new helper, check_path(), is added to check
whether there is a path in the dependency graph. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 118 +++++++++++++++++++++++++++++------------------
1 file changed, 74 insertions(+), 44 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ebfa42a..2502ea4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1678,33 +1678,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
}
/*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
- struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+ struct lock_list **target_entry)
{
- int result;
+ int ret;
+
+ ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+ target_entry);
+
+ if (unlikely(ret < 0))
+ print_bfs_bug(ret);
+
+ return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+ struct lock_trace *trace)
+{
+ int ret;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list src_entry = {
+ .class = hlock_class(src),
+ .parent = NULL,
+ };
debug_atomic_inc(nr_cyclic_checks);
- result = __bfs_forwards(root, target, class_equal, target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry);
- return result;
+ if (unlikely(!ret)) {
+ if (!trace->nr_entries) {
+ /*
+ * If save_trace fails here, the printing might
+ * trigger a WARN but because of the !nr_entries it
+ * should not do bad things.
+ */
+ save_trace(trace);
+ }
+
+ print_circular_bug(&src_entry, target_entry, src, target);
+ }
+
+ return ret;
}
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
- struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
{
- int result;
+ int ret;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list src_entry = {
+ .class = hlock_class(src),
+ .parent = NULL,
+ };
debug_atomic_inc(nr_redundant_checks);
- result = __bfs_forwards(root, target, class_equal, target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry);
- return result;
+ if (!ret) {
+ debug_atomic_inc(nr_redundant);
+ ret = 2;
+ } else if (ret < 0)
+ ret = 0;
+
+ return ret;
}
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2302,9 +2359,7 @@ static inline void inc_chains(void)
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance, struct lock_trace *trace)
{
- struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
- struct lock_list this;
int ret;
if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2335,25 +2390,9 @@ static inline void inc_chains(void)
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- this.class = hlock_class(next);
- this.parent = NULL;
- ret = check_noncircular(&this, hlock_class(prev), &target_entry);
- if (unlikely(!ret)) {
- if (!trace->nr_entries) {
- /*
- * If save_trace fails here, the printing might
- * trigger a WARN but because of the !nr_entries it
- * should not do bad things.
- */
- save_trace(trace);
- }
- print_circular_bug(&this, target_entry, next, prev);
+ ret = check_noncircular(next, prev, trace);
+ if (unlikely(ret <= 0))
return 0;
- }
- else if (unlikely(ret < 0)) {
- print_bfs_bug(ret);
- return 0;
- }
if (!check_irq_usage(curr, prev, next))
return 0;
@@ -2387,18 +2426,9 @@ static inline void inc_chains(void)
/*
* Is the <prev> -> <next> link redundant?
*/
- this.class = hlock_class(prev);
- this.parent = NULL;
- ret = check_redundant(&this, hlock_class(next), &target_entry);
- if (!ret) {
- debug_atomic_inc(nr_redundant);
- return 2;
- }
- if (ret < 0) {
- print_bfs_bug(ret);
- return 0;
- }
-
+ ret = check_redundant(prev, next);
+ if (ret != 1)
+ return ret;
if (!trace->nr_entries && !save_trace(trace))
return 0;
--
1.8.3.1
In check_deadlock(), the third argument 'read' can be derived from the
second argument, the held lock, so it can be removed. No functional
change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7bd62e2..67b6a76 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2241,7 +2241,7 @@ static inline void inc_chains(void)
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2260,7 +2260,7 @@ static inline void inc_chains(void)
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
- if ((read == 2) && prev->read)
+ if ((next->read == 2) && prev->read)
return 2;
/*
@@ -2834,7 +2834,7 @@ static int validate_chain(struct task_struct *curr,
* The simple case: does the current hold the same lock
* already?
*/
- int ret = check_deadlock(curr, hlock, hlock->read);
+ int ret = check_deadlock(curr, hlock);
if (!ret)
return 0;
--
1.8.3.1
The breadth-first search is now implemented as flat-out non-recursive,
but the comments still describe it as recursive; update the comments
accordingly.
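The non-recursive shape the comments should describe is, schematically
(condensed from __bfs(); error handling and debug checks elided):

    __cq_init(cq);
    __cq_enqueue(cq, source_entry);

    while ((lock = __cq_dequeue(cq))) {
            head = get_dep_list(lock, offset);
            list_for_each_entry(entry, head, entry) {
                    if (match(entry, data)) {
                            *target_entry = entry;
                            return 0;       /* found a match */
                    }
                    if (__cq_enqueue(cq, entry))
                            return -1;      /* the queue is full */
            }
    }
    return 1;                               /* no match */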
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 15cf2ac..7bd62e2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1376,6 +1376,10 @@ static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
return lock_class + offset;
}
+/*
+ * Forward- or backward-dependency search, used for both circular dependency
+ * checking and hardirq-unsafe/softirq-unsafe checking.
+ */
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
@@ -1456,12 +1460,6 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
}
-/*
- * Recursive, forwards-direction lock-dependency checking, used for
- * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
- * checking.
- */
-
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
unsigned long *entries = stack_trace + trace->offset;
@@ -2280,7 +2278,7 @@ static inline void inc_chains(void)
/*
* There was a chain-cache miss, and we are about to add a new dependency
- * to a previous lock. We recursively validate the following rules:
+ * to a previous lock. We validate the following rules:
*
* - would the adding of the <prev> -> <next> dependency create a
* circular dependency in the graph? [== circular deadlock]
@@ -2330,11 +2328,12 @@ static inline void inc_chains(void)
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
- * forward-recursing into the graph starting at <next>, and
- * checking whether we can reach <prev>.)
+ * a breadth-first search into the graph starting at <next>,
+ * and check whether we can reach <prev>.)
*
- * We are using global variables to control the recursion, to
- * keep the stackframe size of the recursive functions low:
+ * The search is limited by the size of the circular queue (i.e.,
+ * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
+ * in the graph whose neighbours are to be checked.
*/
this.class = hlock_class(next);
this.parent = NULL;
--
1.8.3.1
The @nested argument is not used in __lock_release(), so remove it,
even though it remains equally unused in lock_release() in the first
place.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 67b6a76..ebfa42a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4095,7 +4095,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
* @nested is an hysterical artifact, needs a tree wide cleanup.
*/
static int
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+__lock_release(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock;
@@ -4383,7 +4383,7 @@ void lock_release(struct lockdep_map *lock, int nested,
check_flags(flags);
current->lockdep_recursion = 1;
trace_lock_release(lock, ip);
- if (__lock_release(lock, nested, ip))
+ if (__lock_release(lock, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
--
1.8.3.1
The irq usage and lock dependency rules that, if violated, may lead to
a deadlock are explained in more detail.
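A brief illustration of the lock recursion deadlock described in the
patch (the lock is hypothetical):

    spin_lock(&lock);       /* acquired with irqs enabled: irq-unsafe */
        <hardirq arrives>
        spin_lock(&lock);   /* also used in irq context (irq-safe):
                             * the interrupted context already holds
                             * it, so this spins forever: a deadlock.
                             */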
Signed-off-by: Yuyang Du <[email protected]>
---
Documentation/locking/lockdep-design.txt | 33 ++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index ae65758..f189d13 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -108,14 +108,24 @@ Unused locks (e.g., mutexes) cannot be part of the cause of an error.
Single-lock state rules:
------------------------
+A lock being irq-safe means it was ever used in an irq context, while a
+lock being irq-unsafe means it was ever acquired with irqs enabled.
+
A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The
-following states are exclusive, and only one of them is allowed to be
-set for any lock-class:
+following states must be exclusive: only one of them is allowed to be set
+for any lock-class based on its usage:
+
+ <hardirq-safe> or <hardirq-unsafe>
+ <softirq-safe> or <softirq-unsafe>
- <hardirq-safe> and <hardirq-unsafe>
- <softirq-safe> and <softirq-unsafe>
+This is because if a lock can be used in irq context (irq-safe) then it
+cannot be ever acquired with irq enabled (irq-unsafe). Otherwise, a
+deadlock may happen. For example, if the context is interrupted after
+this lock was acquired but before it is released, the lock will be
+attempted to be acquired twice, which creates a deadlock, referred to
+as a lock recursion deadlock.
-The validator detects and reports lock usage that violate these
+The validator detects and reports lock usage that violates these
single-lock state rules.
Multi-lock dependency rules:
@@ -124,15 +134,18 @@ Multi-lock dependency rules:
The same lock-class must not be acquired twice, because this could lead
to lock recursion deadlocks.
-Furthermore, two locks may not be taken in different order:
+Furthermore, two locks can not be taken in inverse order:
<L1> -> <L2>
<L2> -> <L1>
-because this could lead to lock inversion deadlocks. (The validator
-finds such dependencies in arbitrary complexity, i.e. there can be any
-other locking sequence between the acquire-lock operations, the
-validator will still track all dependencies between locks.)
+because this could lead to a deadlock - referred to as lock inversion
+deadlock - as attempts to acquire the two locks form a circle which
+could lead to the two contexts waiting for each other permanently. The
+validator will find such dependency circle in arbitrary complexity,
+i.e., there can be any other locking sequence between the acquire-lock
+operations; the validator will still find whether these locks can be
+acquired in a circular fashion.
Furthermore, the following usage based lock dependencies are not allowed
between any two lock-classes:
--
1.8.3.1
When searching for a dependency in the lock graph, there are constant
checks for whether the search is forward or backward. Directly
reference the field offset in the struct that differentiates the type
of search to avoid those checks.
No functional change.
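The technique is plain offsetof() arithmetic; a minimal standalone
sketch of the idea (the helper name mirrors the patch, the struct is
trimmed down):

    struct lock_class {
            struct list_head locks_after;   /* forward dependencies */
            struct list_head locks_before;  /* backward dependencies */
    };

    /* Pick a dependency list via a byte offset instead of a branch. */
    static inline struct list_head *get_dep_list(struct lock_class *class,
                                                 int offset)
    {
            return (void *)class + offset;
    }

    /* forward:  get_dep_list(class, offsetof(struct lock_class, locks_after))
     * backward: get_dep_list(class, offsetof(struct lock_class, locks_before))
     */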
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5a0c908f..15cf2ac 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1362,11 +1362,25 @@ static inline int get_lock_depth(struct lock_list *child)
return depth;
}
+/*
+ * Return the forward or backward dependency list.
+ *
+ * @lock: the lock_list to get its class's dependency list
+ * @offset: the offset to struct lock_class to determine whether it is
+ * locks_after or locks_before
+ */
+static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
+{
+ void *lock_class = lock->class;
+
+ return lock_class + offset;
+}
+
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
- int forward)
+ int offset)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1380,11 +1394,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (forward)
- head = &source_entry->class->locks_after;
- else
- head = &source_entry->class->locks_before;
-
+ head = get_dep_list(source_entry, offset);
if (list_empty(head))
goto exit;
@@ -1398,10 +1408,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (forward)
- head = &lock->class->locks_after;
- else
- head = &lock->class->locks_before;
+ head = get_dep_list(lock, offset);
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -1434,7 +1441,8 @@ static inline int __bfs_forwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 1);
+ return __bfs(src_entry, data, match, target_entry,
+ offsetof(struct lock_class, locks_after));
}
@@ -1443,7 +1451,8 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 0);
+ return __bfs(src_entry, data, match, target_entry,
+ offsetof(struct lock_class, locks_before));
}
--
1.8.3.1
Lock usage bit initialization is consolidated into one function
mark_usage(). Trivial readability improvement. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9d2728c..79bc6cd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3459,8 +3459,12 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(redundant_softirqs_off);
}
-static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+static int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
+ if (!check)
+ goto lock_used;
+
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
@@ -3504,6 +3508,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
}
}
+lock_used:
+ /* mark it as used: */
+ if (!mark_lock(curr, hlock, LOCK_USED))
+ return 0;
+
return 1;
}
@@ -3545,8 +3554,8 @@ int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
return 1;
}
-static inline int mark_irqflags(struct task_struct *curr,
- struct held_lock *hlock)
+static inline int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
return 1;
}
@@ -3832,11 +3841,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
#endif
hlock->pin_count = pin_count;
- if (check && !mark_irqflags(curr, hlock))
- return 0;
-
- /* mark it as used: */
- if (!mark_lock(curr, hlock, LOCK_USED))
+ /* Initialize the lock usage bit */
+ if (!mark_usage(curr, hlock, check))
return 0;
/*
--
1.8.3.1
The new bit can be any possible lock usage except garbage, so the cases
in the switch can be made simpler. Warn early on, before taking the
graph lock, if a wrong usage bit is passed in. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 21 +++++++--------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 79bc6cd..24e3bca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3581,6 +3581,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
{
unsigned int new_mask = 1 << new_bit, ret = 1;
+ if (new_bit >= LOCK_USAGE_STATES) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return 0;
+ }
+
/*
* If already set then do not dirty the cacheline,
* nor do any checks:
@@ -3604,25 +3609,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
switch (new_bit) {
-#define LOCKDEP_STATE(__STATE) \
- case LOCK_USED_IN_##__STATE: \
- case LOCK_USED_IN_##__STATE##_READ: \
- case LOCK_ENABLED_##__STATE: \
- case LOCK_ENABLED_##__STATE##_READ:
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
- ret = mark_lock_irq(curr, this, new_bit);
- if (!ret)
- return 0;
- break;
case LOCK_USED:
debug_atomic_dec(nr_unused_locks);
break;
default:
- if (!debug_locks_off_graph_unlock())
+ ret = mark_lock_irq(curr, this, new_bit);
+ if (!ret)
return 0;
- WARN_ON(1);
- return 0;
}
graph_unlock();
--
1.8.3.1
The element field is an array in struct circular_queue used to keep
track of locks in the search. Making it the same type as the locks
avoids type casts. Also fix a typo and elaborate the comment above
struct circular_queue.
No functional change.
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Yuyang Du <[email protected]>
---
kernel/locking/lockdep.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b8e6ba3..eb8b190 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1257,13 +1257,17 @@ static int add_lock_to_list(struct lock_class *this,
#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
/*
- * The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
+ * The circular_queue and helpers are used to implement graph
+ * breadth-first search (BFS) algorithm, by which we can determine
+ * whether there is a path from a lock to another. In deadlock checks,
+ * a path from the next lock to be acquired to a previous held lock
+ * indicates that adding the <prev> -> <next> lock dependency will
+ * produce a circle in the graph. Breadth-first search instead of
+ * depth-first search is used in order to find the shortest (circular)
+ * path.
*/
struct circular_queue {
- unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+ struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
unsigned int front, rear;
};
@@ -1289,7 +1293,7 @@ static inline int __cq_full(struct circular_queue *cq)
return ((cq->rear + 1) & CQ_MASK) == cq->front;
}
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
if (__cq_full(cq))
return -1;
@@ -1299,7 +1303,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
return 0;
}
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
{
if (__cq_empty(cq))
return -1;
@@ -1377,12 +1381,12 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
__cq_init(cq);
- __cq_enqueue(cq, (unsigned long)source_entry);
+ __cq_enqueue(cq, source_entry);
while (!__cq_empty(cq)) {
struct lock_list *lock;
- __cq_dequeue(cq, (unsigned long *)&lock);
+ __cq_dequeue(cq, &lock);
if (!lock->class) {
ret = -2;
@@ -1406,7 +1410,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (__cq_enqueue(cq, (unsigned long)entry)) {
+ if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
--
1.8.3.1
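The typed interface can be exercised standalone; below is a minimal userspace
sketch of the same power-of-two ring buffer, where QSIZE, struct node and the
cq_* names are invented for the demo:

#include <stdio.h>

#define QSIZE 4                 /* must be a power of two */
#define QMASK (QSIZE - 1)

struct node { int id; };

struct circular_queue {
        struct node *element[QSIZE];    /* typed slots, no casts needed */
        unsigned int front, rear;
};

static int cq_empty(struct circular_queue *cq)
{
        return cq->front == cq->rear;
}

static int cq_full(struct circular_queue *cq)
{
        return ((cq->rear + 1) & QMASK) == cq->front;
}

static int cq_enqueue(struct circular_queue *cq, struct node *elem)
{
        if (cq_full(cq))
                return -1;
        cq->element[cq->rear] = elem;
        cq->rear = (cq->rear + 1) & QMASK;
        return 0;
}

/* Out-parameter style, matching this patch's interface. */
static int cq_dequeue(struct circular_queue *cq, struct node **elem)
{
        if (cq_empty(cq))
                return -1;
        *elem = cq->element[cq->front];
        cq->front = (cq->front + 1) & QMASK;
        return 0;
}

int main(void)
{
        struct circular_queue cq = { .front = 0, .rear = 0 };
        struct node a = { 1 }, *out;

        cq_enqueue(&cq, &a);
        while (!cq_dequeue(&cq, &out))
                printf("dequeued node %d\n", out->id);
        return 0;
}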
held_lock->class_idx is used to point to the class of the held lock. The
index is shifted by 1 to make index 0 mean no class, which results in the
class index being shifted back and forth, and that is not worth doing.
The reasons are: (1) there will be no "no-class" held_lock to begin with,
and (2) index 0 seems to be used for error checking, but if something
indeed went wrong, the index cannot be counted on to catch it, as nothing
will deliberately set class_idx to 0 to tell us it is wrong.
Therefore, change the index to start from 0. This saves a lot of
back-and-forth shifts and gives a class slot back to lock_classes.
Since index 0 is now used for a lock class, we change the initial chain key
to -1 to avoid a key collision, which stems from the fact that
__jhash_mix(0, 0, 0) = 0. Actually, the initial chain key can be any value
other than 0.
In addition, a bitmap is maintained to keep track of the used lock classes,
and we check the validity of the held lock against that bitmap.
Signed-off-by: Yuyang Du <[email protected]>
---
include/linux/lockdep.h | 14 ++++++------
kernel/locking/lockdep.c | 59 ++++++++++++++++++++++++++++++++----------------
2 files changed, 46 insertions(+), 27 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index bb47d7c..7c2fefa 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -223,13 +223,8 @@ struct lock_chain {
};
#define MAX_LOCKDEP_KEYS_BITS 13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
-#define INITIAL_CHAIN_KEY 0
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
struct held_lock {
/*
@@ -254,6 +249,11 @@ struct held_lock {
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7d16fcc..76152cc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -150,17 +150,28 @@ static inline int debug_locks_off_graph_unlock(void)
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
- if (!hlock->class_idx) {
+ unsigned int class_idx = hlock->class_idx;
+
+ /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
+ barrier();
+
+ if (!test_bit(class_idx, lock_classes_in_use)) {
/*
* Someone passed in garbage, we give up.
*/
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
- return lock_classes + hlock->class_idx - 1;
+
+ /*
+ * At this point, if the passed hlock->class_idx is still garbage,
+ * we just have to live with it
+ */
+ return lock_classes + class_idx;
}
#ifdef CONFIG_LOCK_STAT
@@ -589,19 +600,22 @@ static void print_lock(struct held_lock *hlock)
/*
* We can be called locklessly through debug_show_all_locks() so be
* extra careful, the hlock might have been released and cleared.
+ *
+ * If this indeed happens, let's pretend it does not hurt to continue
+ * to print the lock unless the hlock class_idx does not point to a
+ * registered class. The rationale here is: since we don't attempt
+ * to distinguish whether we are in this situation, if it just
+ * happened we can't count on class_idx to tell either.
*/
- unsigned int class_idx = hlock->class_idx;
+ struct lock_class *lock = hlock_class(hlock);
- /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
- barrier();
-
- if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
+ if (!lock) {
printk(KERN_CONT "<RELEASED>\n");
return;
}
printk(KERN_CONT "%p", hlock->instance);
- print_lock_name(lock_classes + class_idx - 1);
+ print_lock_name(lock);
printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
@@ -856,7 +870,7 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
/*
* The 'unsigned long long' casts avoid that a compiler warning
* is reported when building tools/lib/lockdep.
@@ -1131,6 +1145,7 @@ static bool is_dynamic_key(const struct lock_class_key *key)
return NULL;
}
nr_lock_classes++;
+ __set_bit(class - lock_classes, lock_classes_in_use);
debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
@@ -2545,7 +2560,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
- chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+ chain_key = print_chain_key_iteration(class_id, chain_key);
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2596,7 +2611,7 @@ static int check_no_collision(struct task_struct *curr,
}
for (j = 0; j < chain->depth - 1; j++, i++) {
- id = curr->held_locks[i].class_idx - 1;
+ id = curr->held_locks[i].class_idx;
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
print_collision(curr, hlock, chain);
@@ -2679,7 +2694,7 @@ static inline int add_chain_cache(struct task_struct *curr,
if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = nr_chain_hlocks;
for (j = 0; j < chain->depth - 1; j++, i++) {
- int lock_id = curr->held_locks[i].class_idx - 1;
+ int lock_id = curr->held_locks[i].class_idx;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
@@ -2863,10 +2878,12 @@ static void check_chain_key(struct task_struct *curr)
(unsigned long long)hlock->prev_chain_key);
return;
}
+
/*
- * Whoops ran out of static storage again?
+ * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
+ * it a registered lock class index?
*/
- if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
return;
if (prev_hlock && (prev_hlock->irq_context !=
@@ -3714,7 +3731,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
- class_idx = class - lock_classes + 1;
+ class_idx = class - lock_classes;
if (depth) {
hlock = curr->held_locks + depth - 1;
@@ -3776,9 +3793,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* the hash, not class->key.
*/
/*
- * Whoops, we did it again.. ran straight out of our static allocation.
+ * Whoops, we did it again.. class_idx is invalid.
*/
- if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
return 0;
chain_key = curr->curr_chain_key;
@@ -3893,7 +3910,7 @@ static int match_held_lock(const struct held_lock *hlock,
if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
return 0;
- if (hlock->class_idx == class - lock_classes + 1)
+ if (hlock->class_idx == class - lock_classes)
return 1;
}
@@ -3987,7 +4004,7 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
- hlock->class_idx = class - lock_classes + 1;
+ hlock->class_idx = class - lock_classes;
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -4637,7 +4654,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
if (chain->depth && chain->chain_key == chain_key)
return;
/* Overwrite the chain key for concurrent RCU readers. */
@@ -4711,6 +4728,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
WRITE_ONCE(class->key, NULL);
WRITE_ONCE(class->name, NULL);
nr_lock_classes--;
+ __clear_bit(class - lock_classes, lock_classes_in_use);
} else {
WARN_ONCE(true, "%s() failed for class %s\n", __func__,
class->name);
@@ -5056,6 +5074,7 @@ void __init lockdep_init(void)
printk(" memory used by lock dependency info: %zu kB\n",
(sizeof(lock_classes) +
+ sizeof(lock_classes_in_use) +
sizeof(classhash_table) +
sizeof(list_entries) +
sizeof(list_entries_in_use) +
--
1.8.3.1
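To see why the bitmap works as the validity check for a zero-based index,
here is a minimal userspace sketch; MAX_KEYS and the *_demo helpers are
hypothetical, standing in for the kernel's DECLARE_BITMAP(), __set_bit() and
test_bit():

#include <stdio.h>
#include <limits.h>

#define MAX_KEYS 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long in_use[(MAX_KEYS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_bit_demo(unsigned int nr)
{
        in_use[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_demo(unsigned int nr)
{
        return (in_use[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
        unsigned int class_idx = 0;     /* zero-based, a valid slot now */

        set_bit_demo(class_idx);        /* set when the class registers */

        /* Validity no longer hinges on idx != 0; ask the bitmap. */
        if (!test_bit_demo(class_idx))
                fprintf(stderr, "garbage class index %u\n", class_idx);
        else
                printf("class %u is registered\n", class_idx);
        return 0;
}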
More words are added to lockdep design document regarding key concepts,
which should help people without lockdep experience read and understand
lockdep reports.
Signed-off-by: Yuyang Du <[email protected]>
---
Documentation/locking/lockdep-design.txt | 79 ++++++++++++++++++++++++--------
1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 39fae14..ae65758 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -15,34 +15,48 @@ tens of thousands of) instantiations. For example a lock in the inode
struct is one class, while each inode has its own instantiation of that
lock class.
-The validator tracks the 'state' of lock-classes, and it tracks
-dependencies between different lock-classes. The validator maintains a
-rolling proof that the state and the dependencies are correct.
-
-Unlike an lock instantiation, the lock-class itself never goes away: when
-a lock-class is used for the first time after bootup it gets registered,
-and all subsequent uses of that lock-class will be attached to this
-lock-class.
+The validator tracks the 'usage state' of lock-classes, and it tracks
+the dependencies between different lock-classes. Lock usage indicates
+how a lock is used with regard to its IRQ contexts, while lock
+dependency can be understood as lock order, where L1 -> L2 suggests that
+a task is attempting to acquire L2 while holding L1. From lockdep's
+perspective, the two locks (L1 and L2) are not necessarily related; that
+dependency just means that order once happened. The validator keeps
+proving that lock usages and dependencies are correct, and it emits a
+splat if they are not.
+
+A lock-class's behavior is constructed by its instances collectively:
+when the first instance of a lock-class is used after bootup the class
+gets registered, then all (subsequent) instances will be mapped to the
+class and hence their usages and dependencies will contribute to those of
+the class. A lock-class does not go away when a lock instance does, but
+it can be removed if the memory space of the lock class (static or
+dynamic) is reclaimed; this happens for example when a module is
+unloaded or a workqueue is destroyed.
State
-----
-The validator tracks lock-class usage history into 4 * nSTATEs + 1 separate
-state bits:
+The validator tracks lock-class usage history and divides the usage into
+(4 usages * n STATEs + 1) categories:
+where the 4 usages can be:
- 'ever held in STATE context'
- 'ever held as readlock in STATE context'
- 'ever held with STATE enabled'
- 'ever held as readlock with STATE enabled'
-Where STATE can be either one of (kernel/locking/lockdep_states.h)
- - hardirq
- - softirq
+where the n STATEs are coded in kernel/locking/lockdep_states.h and as of
+now they include:
+- hardirq
+- softirq
+where the last 1 category is:
- 'ever used' [ == !unused ]
-When locking rules are violated, these state bits are presented in the
-locking error messages, inside curlies. A contrived example:
+When locking rules are violated, these usage bits are presented in the
+locking error messages, inside curlies, with a total of 2 * n STATEs bits.
+A contrived example:
modprobe/2287 is trying to acquire lock:
(&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
@@ -51,15 +65,44 @@ locking error messages, inside curlies. A contrived example:
(&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
-The bit position indicates STATE, STATE-read, for each of the states listed
-above, and the character displayed in each indicates:
+For a given lock, the bit positions from left to right indicate the usage
+of the lock and its readlock (if one exists), for each of the n STATEs listed
+above respectively, and the character displayed at each bit position
+indicates:
'.' acquired while irqs disabled and not in irq context
'-' acquired in irq context
'+' acquired with irqs enabled
'?' acquired in irq context with irqs enabled.
-Unused mutexes cannot be part of the cause of an error.
+The bits are illustrated with an example:
+
+ (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
+ ||||
+ ||| \-> softirq disabled and not in softirq context
+ || \--> acquired in softirq context
+ | \---> hardirq disabled and not in hardirq context
+ \----> acquired in hardirq context
+
+
+For a given STATE, whether the lock is ever acquired in that STATE
+context and whether that STATE is enabled yields four possible cases as
+shown in the table below. The bit character indicates exactly which
+case applies to the lock as of the reporting time.
+
+ -------------------------------------------
+ | | irq enabled | irq disabled |
+ |-------------------------------------------|
+ | ever in irq | ? | - |
+ |-------------------------------------------|
+ | never in irq | + | . |
+ -------------------------------------------
+
+The character '-' suggests irqs were disabled, because otherwise the
+character '?' would have been shown instead. A similar deduction can be
+applied for '+' too.
+
+Unused locks (e.g., mutexes) cannot be part of the cause of an error.
Single-lock state rules:
--
1.8.3.1
On Mon, May 06, 2019 at 04:19:16PM +0800, Yuyang Du wrote:
> Hi Peter,
>
> Let me post these small bits first while waiting for Frederic's patches
> to be merged.
>
They apply nicely and should show up in tip after the merge window
closes or thereabout.
Thanks!
Thank you so much.
On Wed, 8 May 2019 at 16:56, Peter Zijlstra <[email protected]> wrote:
>
> On Mon, May 06, 2019 at 04:19:16PM +0800, Yuyang Du wrote:
> > Hi Peter,
> >
> > Let me post these small bits first while waiting for Frederic's patches
> > to be merged.
> >
>
> They apply nicely and should show up in tip after the merge window
> closes or thereabout.
>
> Thanks!
Commit-ID: c01fbbc83f42748b3ed094497933601e6c9e0a03
Gitweb: https://git.kernel.org/tip/c01fbbc83f42748b3ed094497933601e6c9e0a03
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:18 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:34 +0200
locking/lockdep: Add description and explanation in lockdep design doc
More words are added to lockdep design document regarding key concepts,
which should help people without lockdep experience read and understand
lockdep reports.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
Documentation/locking/lockdep-design.txt | 79 ++++++++++++++++++++++++--------
1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 39fae143c9cb..ae65758383ea 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -15,34 +15,48 @@ tens of thousands of) instantiations. For example a lock in the inode
struct is one class, while each inode has its own instantiation of that
lock class.
-The validator tracks the 'state' of lock-classes, and it tracks
-dependencies between different lock-classes. The validator maintains a
-rolling proof that the state and the dependencies are correct.
-
-Unlike an lock instantiation, the lock-class itself never goes away: when
-a lock-class is used for the first time after bootup it gets registered,
-and all subsequent uses of that lock-class will be attached to this
-lock-class.
+The validator tracks the 'usage state' of lock-classes, and it tracks
+the dependencies between different lock-classes. Lock usage indicates
+how a lock is used with regard to its IRQ contexts, while lock
+dependency can be understood as lock order, where L1 -> L2 suggests that
+a task is attempting to acquire L2 while holding L1. From lockdep's
+perspective, the two locks (L1 and L2) are not necessarily related; that
+dependency just means that order once happened. The validator keeps
+proving that lock usages and dependencies are correct, and it emits a
+splat if they are not.
+
+A lock-class's behavior is constructed by its instances collectively:
+when the first instance of a lock-class is used after bootup the class
+gets registered, then all (subsequent) instances will be mapped to the
+class and hence their usages and dependencies will contribute to those of
+the class. A lock-class does not go away when a lock instance does, but
+it can be removed if the memory space of the lock class (static or
+dynamic) is reclaimed; this happens for example when a module is
+unloaded or a workqueue is destroyed.
State
-----
-The validator tracks lock-class usage history into 4 * nSTATEs + 1 separate
-state bits:
+The validator tracks lock-class usage history and divides the usage into
+(4 usages * n STATEs + 1) categories:
+where the 4 usages can be:
- 'ever held in STATE context'
- 'ever held as readlock in STATE context'
- 'ever held with STATE enabled'
- 'ever held as readlock with STATE enabled'
-Where STATE can be either one of (kernel/locking/lockdep_states.h)
- - hardirq
- - softirq
+where the n STATEs are coded in kernel/locking/lockdep_states.h and as of
+now they include:
+- hardirq
+- softirq
+where the last 1 category is:
- 'ever used' [ == !unused ]
-When locking rules are violated, these state bits are presented in the
-locking error messages, inside curlies. A contrived example:
+When locking rules are violated, these usage bits are presented in the
+locking error messages, inside curlies, with a total of 2 * n STATEs bits.
+A contrived example:
modprobe/2287 is trying to acquire lock:
(&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
@@ -51,15 +65,44 @@ locking error messages, inside curlies. A contrived example:
(&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
-The bit position indicates STATE, STATE-read, for each of the states listed
-above, and the character displayed in each indicates:
+For a given lock, the bit positions from left to right indicate the usage
+of the lock and its readlock (if one exists), for each of the n STATEs listed
+above respectively, and the character displayed at each bit position
+indicates:
'.' acquired while irqs disabled and not in irq context
'-' acquired in irq context
'+' acquired with irqs enabled
'?' acquired in irq context with irqs enabled.
-Unused mutexes cannot be part of the cause of an error.
+The bits are illustrated with an example:
+
+ (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
+ ||||
+ ||| \-> softirq disabled and not in softirq context
+ || \--> acquired in softirq context
+ | \---> hardirq disabled and not in hardirq context
+ \----> acquired in hardirq context
+
+
+For a given STATE, whether the lock is ever acquired in that STATE
+context and whether that STATE is enabled yields four possible cases as
+shown in the table below. The bit character indicates exactly which
+case applies to the lock as of the reporting time.
+
+ -------------------------------------------
+ | | irq enabled | irq disabled |
+ |-------------------------------------------|
+ | ever in irq | ? | - |
+ |-------------------------------------------|
+ | never in irq | + | . |
+ -------------------------------------------
+
+The character '-' suggests irqs were disabled, because otherwise the
+character '?' would have been shown instead. A similar deduction can be
+applied for '+' too.
+
+Unused locks (e.g., mutexes) cannot be part of the cause of an error.
Single-lock state rules:
Commit-ID: e7a38f63ba50dc95426dd50c43383dfecaa35d7f
Gitweb: https://git.kernel.org/tip/e7a38f63ba50dc95426dd50c43383dfecaa35d7f
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:20 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:35 +0200
locking/lockdep: Remove useless conditional macro
Since defined(CONFIG_PROVE_LOCKING) is tested inside the scope of #ifdef
CONFIG_PROVE_LOCKING, the test is redundant and can be removed.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a033df00fd1d..3c477018e184 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1674,7 +1674,7 @@ check_redundant(struct lock_list *root, struct lock_class *target,
return result;
}
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_TRACE_IRQFLAGS
static inline int usage_accumulate(struct lock_list *entry, void *mask)
{
@@ -2152,7 +2152,7 @@ static inline void inc_chains(void)
nr_process_chains++;
}
-#endif
+#endif /* CONFIG_TRACE_IRQFLAGS */
static void
print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
@@ -2829,7 +2829,7 @@ static inline int validate_chain(struct task_struct *curr,
{
return 1;
}
-#endif
+#endif /* CONFIG_PROVE_LOCKING */
/*
* We are building curr_chain_key incrementally, so double-check
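The simplification can be reproduced standalone; below is a minimal sketch in
which the *_DEMO macros are made up, standing in for the two config options:

#include <stdio.h>

#define CONFIG_PROVE_LOCKING_DEMO
#define CONFIG_TRACE_IRQFLAGS_DEMO

#ifdef CONFIG_PROVE_LOCKING_DEMO
/*
 * Inside this block CONFIG_PROVE_LOCKING_DEMO is known to be defined,
 * so the compound test below ...
 */
#if defined(CONFIG_TRACE_IRQFLAGS_DEMO) && defined(CONFIG_PROVE_LOCKING_DEMO)
static const char *msg = "compound test";
#endif
/* ... reduces to the simpler form: */
#ifdef CONFIG_TRACE_IRQFLAGS_DEMO
static const char *msg2 = "simple test";
#endif
#endif /* CONFIG_PROVE_LOCKING_DEMO */

int main(void)
{
        printf("%s selects the same code as %s\n", msg, msg2);
        return 0;
}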
Commit-ID: f6ec8829ac9d59b637366c13038f15d6f6156fe1
Gitweb: https://git.kernel.org/tip/f6ec8829ac9d59b637366c13038f15d6f6156fe1
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:24 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:43 +0200
locking/lockdep: Define INITIAL_CHAIN_KEY for chain keys to start with
Chain keys are computed using the Jenkins hash function, which needs an initial
hash to start with. Dedicate a macro to make this clear and configurable. A
later patch changes this initial chain key.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/lockdep.h | 1 +
init/init_task.c | 2 +-
kernel/locking/lockdep.c | 18 +++++++++---------
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5d05b8149f19..d4e69595dbd4 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -229,6 +229,7 @@ struct lock_chain {
* bitfield and hitting the BUG in hlock_class().
*/
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+#define INITIAL_CHAIN_KEY 0
struct held_lock {
/*
diff --git a/init/init_task.c b/init/init_task.c
index 1b15cb90d64f..afa6ad795355 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,7 +167,7 @@ struct task_struct init_task
#endif
#ifdef CONFIG_LOCKDEP
.lockdep_depth = 0, /* no locks held yet */
- .curr_chain_key = 0,
+ .curr_chain_key = INITIAL_CHAIN_KEY,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b7d9c28ecf3b..9edf6f12b711 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -362,7 +362,7 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
void lockdep_init_task(struct task_struct *task)
{
task->lockdep_depth = 0; /* no locks held yet */
- task->curr_chain_key = 0;
+ task->curr_chain_key = INITIAL_CHAIN_KEY;
task->lockdep_recursion = 0;
}
@@ -857,7 +857,7 @@ static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
@@ -2524,7 +2524,7 @@ static void
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
struct held_lock *hlock;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int depth = curr->lockdep_depth;
int i = get_first_held_lock(curr, hlock_next);
@@ -2544,7 +2544,7 @@ print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_ne
static void print_chain_keys_chain(struct lock_chain *chain)
{
int i;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
int class_id;
printk("depth: %u\n", chain->depth);
@@ -2848,7 +2848,7 @@ static void check_chain_key(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i;
- u64 chain_key = 0;
+ u64 chain_key = INITIAL_CHAIN_KEY;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -2872,7 +2872,7 @@ static void check_chain_key(struct task_struct *curr)
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
chain_key = iterate_chain_key(chain_key, hlock->class_idx);
prev_hlock = hlock;
}
@@ -3787,14 +3787,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
/*
* How can we have a chain hash when we ain't got no keys?!
*/
- if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
+ if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
chain_head = 1;
}
hlock->prev_chain_key = chain_key;
if (separate_irq_context(curr, hlock)) {
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, class_idx);
@@ -4636,7 +4636,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
return;
recalc:
- chain_key = 0;
+ chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
if (chain->depth && chain->chain_key == chain_key)
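A minimal userspace sketch of the pattern, seeding every chain-key computation
from one named constant; iterate_key() here is a made-up mixer, not the
kernel's __jhash_mix()-based one:

#include <stdio.h>
#include <stdint.h>

#define INITIAL_CHAIN_KEY 0     /* the one knob a later change flips */

/* Made-up mixer standing in for the kernel's Jenkins-hash based one. */
static uint64_t iterate_key(uint64_t key, uint32_t idx)
{
        key ^= idx + 0x9e3779b97f4a7c15ULL;
        key *= 0xff51afd7ed558ccdULL;
        return key ^ (key >> 33);
}

int main(void)
{
        uint64_t key = INITIAL_CHAIN_KEY;
        uint32_t chain[] = { 3, 7, 42 };
        int i;

        /* Every chain-key computation starts from the same named seed. */
        for (i = 0; i < 3; i++)
                key = iterate_key(key, chain[i]);
        printf("chain key: %016llx\n", (unsigned long long)key);
        return 0;
}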
Commit-ID: 0b9fc8ecfa30000c8900da7adbbef23438de9ec0
Gitweb: https://git.kernel.org/tip/0b9fc8ecfa30000c8900da7adbbef23438de9ec0
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:26 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:44 +0200
locking/lockdep: Remove unused argument in validate_chain() and check_deadlock()
The lockdep_map argument in them is not used; remove it.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3eecae315885..6cf14c84eb6d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2230,8 +2230,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next,
- struct lockdep_map *next_instance, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2789,8 +2788,9 @@ cache_hit:
return 1;
}
-static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
- struct held_lock *hlock, int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr,
+ struct held_lock *hlock,
+ int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
@@ -2816,7 +2816,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* any of these scenarios could lead to a deadlock. If
* All validations
*/
- int ret = check_deadlock(curr, hlock, lock, hlock->read);
+ int ret = check_deadlock(curr, hlock, hlock->read);
if (!ret)
return 0;
@@ -2847,8 +2847,8 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
}
#else
static inline int validate_chain(struct task_struct *curr,
- struct lockdep_map *lock, struct held_lock *hlock,
- int chain_head, u64 chain_key)
+ struct held_lock *hlock,
+ int chain_head, u64 chain_key)
{
return 1;
}
@@ -3826,7 +3826,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
- if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
+ if (!validate_chain(curr, hlock, chain_head, chain_key))
return 0;
curr->curr_chain_key = chain_key;
Commit-ID: 31a490e5c54f5499aa744f8524611e2a4b19f8ba
Gitweb: https://git.kernel.org/tip/31a490e5c54f5499aa744f8524611e2a4b19f8ba
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:27 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:44 +0200
locking/lockdep: Update comment
A leftover comment is removed. While at it, add more explanatory
comments. Such a trivial patch!
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6cf14c84eb6d..a9799f9ed093 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2811,10 +2811,16 @@ static int validate_chain(struct task_struct *curr,
* - is softirq-safe, if this lock is hardirq-unsafe
*
* And check whether the new lock's dependency graph
- * could lead back to the previous lock.
+ * could lead back to the previous lock:
*
- * any of these scenarios could lead to a deadlock. If
- * All validations
+ * - within the current held-lock stack
+ * - across our accumulated lock dependency records
+ *
+ * any of these scenarios could lead to a deadlock.
+ */
+ /*
+ * The simple case: does the current task hold the same lock
+ * already?
*/
int ret = check_deadlock(curr, hlock, hlock->read);
Commit-ID: 01bb6f0af992a1e6b7797d92fd31a7864872e347
Gitweb: https://git.kernel.org/tip/01bb6f0af992a1e6b7797d92fd31a7864872e347
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:25 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:43 +0200
locking/lockdep: Change the range of class_idx in held_lock struct
held_lock->class_idx is used to point to the class of the held lock. The
index is shifted by 1 to make index 0 mean no class, which results in the
class index being shifted back and forth, and that is not worth doing.
The reasons are: (1) there will be no "no-class" held_lock to begin with,
and (2) index 0 seems to be used for error checking, but if something
indeed went wrong, the index cannot be counted on to catch it, as nothing
will deliberately set class_idx to 0 to tell us it is wrong.
Therefore, change the index to start from 0. This saves a lot of
back-and-forth shifts and gives a class slot back to lock_classes.
Since index 0 is now used for a lock class, we change the initial chain key
to -1 to avoid a key collision, which stems from the fact that
__jhash_mix(0, 0, 0) = 0. Actually, the initial chain key can be any value
other than 0.
In addition, a bitmap is maintained to keep track of the used lock classes,
and we check the validity of the held lock against that bitmap.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/lockdep.h | 14 ++++++------
kernel/locking/lockdep.c | 59 ++++++++++++++++++++++++++++++++----------------
2 files changed, 46 insertions(+), 27 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d4e69595dbd4..30a0f81aa130 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -223,13 +223,8 @@ struct lock_chain {
};
#define MAX_LOCKDEP_KEYS_BITS 13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
-#define INITIAL_CHAIN_KEY 0
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
struct held_lock {
/*
@@ -254,6 +249,11 @@ struct held_lock {
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9edf6f12b711..3eecae315885 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -151,17 +151,28 @@ unsigned long nr_lock_classes;
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
- if (!hlock->class_idx) {
+ unsigned int class_idx = hlock->class_idx;
+
+ /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
+ barrier();
+
+ if (!test_bit(class_idx, lock_classes_in_use)) {
/*
* Someone passed in garbage, we give up.
*/
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
- return lock_classes + hlock->class_idx - 1;
+
+ /*
+ * At this point, if the passed hlock->class_idx is still garbage,
+ * we just have to live with it
+ */
+ return lock_classes + class_idx;
}
#ifdef CONFIG_LOCK_STAT
@@ -590,19 +601,22 @@ static void print_lock(struct held_lock *hlock)
/*
* We can be called locklessly through debug_show_all_locks() so be
* extra careful, the hlock might have been released and cleared.
+ *
+ * If this indeed happens, let's pretend it does not hurt to continue
+ * to print the lock unless the hlock class_idx does not point to a
+ * registered class. The rationale here is: since we don't attempt
+ * to distinguish whether we are in this situation, if it just
+ * happened we can't count on class_idx to tell either.
*/
- unsigned int class_idx = hlock->class_idx;
+ struct lock_class *lock = hlock_class(hlock);
- /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
- barrier();
-
- if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
+ if (!lock) {
printk(KERN_CONT "<RELEASED>\n");
return;
}
printk(KERN_CONT "%p", hlock->instance);
- print_lock_name(lock_classes + class_idx - 1);
+ print_lock_name(lock);
printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
@@ -861,7 +875,7 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
/*
* The 'unsigned long long' casts avoid that a compiler warning
* is reported when building tools/lib/lockdep.
@@ -1136,6 +1150,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
return NULL;
}
nr_lock_classes++;
+ __set_bit(class - lock_classes, lock_classes_in_use);
debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
@@ -2550,7 +2565,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
- chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+ chain_key = print_chain_key_iteration(class_id, chain_key);
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2601,7 +2616,7 @@ static int check_no_collision(struct task_struct *curr,
}
for (j = 0; j < chain->depth - 1; j++, i++) {
- id = curr->held_locks[i].class_idx - 1;
+ id = curr->held_locks[i].class_idx;
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
print_collision(curr, hlock, chain);
@@ -2684,7 +2699,7 @@ static inline int add_chain_cache(struct task_struct *curr,
if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = nr_chain_hlocks;
for (j = 0; j < chain->depth - 1; j++, i++) {
- int lock_id = curr->held_locks[i].class_idx - 1;
+ int lock_id = curr->held_locks[i].class_idx;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
@@ -2864,10 +2879,12 @@ static void check_chain_key(struct task_struct *curr)
(unsigned long long)hlock->prev_chain_key);
return;
}
+
/*
- * Whoops ran out of static storage again?
+ * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
+ * it a registered lock class index?
*/
- if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
return;
if (prev_hlock && (prev_hlock->irq_context !=
@@ -3715,7 +3732,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
- class_idx = class - lock_classes + 1;
+ class_idx = class - lock_classes;
if (depth) {
hlock = curr->held_locks + depth - 1;
@@ -3777,9 +3794,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
* the hash, not class->key.
*/
/*
- * Whoops, we did it again.. ran straight out of our static allocation.
+ * Whoops, we did it again.. class_idx is invalid.
*/
- if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
+ if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
return 0;
chain_key = curr->curr_chain_key;
@@ -3894,7 +3911,7 @@ static int match_held_lock(const struct held_lock *hlock,
if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
return 0;
- if (hlock->class_idx == class - lock_classes + 1)
+ if (hlock->class_idx == class - lock_classes)
return 1;
}
@@ -3988,7 +4005,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
- hlock->class_idx = class - lock_classes + 1;
+ hlock->class_idx = class - lock_classes;
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -4638,7 +4655,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
recalc:
chain_key = INITIAL_CHAIN_KEY;
for (i = chain->base; i < chain->base + chain->depth; i++)
- chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
if (chain->depth && chain->chain_key == chain_key)
return;
/* Overwrite the chain key for concurrent RCU readers. */
@@ -4712,6 +4729,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
WRITE_ONCE(class->key, NULL);
WRITE_ONCE(class->name, NULL);
nr_lock_classes--;
+ __clear_bit(class - lock_classes, lock_classes_in_use);
} else {
WARN_ONCE(true, "%s() failed for class %s\n", __func__,
class->name);
@@ -5057,6 +5075,7 @@ void __init lockdep_init(void)
printk(" memory used by lock dependency info: %zu kB\n",
(sizeof(lock_classes) +
+ sizeof(lock_classes_in_use) +
sizeof(classhash_table) +
sizeof(list_entries) +
sizeof(list_entries_in_use) +
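To make the collision hazard concrete, here is a toy userspace demo; mix() is
a stand-in with the same troublesome property as __jhash_mix(), namely that a
zero key mixed with index 0 stays zero:

#include <stdio.h>
#include <stdint.h>

/* Toy mixer with the same hazard as __jhash_mix(): mix(0, 0) == 0. */
static uint64_t mix(uint64_t key, uint32_t idx)
{
        return key * 31 + idx;
}

static uint64_t chain_key(uint64_t seed, const uint32_t *ids, int n)
{
        uint64_t key = seed;
        int i;

        for (i = 0; i < n; i++)
                key = mix(key, ids[i]);
        return key;
}

int main(void)
{
        uint32_t one[] = { 0 }, two[] = { 0, 0 };

        /* Seed 0: chains of class 0 collide regardless of depth. */
        printf("seed 0:  %llx vs %llx\n",
               (unsigned long long)chain_key(0, one, 1),
               (unsigned long long)chain_key(0, two, 2));

        /* Seed -1 keeps them distinct. */
        printf("seed -1: %llx vs %llx\n",
               (unsigned long long)chain_key((uint64_t)-1, one, 1),
               (unsigned long long)chain_key((uint64_t)-1, two, 2));
        return 0;
}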
Commit-ID: c1661325597f68bc9e632c4fa9c86983d56fba4f
Gitweb: https://git.kernel.org/tip/c1661325597f68bc9e632c4fa9c86983d56fba4f
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:29 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:46 +0200
locking/lockdep: Change the return type of __cq_dequeue()
With this change, we can slightly adjust the code that iterates the queue in
the BFS search, which simplifies the code. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d467ba825dca..d23dcb47389e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1308,14 +1308,21 @@ static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem
return 0;
}
-static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
+/*
+ * Dequeue an element from the circular_queue, return a lock_list if
+ * the queue is not empty, or NULL otherwise.
+ */
+static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
{
+ struct lock_list * lock;
+
if (__cq_empty(cq))
- return -1;
+ return NULL;
- *elem = cq->element[cq->front];
+ lock = cq->element[cq->front];
cq->front = (cq->front + 1) & CQ_MASK;
- return 0;
+
+ return lock;
}
static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
@@ -1367,6 +1374,7 @@ static int __bfs(struct lock_list *source_entry,
int forward)
{
struct lock_list *entry;
+ struct lock_list *lock;
struct list_head *head;
struct circular_queue *cq = &lock_cq;
int ret = 1;
@@ -1388,10 +1396,7 @@ static int __bfs(struct lock_list *source_entry,
__cq_init(cq);
__cq_enqueue(cq, source_entry);
- while (!__cq_empty(cq)) {
- struct lock_list *lock;
-
- __cq_dequeue(cq, &lock);
+ while ((lock = __cq_dequeue(cq))) {
if (!lock->class) {
ret = -2;
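A minimal userspace sketch of the new interface and the loop it enables; the
names are invented for the demo:

#include <stdio.h>

#define QSIZE 4
#define QMASK (QSIZE - 1)

struct node { int id; };

struct circular_queue {
        struct node *element[QSIZE];
        unsigned int front, rear;
};

static int cq_empty(struct circular_queue *cq)
{
        return cq->front == cq->rear;
}

static void cq_enqueue(struct circular_queue *cq, struct node *elem)
{
        /* demo: capacity is ample, skip the full check */
        cq->element[cq->rear] = elem;
        cq->rear = (cq->rear + 1) & QMASK;
}

/* Return the element, or NULL when empty: no out-parameter needed. */
static struct node *cq_dequeue(struct circular_queue *cq)
{
        struct node *n;

        if (cq_empty(cq))
                return NULL;
        n = cq->element[cq->front];
        cq->front = (cq->front + 1) & QMASK;
        return n;
}

int main(void)
{
        struct circular_queue cq = { .front = 0, .rear = 0 };
        struct node a = { 1 }, b = { 2 }, *n;

        cq_enqueue(&cq, &a);
        cq_enqueue(&cq, &b);

        /* The BFS loop collapses to a single test-and-fetch. */
        while ((n = cq_dequeue(&cq)))
                printf("visit node %d\n", n->id);
        return 0;
}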
Commit-ID: aa4807719e076bfb2dee9c96adf2c648e47d472f
Gitweb: https://git.kernel.org/tip/aa4807719e076bfb2dee9c96adf2c648e47d472f
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:28 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:45 +0200
locking/lockdep: Change type of the element field in circular_queue
The element field is an array in struct circular_queue to keep track of locks
in the search. Making it the same type as the locks avoids type casts. Also
fix a typo and elaborate on the comment above struct circular_queue.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a9799f9ed093..d467ba825dca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1262,13 +1262,17 @@ static int add_lock_to_list(struct lock_class *this,
#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
/*
- * The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
+ * The circular_queue and helpers are used to implement the graph
+ * breadth-first search (BFS) algorithm, by which we can determine
+ * whether there is a path from a lock to another. In deadlock checks,
+ * a path from the next lock to be acquired to a previous held lock
+ * indicates that adding the <prev> -> <next> lock dependency will
+ * produce a circle in the graph. Breadth-first search instead of
+ * depth-first search is used in order to find the shortest (circular)
+ * path.
*/
struct circular_queue {
- unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+ struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
unsigned int front, rear;
};
@@ -1294,7 +1298,7 @@ static inline int __cq_full(struct circular_queue *cq)
return ((cq->rear + 1) & CQ_MASK) == cq->front;
}
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
if (__cq_full(cq))
return -1;
@@ -1304,7 +1308,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
return 0;
}
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
{
if (__cq_empty(cq))
return -1;
@@ -1382,12 +1386,12 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
__cq_init(cq);
- __cq_enqueue(cq, (unsigned long)source_entry);
+ __cq_enqueue(cq, source_entry);
while (!__cq_empty(cq)) {
struct lock_list *lock;
- __cq_dequeue(cq, (unsigned long *)&lock);
+ __cq_dequeue(cq, &lock);
if (!lock->class) {
ret = -2;
@@ -1411,7 +1415,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (__cq_enqueue(cq, (unsigned long)entry)) {
+ if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
Commit-ID: 77a806922cfdebcf3ae89d31a8b592a7f7fbe537
Gitweb: https://git.kernel.org/tip/77a806922cfdebcf3ae89d31a8b592a7f7fbe537
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:30 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:46 +0200
locking/lockdep: Avoid constant checks in __bfs by using offset reference
When searching for a dependency in the lock graph, there are constant
checks for whether the search is forward or backward. Directly reference
the field offset in the struct that differentiates the search direction
to avoid those checks.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d23dcb47389e..2e8ef6082f72 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1367,11 +1367,25 @@ static inline int get_lock_depth(struct lock_list *child)
return depth;
}
+/*
+ * Return the forward or backward dependency list.
+ *
+ * @lock: the lock_list to get its class's dependency list
+ * @offset: the offset to struct lock_class to determine whether it is
+ * locks_after or locks_before
+ */
+static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
+{
+ void *lock_class = lock->class;
+
+ return lock_class + offset;
+}
+
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
- int forward)
+ int offset)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1385,11 +1399,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (forward)
- head = &source_entry->class->locks_after;
- else
- head = &source_entry->class->locks_before;
-
+ head = get_dep_list(source_entry, offset);
if (list_empty(head))
goto exit;
@@ -1403,10 +1413,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
- if (forward)
- head = &lock->class->locks_after;
- else
- head = &lock->class->locks_before;
+ head = get_dep_list(lock, offset);
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -1439,7 +1446,8 @@ static inline int __bfs_forwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 1);
+ return __bfs(src_entry, data, match, target_entry,
+ offsetof(struct lock_class, locks_after));
}
@@ -1448,7 +1456,8 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
- return __bfs(src_entry, data, match, target_entry, 0);
+ return __bfs(src_entry, data, match, target_entry,
+ offsetof(struct lock_class, locks_before));
}
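The technique can be shown in a few lines of userspace C; the struct names
below are made up, and while the kernel version does the pointer arithmetic
on void * (a GCC extension), this sketch uses char * for portability:

#include <stdio.h>
#include <stddef.h>

struct list_head_demo { struct list_head_demo *next; };

struct lock_class_demo {
        struct list_head_demo locks_after;
        struct list_head_demo locks_before;
};

/* Pick a list by byte offset instead of branching on a direction flag. */
static struct list_head_demo *get_dep_list(struct lock_class_demo *class,
                                           size_t offset)
{
        return (struct list_head_demo *)((char *)class + offset);
}

int main(void)
{
        struct lock_class_demo c = { { NULL }, { NULL } };

        struct list_head_demo *fwd =
                get_dep_list(&c, offsetof(struct lock_class_demo, locks_after));
        struct list_head_demo *bwd =
                get_dep_list(&c, offsetof(struct lock_class_demo, locks_before));

        printf("forward list:  %p\n", (void *)fwd);
        printf("backward list: %p\n", (void *)bwd);
        return 0;
}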
Commit-ID: 154f185e9c0f6c50ac8e901630e14aa5b36f9414
Gitweb: https://git.kernel.org/tip/154f185e9c0f6c50ac8e901630e14aa5b36f9414
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:31 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:47 +0200
locking/lockdep: Update comments on dependency search
The breadth-first search is implemented as flat-out non-recursive now, but
the comments still describe it as recursive; update the comments
accordingly.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2e8ef6082f72..b2ca20aa69aa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1381,6 +1381,10 @@ static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
return lock_class + offset;
}
+/*
+ * Forward- or backward-dependency search, used for both circular dependency
+ * checking and hardirq-unsafe/softirq-unsafe checking.
+ */
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
@@ -1461,12 +1465,6 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
}
-/*
- * Recursive, forwards-direction lock-dependency checking, used for
- * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
- * checking.
- */
-
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
unsigned long *entries = stack_trace + trace->offset;
@@ -2285,7 +2283,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
/*
* There was a chain-cache miss, and we are about to add a new dependency
- * to a previous lock. We recursively validate the following rules:
+ * to a previous lock. We validate the following rules:
*
* - would the adding of the <prev> -> <next> dependency create a
* circular dependency in the graph? [== circular deadlock]
@@ -2335,11 +2333,12 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
- * forward-recursing into the graph starting at <next>, and
- * checking whether we can reach <prev>.)
+ * a breadth-first search into the graph starting at <next>,
+ * and check whether we can reach <prev>.)
*
- * We are using global variables to control the recursion, to
- * keep the stackframe size of the recursive functions low:
+ * The search is limited by the size of the circular queue (i.e.,
+ * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
+ * in the graph whose neighbours are to be checked.
*/
this.class = hlock_class(next);
this.parent = NULL;
Commit-ID: 8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab
Gitweb: https://git.kernel.org/tip/8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:35 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:50 +0200
locking/lockdep: Refactorize check_noncircular and check_redundant
These two functions now handle different check results themselves. A new
check_path function is added to check whether there is a path in the
dependency graph. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 118 +++++++++++++++++++++++++++++------------------
1 file changed, 74 insertions(+), 44 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8169706df767..30a1c0e32573 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
}
/*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check whether the dependency graph starting at <src> can lead to
+ * <target>. Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
- struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+ struct lock_list **target_entry)
{
- int result;
+ int ret;
+
+ ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+ target_entry);
+
+ if (unlikely(ret < 0))
+ print_bfs_bug(ret);
+
+ return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+ struct lock_trace *trace)
+{
+ int ret;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list src_entry = {
+ .class = hlock_class(src),
+ .parent = NULL,
+ };
debug_atomic_inc(nr_cyclic_checks);
- result = __bfs_forwards(root, target, class_equal, target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry);
- return result;
+ if (unlikely(!ret)) {
+ if (!trace->nr_entries) {
+ /*
+ * If save_trace fails here, the printing might
+ * trigger a WARN but because of the !nr_entries it
+ * should not do bad things.
+ */
+ save_trace(trace);
+ }
+
+ print_circular_bug(&src_entry, target_entry, src, target);
+ }
+
+ return ret;
}
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
- struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
{
- int result;
+ int ret;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list src_entry = {
+ .class = hlock_class(src),
+ .parent = NULL,
+ };
debug_atomic_inc(nr_redundant_checks);
- result = __bfs_forwards(root, target, class_equal, target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry);
- return result;
+ if (!ret) {
+ debug_atomic_inc(nr_redundant);
+ ret = 2;
+ } else if (ret < 0)
+ ret = 0;
+
+ return ret;
}
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance, struct lock_trace *trace)
{
- struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
- struct lock_list this;
int ret;
if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
* in the graph whose neighbours are to be checked.
*/
- this.class = hlock_class(next);
- this.parent = NULL;
- ret = check_noncircular(&this, hlock_class(prev), &target_entry);
- if (unlikely(!ret)) {
- if (!trace->nr_entries) {
- /*
- * If save_trace fails here, the printing might
- * trigger a WARN but because of the !nr_entries it
- * should not do bad things.
- */
- save_trace(trace);
- }
- print_circular_bug(&this, target_entry, next, prev);
+ ret = check_noncircular(next, prev, trace);
+ if (unlikely(ret <= 0))
return 0;
- }
- else if (unlikely(ret < 0)) {
- print_bfs_bug(ret);
- return 0;
- }
if (!check_irq_usage(curr, prev, next))
return 0;
@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
/*
* Is the <prev> -> <next> link redundant?
*/
- this.class = hlock_class(prev);
- this.parent = NULL;
- ret = check_redundant(&this, hlock_class(next), &target_entry);
- if (!ret) {
- debug_atomic_inc(nr_redundant);
- return 2;
- }
- if (ret < 0) {
- print_bfs_bug(ret);
- return 0;
- }
-
+ ret = check_redundant(prev, next);
+ if (ret != 1)
+ return ret;
if (!trace->nr_entries && !save_trace(trace))
return 0;
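To make the new convention concrete, here is a minimal userspace sketch
(simplified stand-in types and names, not the kernel code) of a
check_path()-style BFS that returns a negative value on an internal error,
0 when a path to the target exists, and 1 when it does not, matching how
check_noncircular() and check_redundant() interpret the result:

#include <stdio.h>

#define MAX_NODES 8

static int edge[MAX_NODES][MAX_NODES];	/* toy dependency graph */

static int check_path(int src, int target)
{
	int queue[MAX_NODES], head = 0, tail = 0;
	int seen[MAX_NODES] = { 0 };

	if (src < 0 || src >= MAX_NODES || target < 0 || target >= MAX_NODES)
		return -1;		/* the print_bfs_bug() case */

	queue[tail++] = src;
	seen[src] = 1;
	while (head < tail) {
		int n = queue[head++];

		if (n == target)
			return 0;	/* path found */
		for (int m = 0; m < MAX_NODES; m++) {
			if (edge[n][m] && !seen[m]) {
				seen[m] = 1;
				queue[tail++] = m;
			}
		}
	}
	return 1;			/* no path */
}

int main(void)
{
	edge[0][1] = edge[1][2] = 1;	/* A -> B -> C */

	/* check_noncircular(): adding C -> A would close a circle */
	if (!check_path(0, 2))
		puts("A reaches C: a C -> A dependency would be circular");
	/* check_redundant(): a direct A -> C link is already covered */
	if (!check_path(0, 2))
		puts("A reaches C: a direct A -> C link is redundant");
	return 0;
}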
Commit-ID: 68e9dc29f8f42c79d2a3755223ed910ce36b4ae2
Gitweb: https://git.kernel.org/tip/68e9dc29f8f42c79d2a3755223ed910ce36b4ae2
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:36 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:50 +0200
locking/lockdep: Check redundant dependency only when CONFIG_LOCKDEP_SMALL
As Peter has put the rationale for this soundly and completely, I simply quote:
"It (check_redundant) was added for cross-release (which has since been
reverted) which would generate a lot of redundant links (IIRC) but
having it makes the reports more convoluted -- basically, if we had an
A-B-C relation, then A-C will not be added to the graph because it is
already covered. This then means any report will include B, even though
a shorter cycle might have been possible."
Disabling the redundancy check increases the number of direct dependencies. For a simple workload
(make clean; reboot; make vmlinux -j8), the data looks like this:
CONFIG_LOCKDEP_SMALL: direct dependencies: 6926
!CONFIG_LOCKDEP_SMALL: direct dependencies: 9052 (+30.7%)
Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 30a1c0e32573..63b82921698d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1739,6 +1739,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
return ret;
}
+#ifdef CONFIG_LOCKDEP_SMALL
/*
* Check that the dependency graph starting at <src> can lead to
* <target> or not. If it can, <src> -> <target> dependency is already
@@ -1768,6 +1769,7 @@ check_redundant(struct held_lock *src, struct held_lock *target)
return ret;
}
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2428,12 +2430,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
}
}
+#ifdef CONFIG_LOCKDEP_SMALL
/*
* Is the <prev> -> <next> link redundant?
*/
ret = check_redundant(prev, next);
if (ret != 1)
return ret;
+#endif
if (!trace->nr_entries && !save_trace(trace))
return 0;
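A hypothetical illustration of the trade-off quoted above (toy names, three
nodes): with the redundancy check enabled, a direct A -> C link is dropped
when A -> B -> C already covers it, which keeps the graph small but forces
any later report through B.

#include <stdio.h>

#define LOCKDEP_SMALL 1	/* flip to 0 to mimic !CONFIG_LOCKDEP_SMALL */

static int edge[3][3];	/* 0 = A, 1 = B, 2 = C */

/* with only three nodes, one or two hops decide reachability */
static int reachable(int src, int dst)
{
	if (edge[src][dst])
		return 1;
	for (int k = 0; k < 3; k++)
		if (edge[src][k] && edge[k][dst])
			return 1;
	return 0;
}

static void add_dep(int from, int to)
{
	if (LOCKDEP_SMALL && reachable(from, to)) {
		printf("dep %d -> %d is redundant, not recorded\n", from, to);
		return;
	}
	edge[from][to] = 1;
	printf("dep %d -> %d recorded\n", from, to);
}

int main(void)
{
	add_dep(0, 1);	/* A -> B */
	add_dep(1, 2);	/* B -> C */
	add_dep(0, 2);	/* A -> C: dropped only when the check is on */
	return 0;
}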
Commit-ID: 091806515124b20f8cff7927b4b7ff399483b109
Gitweb: https://git.kernel.org/tip/091806515124b20f8cff7927b4b7ff399483b109
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:37 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:51 +0200
locking/lockdep: Consolidate lock usage bit initialization
Lock usage bit initialization is consolidated into one function
mark_usage(). Trivial readability improvement. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 63b82921698d..1123e7e6c78d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3460,8 +3460,12 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(redundant_softirqs_off);
}
-static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+static int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
+ if (!check)
+ goto lock_used;
+
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
@@ -3505,6 +3509,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
}
}
+lock_used:
+ /* mark it as used: */
+ if (!mark_lock(curr, hlock, LOCK_USED))
+ return 0;
+
return 1;
}
@@ -3546,8 +3555,8 @@ int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
return 1;
}
-static inline int mark_irqflags(struct task_struct *curr,
- struct held_lock *hlock)
+static inline int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
return 1;
}
@@ -3833,11 +3842,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
#endif
hlock->pin_count = pin_count;
- if (check && !mark_irqflags(curr, hlock))
- return 0;
-
- /* mark it as used: */
- if (!mark_lock(curr, hlock, LOCK_USED))
+ /* Initialize the lock usage bit */
+ if (!mark_usage(curr, hlock, check))
return 0;
/*
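The shape of the consolidation can be sketched in isolation (hypothetical
names; the real mark_usage() marks irq usage bits and then LOCK_USED): one
entry point, with the conditional part skipped via a label when @check is
zero.

#include <stdio.h>

static int mark_lock_used(void)
{
	puts("mark LOCK_USED");
	return 1;
}

/* one entry point for all lock usage bit initialization */
static int mark_usage(int check)
{
	if (!check)
		goto lock_used;

	/* irq-related usage marking, wanted only when @check is set */
	puts("mark irq usage bits");

lock_used:
	/* unconditionally mark the lock as used */
	if (!mark_lock_used())
		return 0;
	return 1;
}

int main(void)
{
	mark_usage(0);	/* no-check acquisition: skip the irq marking */
	mark_usage(1);	/* full check: mark everything */
	return 0;
}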
Commit-ID: bf998b98f5bce4ebc97b3980016f54fabb7a4958
Gitweb: https://git.kernel.org/tip/bf998b98f5bce4ebc97b3980016f54fabb7a4958
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:39 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:53 +0200
locking/lockdep: Remove !dir in lock irq usage check
In mark_lock_irq(), the following checks are performed:
 ----------------------------------------
|     ->      |  unsafe  |  read unsafe  |
|----------------------------------------|
|  safe       |   F  B   |    F*  B*     |
|----------------------------------------|
|  read safe  |  F?  B*  |       -       |
 ----------------------------------------
Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS
?: check enabled by the !dir condition
From a checking point of view, the special F? case does not make sense; it
was perhaps made out of performance concern. As a later patch will address
this case, remove this exception, which makes the checks consistent.
With STRICT_READ_CHECKS = 1, which is the default, there is no functional
change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9c4e2a7547d3..2168e94715b9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3235,7 +3235,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
* Validate that the lock dependencies don't have conflicting usage
* states.
*/
- if ((!read || !dir || STRICT_READ_CHECKS) &&
+ if ((!read || STRICT_READ_CHECKS) &&
!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
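Writing the guard out as a standalone predicate shows what dropping !dir
changes (a sketch; with STRICT_READ_CHECKS fixed at 1, the default, the two
versions agree on every input, hence no functional change):

#include <stdio.h>

#define STRICT_READ_CHECKS 1

/* before: the backward (!dir) direction validated even plain read usage */
static int usage_check_wanted_old(int read, int dir)
{
	return !read || !dir || STRICT_READ_CHECKS;
}

/* after: the decision no longer depends on the search direction */
static int usage_check_wanted_new(int read, int dir)
{
	(void)dir;
	return !read || STRICT_READ_CHECKS;
}

int main(void)
{
	for (int read = 0; read <= 1; read++)
		for (int dir = 0; dir <= 1; dir++)
			printf("read=%d dir=%d old=%d new=%d\n", read, dir,
			       usage_check_wanted_old(read, dir),
			       usage_check_wanted_new(read, dir));
	return 0;
}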
Commit-ID: f7c1c6b36a3874d3a7987fb3af829d5b0d75bda7
Gitweb: https://git.kernel.org/tip/f7c1c6b36a3874d3a7987fb3af829d5b0d75bda7
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:17 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:32 +0200
locking/lockdep: Change all print_*() return type to void
Since none of the print_*() function's return value is necessary, change
their return type to void. No functional change.
In cases where an invariable return value is used, this change slightly
improves readability, i.e.:
print_x();
return 0;
is definitely better than:
return print_x(); /* where print_x() always returns 0 */
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 209 ++++++++++++++++++++++++-----------------------
1 file changed, 108 insertions(+), 101 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8d32ae7768a7..109b56267c8f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1427,16 +1427,15 @@ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
-static noinline int
+static noinline void
print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
- return 0;
+ return;
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(KERN_CONT ":\n");
print_lock_trace(&target->trace, 6);
- return 0;
}
static void
@@ -1493,7 +1492,7 @@ print_circular_lock_scenario(struct held_lock *src,
* When a circular dependency is detected, print the
* header first:
*/
-static noinline int
+static noinline void
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct held_lock *check_src,
struct held_lock *check_tgt)
@@ -1501,7 +1500,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct task_struct *curr = current;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("======================================================\n");
@@ -1519,8 +1518,6 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
print_circular_bug_entry(entry, depth);
-
- return 0;
}
static inline int class_equal(struct lock_list *entry, void *data)
@@ -1528,10 +1525,10 @@ static inline int class_equal(struct lock_list *entry, void *data)
return entry->class == data;
}
-static noinline int print_circular_bug(struct lock_list *this,
- struct lock_list *target,
- struct held_lock *check_src,
- struct held_lock *check_tgt)
+static noinline void print_circular_bug(struct lock_list *this,
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
struct lock_list *parent;
@@ -1539,10 +1536,10 @@ static noinline int print_circular_bug(struct lock_list *this,
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
if (!save_trace(&this->trace))
- return 0;
+ return;
depth = get_lock_depth(target);
@@ -1564,21 +1561,17 @@ static noinline int print_circular_bug(struct lock_list *this,
printk("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
-static noinline int print_bfs_bug(int ret)
+static noinline void print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
- return 0;
+ return;
/*
* Breadth-first-search failed, graph got corrupted?
*/
WARN(1, "lockdep bfs error:%d\n", ret);
-
- return 0;
}
static int noop_count(struct lock_list *entry, void *data)
@@ -1767,7 +1760,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
- struct lock_list *root)
+ struct lock_list *root)
{
struct lock_list *entry = leaf;
int depth;
@@ -1789,8 +1782,6 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
entry = get_lock_parent(entry);
depth--;
} while (entry && (depth >= 0));
-
- return;
}
static void
@@ -1849,7 +1840,7 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
printk("\n *** DEADLOCK ***\n\n");
}
-static int
+static void
print_bad_irq_dependency(struct task_struct *curr,
struct lock_list *prev_root,
struct lock_list *next_root,
@@ -1862,7 +1853,7 @@ print_bad_irq_dependency(struct task_struct *curr,
const char *irqclass)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=====================================================\n");
@@ -1908,19 +1899,17 @@ print_bad_irq_dependency(struct task_struct *curr,
pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
if (!save_trace(&prev_root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(backwards_entry, prev_root);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
if (!save_trace(&next_root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(forwards_entry, next_root);
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static const char *state_names[] = {
@@ -2067,8 +2056,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
this.class = hlock_class(prev);
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
usage_mask &= LOCKF_USED_IN_IRQ_ALL;
if (!usage_mask)
@@ -2084,8 +2075,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
that.class = hlock_class(next);
ret = find_usage_forwards(&that, forward_mask, &target_entry1);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
@@ -2097,8 +2090,10 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
backward_mask = original_mask(target_entry1->class->usage_mask);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (DEBUG_LOCKS_WARN_ON(ret == 1))
return 1;
@@ -2112,11 +2107,13 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
if (DEBUG_LOCKS_WARN_ON(ret == -1))
return 1;
- return print_bad_irq_dependency(curr, &this, &that,
- target_entry, target_entry1,
- prev, next,
- backward_bit, forward_bit,
- state_name(backward_bit));
+ print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
+ backward_bit, forward_bit,
+ state_name(backward_bit));
+
+ return 0;
}
static void inc_chains(void)
@@ -2147,8 +2144,7 @@ static inline void inc_chains(void)
#endif
static void
-print_deadlock_scenario(struct held_lock *nxt,
- struct held_lock *prv)
+print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
{
struct lock_class *next = hlock_class(nxt);
struct lock_class *prev = hlock_class(prv);
@@ -2166,12 +2162,12 @@ print_deadlock_scenario(struct held_lock *nxt,
printk(" May be due to missing lock nesting notation\n\n");
}
-static int
+static void
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("============================================\n");
@@ -2190,8 +2186,6 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -2233,7 +2227,8 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
if (nest)
return 2;
- return print_deadlock_bug(curr, prev, next);
+ print_deadlock_bug(curr, prev, next);
+ return 0;
}
return 1;
}
@@ -2308,10 +2303,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
*/
save_trace(trace);
}
- return print_circular_bug(&this, target_entry, next, prev);
+ print_circular_bug(&this, target_entry, next, prev);
+ return 0;
+ }
+ else if (unlikely(ret < 0)) {
+ print_bfs_bug(ret);
+ return 0;
}
- else if (unlikely(ret < 0))
- return print_bfs_bug(ret);
if (!check_irq_usage(curr, prev, next))
return 0;
@@ -2352,8 +2350,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
debug_atomic_inc(nr_redundant);
return 2;
}
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (!trace->nr_entries && !save_trace(trace))
@@ -2877,8 +2877,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
-static void
-print_usage_bug_scenario(struct held_lock *lock)
+static void print_usage_bug_scenario(struct held_lock *lock)
{
struct lock_class *class = hlock_class(lock);
@@ -2895,12 +2894,12 @@ print_usage_bug_scenario(struct held_lock *lock)
printk("\n *** DEADLOCK ***\n\n");
}
-static int
+static void
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("================================\n");
@@ -2930,8 +2929,6 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -2941,8 +2938,10 @@ static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
- if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
- return print_usage_bug(curr, this, bad_bit, new_bit);
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
+ print_usage_bug(curr, this, bad_bit, new_bit);
+ return 0;
+ }
return 1;
}
@@ -2950,7 +2949,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
/*
* print irq inversion bug:
*/
-static int
+static void
print_irq_inversion_bug(struct task_struct *curr,
struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
@@ -2961,7 +2960,7 @@ print_irq_inversion_bug(struct task_struct *curr,
int depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("========================================================\n");
@@ -3002,13 +3001,11 @@ print_irq_inversion_bug(struct task_struct *curr,
pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
if (!save_trace(&root->trace))
- return 0;
+ return;
print_shortest_lock_dependencies(other, root);
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
/*
@@ -3026,13 +3023,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, &root, target_entry,
- this, 1, irqclass);
+ print_irq_inversion_bug(curr, &root, target_entry,
+ this, 1, irqclass);
+ return 0;
}
/*
@@ -3050,13 +3050,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
+ if (ret < 0) {
+ print_bfs_bug(ret);
+ return 0;
+ }
if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, &root, target_entry,
- this, 0, irqclass);
+ print_irq_inversion_bug(curr, &root, target_entry,
+ this, 0, irqclass);
+ return 0;
}
void print_irqtrace_events(struct task_struct *curr)
@@ -3599,15 +3602,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
-static int
+static void
print_lock_nested_lock_not_held(struct task_struct *curr,
struct held_lock *hlock,
unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("==================================\n");
@@ -3629,8 +3632,6 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -3779,8 +3780,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
chain_key = iterate_chain_key(chain_key, class_idx);
- if (nest_lock && !__lock_is_held(nest_lock, -1))
- return print_lock_nested_lock_not_held(curr, hlock, ip);
+ if (nest_lock && !__lock_is_held(nest_lock, -1)) {
+ print_lock_nested_lock_not_held(curr, hlock, ip);
+ return 0;
+ }
if (!debug_locks_silent) {
WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
@@ -3816,14 +3819,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
return 1;
}
-static int
-print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
- unsigned long ip)
+static void print_unlock_imbalance_bug(struct task_struct *curr,
+ struct lockdep_map *lock,
+ unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=====================================\n");
@@ -3841,8 +3844,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static int match_held_lock(const struct held_lock *hlock,
@@ -3961,8 +3962,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
@@ -4002,8 +4005,10 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
return 0;
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -4047,16 +4052,20 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
* So we're all set to release this lock.. wait what lock? We don't
* own any locks, you've been drinking again?
*/
- if (DEBUG_LOCKS_WARN_ON(depth <= 0))
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (DEBUG_LOCKS_WARN_ON(depth <= 0)) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
/*
* Check whether the lock exists in the current stack
* of held locks:
*/
hlock = find_held_lock(curr, lock, depth, &i);
- if (!hlock)
- return print_unlock_imbalance_bug(curr, lock, ip);
+ if (!hlock) {
+ print_unlock_imbalance_bug(curr, lock, ip);
+ return 0;
+ }
if (hlock->instance == lock)
lock_release_holdtime(hlock);
@@ -4399,14 +4408,14 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
EXPORT_SYMBOL_GPL(lock_unpin_lock);
#ifdef CONFIG_LOCK_STAT
-static int
-print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
- unsigned long ip)
+static void print_lock_contention_bug(struct task_struct *curr,
+ struct lockdep_map *lock,
+ unsigned long ip)
{
if (!debug_locks_off())
- return 0;
+ return;
if (debug_locks_silent)
- return 0;
+ return;
pr_warn("\n");
pr_warn("=================================\n");
@@ -4424,8 +4433,6 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
pr_warn("\nstack backtrace:\n");
dump_stack();
-
- return 0;
}
static void
Commit-ID: 4d56330df22dd9dd9a24f147014f60ee4c914fb8
Gitweb: https://git.kernel.org/tip/4d56330df22dd9dd9a24f147014f60ee4c914fb8
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:38 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:52 +0200
locking/lockdep: Adjust new bit cases in mark_lock
The new bit can be any possible lock usage except garbage, so the cases in
the switch can be made simpler. Warn early if a garbage usage bit is passed,
before any locks are taken. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 21 +++++++--------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1123e7e6c78d..9c4e2a7547d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3582,6 +3582,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
{
unsigned int new_mask = 1 << new_bit, ret = 1;
+ if (new_bit >= LOCK_USAGE_STATES) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return 0;
+ }
+
/*
* If already set then do not dirty the cacheline,
* nor do any checks:
@@ -3605,25 +3610,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
switch (new_bit) {
-#define LOCKDEP_STATE(__STATE) \
- case LOCK_USED_IN_##__STATE: \
- case LOCK_USED_IN_##__STATE##_READ: \
- case LOCK_ENABLED_##__STATE: \
- case LOCK_ENABLED_##__STATE##_READ:
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
- ret = mark_lock_irq(curr, this, new_bit);
- if (!ret)
- return 0;
- break;
case LOCK_USED:
debug_atomic_dec(nr_unused_locks);
break;
default:
- if (!debug_locks_off_graph_unlock())
+ ret = mark_lock_irq(curr, this, new_bit);
+ if (!ret)
return 0;
- WARN_ON(1);
- return 0;
}
graph_unlock();
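A sketch of the resulting control flow (toy enum values, not the kernel's):
garbage bits are rejected up front, LOCK_USED keeps its own case, and every
other valid bit falls through to the default branch as an irq usage bit.

#include <stdio.h>

enum { LOCK_USED, LOCK_USED_IN_HARDIRQ, LOCK_USAGE_STATES };

static int mark_lock_irq(int bit)
{
	printf("irq usage marking for bit %d\n", bit);
	return 1;
}

static int mark_lock(int new_bit)
{
	/* warn early, before any locks would be taken */
	if (new_bit >= LOCK_USAGE_STATES) {
		fprintf(stderr, "bogus usage bit %d\n", new_bit);
		return 0;
	}

	switch (new_bit) {
	case LOCK_USED:
		puts("one fewer unused lock");
		break;
	default:
		/* every remaining valid bit is an irq usage bit */
		if (!mark_lock_irq(new_bit))
			return 0;
	}
	return 1;
}

int main(void)
{
	mark_lock(LOCK_USED);
	mark_lock(LOCK_USED_IN_HARDIRQ);
	mark_lock(42);	/* garbage: warned about and rejected */
	return 0;
}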
Commit-ID: c52478f4f38ace598475413a08dba9b9fd827eaf
Gitweb: https://git.kernel.org/tip/c52478f4f38ace598475413a08dba9b9fd827eaf
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:19 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:35 +0200
locking/lockdep: Adjust lock usage bit character checks
The lock usage bit characters are defined and determined with tricks.
Add some explanation to make them a bit clearer, then adjust the logic that
checks the usage, which also optimizes the code a bit.
No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 109b56267c8f..a033df00fd1d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -500,15 +500,26 @@ static inline unsigned long lock_flag(enum lock_usage_bit bit)
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
+ /*
+ * The usage character defaults to '.' (i.e., irqs disabled and not in
+ * irq context), which is the safest usage category.
+ */
char c = '.';
- if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+ /*
+ * The order of the following usage checks matters, which will
+ * result in the outcome character as follows:
+ *
+ * - '+': irq is enabled and not in irq context
+ * - '-': in irq context and irq is disabled
+ * - '?': in irq context and irq is enabled
+ */
+ if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
c = '+';
- if (class->usage_mask & lock_flag(bit)) {
- c = '-';
- if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+ if (class->usage_mask & lock_flag(bit))
c = '?';
- }
+ } else if (class->usage_mask & lock_flag(bit))
+ c = '-';
return c;
}
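The restructured logic can be exercised on its own. A userspace sketch, with
two plain flag bits standing in for lock_flag(bit) and
lock_flag(bit + LOCK_USAGE_DIR_MASK):

#include <stdio.h>

#define USED_IN_IRQ	0x1	/* ever used in irq context */
#define ENABLED_IRQ	0x2	/* ever acquired with irqs enabled */

static char get_usage_char(unsigned int mask)
{
	char c = '.';	/* irqs disabled and not in irq context */

	if (mask & ENABLED_IRQ) {
		c = '+';		/* irqs enabled, not in irq context */
		if (mask & USED_IN_IRQ)
			c = '?';	/* in irq context with irqs enabled */
	} else if (mask & USED_IN_IRQ)
		c = '-';		/* in irq context, irqs disabled */

	return c;
}

int main(void)
{
	for (unsigned int mask = 0; mask < 4; mask++)
		printf("mask=%u -> '%c'\n", mask, get_usage_char(mask));
	return 0;
}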
Commit-ID: d16dbd1b8a29bb9f8aca2c2f3bd1a0d2b7621126
Gitweb: https://git.kernel.org/tip/d16dbd1b8a29bb9f8aca2c2f3bd1a0d2b7621126
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:22 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:41 +0200
locking/lockdep: Update obsolete struct field description
The lock_chain struct definition has an outdated comment; update it and add
descriptions of the struct members.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/lockdep.h | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6e2377e6c1d6..851d44fa5457 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -203,11 +203,17 @@ struct lock_list {
struct lock_list *parent;
};
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth: the number of held locks in this chain
+ * @base: the index in chain_hlocks for this chain
+ * @entry: the collided lock chains in lock_chain hash list
+ * @chain_key: the hash key of this lock_chain
*/
struct lock_chain {
- /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ /* see BUILD_BUG_ON()s in add_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
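The documented fields are packed bitfields; a standalone sketch (layout
only, not the kernel struct) shows that the 2 + 6 + 24 bits share one
32-bit word on common ABIs:

#include <stdio.h>

struct lock_chain_layout {
	unsigned int irq_context : 2,	/* which irq context the chain is in */
		     depth : 6,		/* up to 63 held locks in the chain */
		     base : 24;		/* index into a chain_hlocks-like array */
};

int main(void)
{
	struct lock_chain_layout c = {
		.irq_context = 1,
		.depth = 5,
		.base = 1000,
	};

	printf("sizeof = %zu bytes\n", sizeof(c));	/* 4 on common ABIs */
	printf("irq_context=%u depth=%u base=%u\n",
	       c.irq_context, c.depth, c.base);
	return 0;
}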
Commit-ID: e196e479a3b844da6e6e71e0d2a8694040cb4e52
Gitweb: https://git.kernel.org/tip/e196e479a3b844da6e6e71e0d2a8694040cb4e52
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:23 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:42 +0200
locking/lockdep: Use lockdep_init_task for task initiation consistently
Although there is a lockdep_init_task() which does nothing, lockdep
initializes tasks by assigning lockdep fields directly, and does so
inconsistently. Fix this by using lockdep_init_task() consistently.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/lockdep.h | 7 ++++++-
init/init_task.c | 2 ++
kernel/fork.c | 3 ---
kernel/locking/lockdep.c | 11 ++++++++---
4 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 851d44fa5457..5d05b8149f19 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -287,6 +287,8 @@ extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);
+extern void lockdep_init_task(struct task_struct *task);
+
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -411,6 +413,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -503,7 +509,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }
static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index c70ef656d0f4..1b15cb90d64f 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -166,6 +166,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
+ .lockdep_depth = 0, /* no locks held yet */
+ .curr_chain_key = 0,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6df..735d0b4a89e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1984,9 +1984,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;
#ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
lockdep_init_task(p);
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bc1efc12a8c5..b7d9c28ecf3b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -359,6 +359,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
}
+void lockdep_init_task(struct task_struct *task)
+{
+ task->lockdep_depth = 0; /* no locks held yet */
+ task->curr_chain_key = 0;
+ task->lockdep_recursion = 0;
+}
+
void lockdep_off(void)
{
current->lockdep_recursion++;
@@ -4589,9 +4596,7 @@ void lockdep_reset(void)
int i;
raw_local_irq_save(flags);
- current->curr_chain_key = 0;
- current->lockdep_depth = 0;
- current->lockdep_recursion = 0;
+ lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
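The pattern is easy to see in a sketch (a simplified task struct): one
helper for every runtime initialization site, while a static instance such
as init_task must still spell the same values out as designated
initializers, since static initializers cannot call functions:

#include <stdio.h>

struct task {
	int lockdep_depth;
	unsigned long long curr_chain_key;
	int lockdep_recursion;
};

static void lockdep_init_task(struct task *t)
{
	t->lockdep_depth = 0;	/* no locks held yet */
	t->curr_chain_key = 0;
	t->lockdep_recursion = 0;
}

/* the static instance spells the values out instead of calling the helper */
static struct task init_task = {
	.lockdep_depth = 0,
	.curr_chain_key = 0,
	.lockdep_recursion = 0,
};

int main(void)
{
	struct task forked;

	lockdep_init_task(&forked);	/* every runtime site uses the helper */
	printf("init_task depth=%d, forked depth=%d\n",
	       init_task.lockdep_depth, forked.lockdep_depth);
	return 0;
}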
Commit-ID: 834494b28024b39d45aea6bcc642b0fe94fe2503
Gitweb: https://git.kernel.org/tip/834494b28024b39d45aea6bcc642b0fe94fe2503
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:21 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:36 +0200
locking/lockdep: Print the right depth for chain key collision
Since chains are separated by IRQ context, when printing a chain the depth
should be consistent with that context.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c477018e184..bc1efc12a8c5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2519,10 +2519,11 @@ print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_ne
struct held_lock *hlock;
u64 chain_key = 0;
int depth = curr->lockdep_depth;
- int i;
+ int i = get_first_held_lock(curr, hlock_next);
- printk("depth: %u\n", depth + 1);
- for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+ printk("depth: %u (irq_context %u)\n", depth - i + 1,
+ hlock_next->irq_context);
+ for (; i < depth; i++) {
hlock = curr->held_locks + i;
chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
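The arithmetic of the fix in isolation (hypothetical values): if i is the
index of the first held lock in the current irq context, the printed chain
covers the depth - i locks of that context plus the next lock:

#include <stdio.h>

int main(void)
{
	int depth = 5;	/* curr->lockdep_depth: all held locks */
	int i = 3;	/* first held lock in this irq context */

	/* old: counted every held lock, across irq contexts */
	printf("old depth: %d\n", depth + 1);
	/* new: only this irq context's locks, plus the next one */
	printf("new depth: %d\n", depth - i + 1);
	return 0;
}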
Commit-ID: b4adfe8e05f15d7e73309c93c2c337df7eb5278f
Gitweb: https://git.kernel.org/tip/b4adfe8e05f15d7e73309c93c2c337df7eb5278f
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:34 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:49 +0200
locking/lockdep: Remove unused argument in __lock_release
The @nested argument is not used in __lock_release(), so remove it, even
though it is not used in lock_release() in the first place either.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index be4c1348ddcd..8169706df767 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4096,7 +4096,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
* @nested is an hysterical artifact, needs a tree wide cleanup.
*/
static int
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+__lock_release(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock;
@@ -4384,7 +4384,7 @@ void lock_release(struct lockdep_map *lock, int nested,
check_flags(flags);
current->lockdep_recursion = 1;
trace_lock_release(lock, ip);
- if (__lock_release(lock, nested, ip))
+ if (__lock_release(lock, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
Commit-ID: 1ac4ba5ed0114bcc146d5743d97df414af25c524
Gitweb: https://git.kernel.org/tip/1ac4ba5ed0114bcc146d5743d97df414af25c524
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:32 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:48 +0200
locking/lockdep: Add explanation to lock usage rules in lockdep design doc
The irq usage and lock dependency rules that, if violated, may lead to a
deadlock are explained in more detail.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
Documentation/locking/lockdep-design.txt | 33 ++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index ae65758383ea..f189d130e543 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -108,14 +108,24 @@ Unused locks (e.g., mutexes) cannot be part of the cause of an error.
Single-lock state rules:
------------------------
+A lock is irq-safe means it was ever used in an irq context, while a lock
+is irq-unsafe means it was ever acquired with irq enabled.
+
A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The
-following states are exclusive, and only one of them is allowed to be
-set for any lock-class:
+following states must be exclusive: only one of them is allowed to be set
+for any lock-class based on its usage:
+
+ <hardirq-safe> or <hardirq-unsafe>
+ <softirq-safe> or <softirq-unsafe>
- <hardirq-safe> and <hardirq-unsafe>
- <softirq-safe> and <softirq-unsafe>
+This is because if a lock can be used in irq context (irq-safe) then it
+cannot be ever acquired with irq enabled (irq-unsafe). Otherwise, a
+deadlock may happen. For example, in the scenario that after this lock
+was acquired but before released, if the context is interrupted this
+lock will be attempted to acquire twice, which creates a deadlock,
+referred to as lock recursion deadlock.
-The validator detects and reports lock usage that violate these
+The validator detects and reports lock usage that violates these
single-lock state rules.
Multi-lock dependency rules:
@@ -124,15 +134,18 @@ Multi-lock dependency rules:
The same lock-class must not be acquired twice, because this could lead
to lock recursion deadlocks.
-Furthermore, two locks may not be taken in different order:
+Furthermore, two locks can not be taken in inverse order:
<L1> -> <L2>
<L2> -> <L1>
-because this could lead to lock inversion deadlocks. (The validator
-finds such dependencies in arbitrary complexity, i.e. there can be any
-other locking sequence between the acquire-lock operations, the
-validator will still track all dependencies between locks.)
+because this could lead to a deadlock - referred to as lock inversion
+deadlock - as attempts to acquire the two locks form a circle which
+could lead to the two contexts waiting for each other permanently. The
+validator will find such dependency circle in arbitrary complexity,
+i.e., there can be any other locking sequence between the acquire-lock
+operations; the validator will still find whether these locks can be
+acquired in a circular fashion.
Furthermore, the following usage based lock dependencies are not allowed
between any two lock-classes:
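The inversion described above is the classic ABBA pattern. A userspace
illustration with POSIX mutexes (a sketch; trylock is used so the
demonstration terminates instead of actually deadlocking):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

/* second context: takes L2 and then wants L1 */
static void *ctx2(void *arg)
{
	pthread_mutex_lock(&L2);
	if (pthread_mutex_trylock(&L1) == 0) {
		pthread_mutex_unlock(&L1);
	} else {
		/* L1 is held by a context that wants L2: a circle */
		puts("L2 -> L1 while L1 -> L2 is in flight: inversion");
	}
	pthread_mutex_unlock(&L2);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&L1);	/* first context: takes L1 ... */
	pthread_create(&t, NULL, ctx2, NULL);
	pthread_join(t, NULL);		/* ... and would take L2 next */
	pthread_mutex_unlock(&L1);
	return 0;
}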
Commit-ID: 4609c4f963f353613812f999bb027aac795bcde8
Gitweb: https://git.kernel.org/tip/4609c4f963f353613812f999bb027aac795bcde8
Author: Yuyang Du <[email protected]>
AuthorDate: Mon, 6 May 2019 16:19:33 +0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 3 Jun 2019 11:55:49 +0200
locking/lockdep: Remove redundant argument in check_deadlock
In check_deadlock(), the third argument @read can be derived from the second
argument @hlock, so it can be removed. No functional change.
Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/locking/lockdep.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b2ca20aa69aa..be4c1348ddcd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2246,7 +2246,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
-check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2265,7 +2265,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
- if ((read == 2) && prev->read)
+ if ((next->read == 2) && prev->read)
return 2;
/*
@@ -2839,7 +2839,7 @@ static int validate_chain(struct task_struct *curr,
* The simple case: does the current hold the same lock
* already?
*/
- int ret = check_deadlock(curr, hlock, hlock->read);
+ int ret = check_deadlock(curr, hlock);
if (!ret)
return 0;
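The read == 2 case that now comes from @next->read is the recursive-read
allowance (the "return 2" path). A userspace analogue with a POSIX rwlock
(a sketch; in lockdep, read == 2 denotes read_lock(), which may nest on the
same class):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

int main(void)
{
	/* read-after-read on the same lock is permitted ... */
	pthread_rwlock_rdlock(&lock);
	pthread_rwlock_rdlock(&lock);
	puts("read_lock(lock) + read_lock(lock): no deadlock");
	pthread_rwlock_unlock(&lock);
	pthread_rwlock_unlock(&lock);

	/*
	 * ... while re-acquiring the same lock for write would
	 * self-deadlock, which is what check_deadlock() reports.
	 */
	return 0;
}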