2021-03-18 17:30:55

by Waiman Long

Subject: [PATCH-tip 5/5] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test

The ww_acquire_ctx structure for ww_mutex needs to persist for a complete
lock/unlock cycle. In the ww_mutex test in locktorture, however, both
ww_acquire_init() and ww_acquire_fini() are called within the lock
function only. This causes a lockdep splat of "WARNING: Nested lock
was not taken" when lockdep is enabled in the kernel.
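
For reference, the ww_mutex API expects an acquire context to bracket the
whole transaction: every lock taken under the context must be released before
the context is finished. A minimal single-mutex sketch, with illustrative
names (some_ww_class set up via DEFINE_WW_CLASS(), some_ww_mutex via
ww_mutex_init()), not code from this patch:

        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &some_ww_class);  /* start of the acquire phase */
        ww_mutex_lock(&some_ww_mutex, &ctx);    /* may return -EDEADLK under contention */
        /* ... critical section ... */
        ww_mutex_unlock(&some_ww_mutex);
        ww_acquire_fini(&ctx);                  /* only after every lock taken under ctx is dropped */

Tearing the context down in the lock function, while the test's mutexes are
still held, violates that expectation.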

To fix this problem, we need to move the ww_acquire_fini() after
the ww_mutex_unlock() in torture_ww_mutex_unlock(). This is done by
allocating a global array of ww_acquire_ctx structures. Each locking
thread is associated with its own ww_acquire_ctx via the unique thread
id it has so that both the lock and unlock functions can access the
same ww_acquire_ctx structure.
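
In outline, the patch below keeps one context per writer kthread, indexed by
the torture thread id already passed to the lock and unlock callbacks, so both
halves of the cycle operate on the same context:

        /* allocated in torture_ww_mutex_init(), one slot per writer thread */
        static struct ww_acquire_ctx *ww_acquire_ctxs;

        /* lock side, in torture_ww_mutex_lock(tid) */
        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
        ww_acquire_init(ctx, &torture_ww_class);

        /* unlock side, in torture_ww_mutex_unlock(tid), after the three ww_mutex_unlock() calls */
        ww_acquire_fini(&ww_acquire_ctxs[tid]);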

Signed-off-by: Waiman Long <[email protected]>
---
kernel/locking/locktorture.c | 39 +++++++++++++++++++++++++-----------
1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 90a975a95a13..b3adb40549bf 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -374,15 +374,27 @@ static struct lock_torture_ops mutex_lock_ops = {
  */
 static DEFINE_WD_CLASS(torture_ww_class);
 static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
+static struct ww_acquire_ctx *ww_acquire_ctxs;

 static void torture_ww_mutex_init(void)
 {
         ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
         ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
         ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
+
+        ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
+                                        sizeof(*ww_acquire_ctxs),
+                                        GFP_KERNEL);
+        if (!ww_acquire_ctxs)
+                VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
+}
+
+static void torture_ww_mutex_exit(void)
+{
+        kfree(ww_acquire_ctxs);
 }

-static int torture_ww_mutex_lock(int tid __maybe_unused)
+static int torture_ww_mutex_lock(int tid)
 __acquires(torture_ww_mutex_0)
 __acquires(torture_ww_mutex_1)
 __acquires(torture_ww_mutex_2)
@@ -392,7 +404,7 @@ __acquires(torture_ww_mutex_2)
                 struct list_head link;
                 struct ww_mutex *lock;
         } locks[3], *ll, *ln;
-        struct ww_acquire_ctx ctx;
+        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

         locks[0].lock = &torture_ww_mutex_0;
         list_add(&locks[0].link, &list);
@@ -403,12 +415,12 @@ __acquires(torture_ww_mutex_2)
         locks[2].lock = &torture_ww_mutex_2;
         list_add(&locks[2].link, &list);

-        ww_acquire_init(&ctx, &torture_ww_class);
+        ww_acquire_init(ctx, &torture_ww_class);

         list_for_each_entry(ll, &list, link) {
                 int err;

-                err = ww_mutex_lock(ll->lock, &ctx);
+                err = ww_mutex_lock(ll->lock, ctx);
                 if (!err)
                         continue;

@@ -419,26 +431,29 @@ __acquires(torture_ww_mutex_2)
                 if (err != -EDEADLK)
                         return err;

-                ww_mutex_lock_slow(ll->lock, &ctx);
+                ww_mutex_lock_slow(ll->lock, ctx);
                 list_move(&ll->link, &list);
         }

-        ww_acquire_fini(&ctx);
         return 0;
 }

-static void torture_ww_mutex_unlock(int tid __maybe_unused)
+static void torture_ww_mutex_unlock(int tid)
 __releases(torture_ww_mutex_0)
 __releases(torture_ww_mutex_1)
 __releases(torture_ww_mutex_2)
 {
+        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
+
         ww_mutex_unlock(&torture_ww_mutex_0);
         ww_mutex_unlock(&torture_ww_mutex_1);
         ww_mutex_unlock(&torture_ww_mutex_2);
+        ww_acquire_fini(ctx);
 }

 static struct lock_torture_ops ww_mutex_lock_ops = {
         .init = torture_ww_mutex_init,
+        .exit = torture_ww_mutex_exit,
         .writelock = torture_ww_mutex_lock,
         .write_delay = torture_mutex_delay,
         .task_boost = torture_boost_dummy,
@@ -924,16 +939,16 @@ static int __init lock_torture_init(void)
                 goto unwind;
         }

-        if (cxt.cur_ops->init) {
-                cxt.cur_ops->init();
-                cxt.init_called = true;
-        }
-
         if (nwriters_stress >= 0)
                 cxt.nrealwriters_stress = nwriters_stress;
         else
                 cxt.nrealwriters_stress = 2 * num_online_cpus();

+        if (cxt.cur_ops->init) {
+                cxt.cur_ops->init();
+                cxt.init_called = true;
+        }
+
 #ifdef CONFIG_DEBUG_MUTEXES
         if (str_has_prefix(torture_type, "mutex"))
                 cxt.debug_lock = true;
--
2.18.1


2021-03-19 12:56:35

by tip-bot2 for Waiman Long

Subject: [tip: locking/core] locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 8c52cca04f97a4c09ec2f0bd8fe6d0cdf49834e4
Gitweb: https://git.kernel.org/tip/8c52cca04f97a4c09ec2f0bd8fe6d0cdf49834e4
Author: Waiman Long <[email protected]>
AuthorDate: Thu, 18 Mar 2021 13:28:14 -04:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Fri, 19 Mar 2021 12:13:10 +01:00

locking/locktorture: Fix incorrect use of ww_acquire_ctx in ww_mutex test

The ww_acquire_ctx structure for ww_mutex needs to persist for a complete
lock/unlock cycle. In the ww_mutex test in locktorture, however, both
ww_acquire_init() and ww_acquire_fini() are called within the lock
function only. This causes a lockdep splat of "WARNING: Nested lock
was not taken" when lockdep is enabled in the kernel.

To fix this problem, we need to move the ww_acquire_fini() after
the ww_mutex_unlock() in torture_ww_mutex_unlock(). This is done by
allocating a global array of ww_acquire_ctx structures. Each locking
thread is associated with its own ww_acquire_ctx via the unique thread
id it has so that both the lock and unlock functions can access the
same ww_acquire_ctx structure.

Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
kernel/locking/locktorture.c | 39 ++++++++++++++++++++++++-----------
1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 90a975a..b3adb40 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -374,15 +374,27 @@ static struct lock_torture_ops mutex_lock_ops = {
  */
 static DEFINE_WD_CLASS(torture_ww_class);
 static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
+static struct ww_acquire_ctx *ww_acquire_ctxs;

 static void torture_ww_mutex_init(void)
 {
         ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
         ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
         ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
+
+        ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
+                                        sizeof(*ww_acquire_ctxs),
+                                        GFP_KERNEL);
+        if (!ww_acquire_ctxs)
+                VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
+}
+
+static void torture_ww_mutex_exit(void)
+{
+        kfree(ww_acquire_ctxs);
 }

-static int torture_ww_mutex_lock(int tid __maybe_unused)
+static int torture_ww_mutex_lock(int tid)
 __acquires(torture_ww_mutex_0)
 __acquires(torture_ww_mutex_1)
 __acquires(torture_ww_mutex_2)
@@ -392,7 +404,7 @@ __acquires(torture_ww_mutex_2)
                 struct list_head link;
                 struct ww_mutex *lock;
         } locks[3], *ll, *ln;
-        struct ww_acquire_ctx ctx;
+        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

         locks[0].lock = &torture_ww_mutex_0;
         list_add(&locks[0].link, &list);
@@ -403,12 +415,12 @@ __acquires(torture_ww_mutex_2)
         locks[2].lock = &torture_ww_mutex_2;
         list_add(&locks[2].link, &list);

-        ww_acquire_init(&ctx, &torture_ww_class);
+        ww_acquire_init(ctx, &torture_ww_class);

         list_for_each_entry(ll, &list, link) {
                 int err;

-                err = ww_mutex_lock(ll->lock, &ctx);
+                err = ww_mutex_lock(ll->lock, ctx);
                 if (!err)
                         continue;

@@ -419,26 +431,29 @@ __acquires(torture_ww_mutex_2)
                 if (err != -EDEADLK)
                         return err;

-                ww_mutex_lock_slow(ll->lock, &ctx);
+                ww_mutex_lock_slow(ll->lock, ctx);
                 list_move(&ll->link, &list);
         }

-        ww_acquire_fini(&ctx);
         return 0;
 }

-static void torture_ww_mutex_unlock(int tid __maybe_unused)
+static void torture_ww_mutex_unlock(int tid)
 __releases(torture_ww_mutex_0)
 __releases(torture_ww_mutex_1)
 __releases(torture_ww_mutex_2)
 {
+        struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
+
         ww_mutex_unlock(&torture_ww_mutex_0);
         ww_mutex_unlock(&torture_ww_mutex_1);
         ww_mutex_unlock(&torture_ww_mutex_2);
+        ww_acquire_fini(ctx);
 }

 static struct lock_torture_ops ww_mutex_lock_ops = {
         .init = torture_ww_mutex_init,
+        .exit = torture_ww_mutex_exit,
         .writelock = torture_ww_mutex_lock,
         .write_delay = torture_mutex_delay,
         .task_boost = torture_boost_dummy,
@@ -924,16 +939,16 @@ static int __init lock_torture_init(void)
                 goto unwind;
         }

-        if (cxt.cur_ops->init) {
-                cxt.cur_ops->init();
-                cxt.init_called = true;
-        }
-
         if (nwriters_stress >= 0)
                 cxt.nrealwriters_stress = nwriters_stress;
         else
                 cxt.nrealwriters_stress = 2 * num_online_cpus();

+        if (cxt.cur_ops->init) {
+                cxt.cur_ops->init();
+                cxt.init_called = true;
+        }
+
 #ifdef CONFIG_DEBUG_MUTEXES
         if (str_has_prefix(torture_type, "mutex"))
                 cxt.debug_lock = true;