2021-05-17 04:02:12

by Zhang, Qiang

Subject: [PATCH v3] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal

From: Zqiang <[email protected]>

When an interruptible mutex locker is interrupted by a signal
without acquiring the lock, it is removed from the wait queue.
If the mutex then isn't contended enough for another waiter to
be put on the wait queue, the stale WAITERS bit forces every
subsequent locker into the slowpath to acquire the lock, so the
WAITERS bit needs to be cleared when the wait queue is empty.
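The effect is easiest to see in the fastpath: mutex_lock() only takes
the fast path when the owner word is exactly zero, and the flag bits
live in that same word. The toy userspace model below (made-up names,
plain C11 atomics; a sketch of the idea, not kernel code) mimics that
encoding and shows how a stale WAITERS bit keeps the fastpath CAS
failing even though the lock is free:

/*
 * Toy userspace model (made-up names, C11 atomics; not kernel code).
 * Like the kernel mutex, the "owner" word packs the owner with low
 * flag bits, and the lock fastpath is a compare-and-swap from 0.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_WAITERS	0x01UL		/* models MUTEX_FLAG_WAITERS */
#define FLAG_MASK	0x07UL		/* models MUTEX_FLAGS */

struct toy_mutex {
	_Atomic unsigned long owner;	/* owner "pointer" | flag bits */
};

static bool trylock_fast(struct toy_mutex *lock, unsigned long task)
{
	unsigned long expected = 0UL;

	/* Succeeds only when the word is exactly 0: unlocked, no flags. */
	return atomic_compare_exchange_strong(&lock->owner, &expected, task);
}

int main(void)
{
	struct toy_mutex lock;
	unsigned long task_a = 0x1000, task_b = 0x2000;

	atomic_init(&lock.owner, 0);

	/* A takes the lock; a waiter queues up and sets WAITERS. */
	(void)trylock_fast(&lock, task_a);
	atomic_fetch_or(&lock.owner, FLAG_WAITERS);

	/*
	 * The waiter is signalled and leaves the queue without clearing
	 * the flag; A then unlocks, which preserves the flag bits.
	 */
	atomic_store(&lock.owner, FLAG_WAITERS);

	/* Lock is free, yet the fastpath fails because of the stale bit. */
	printf("stale WAITERS bit: %s\n",
	       trylock_fast(&lock, task_b) ? "fastpath" : "slowpath");

	/* The fix: clear MUTEX_FLAGS once the wait list becomes empty. */
	atomic_fetch_and(&lock.owner, ~FLAG_MASK);
	printf("flags cleared:     %s\n",
	       trylock_fast(&lock, task_b) ? "fastpath" : "slowpath");
	return 0;
}

With the patch below, __mutex_remove_waiter() clears MUTEX_FLAGS once
the wait list goes empty, so the owner word can return to zero and the
fastpath can succeed again.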

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Zqiang <[email protected]>
---
v1->v2:
Make commit info clearer and modify the code again.
v2->v3:
Improve the patch description and add a 'Suggested-by' tag

kernel/locking/mutex-debug.c | 4 ++--
kernel/locking/mutex-debug.h | 2 +-
kernel/locking/mutex.c | 15 +++++++++++----
kernel/locking/mutex.h | 4 +---
4 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aaf2abc..db9301591e3f 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}

-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;

- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}

diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112ce155..5de85a575dac 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -205,6 +205,15 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ __list_del(waiter->list.prev, waiter->list.next);
+ debug_mutex_remove_waiter(lock, waiter, current);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1070,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
__ww_mutex_check_waiters(lock, ww_ctx);
}

- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);

debug_mutex_free_waiter(&waiter);

@@ -1080,7 +1087,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,

err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/

-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)

--
2.17.1



2021-05-19 17:54:26

by Peter Zijlstra

Subject: Re: [PATCH v3] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal

On Mon, May 17, 2021 at 11:40:05AM +0800, [email protected] wrote:
> From: Zqiang <[email protected]>
>
> When an interruptible mutex locker is interrupted by a signal
> without acquiring the lock, it is removed from the wait queue.
> If the mutex then isn't contended enough for another waiter to
> be put on the wait queue, the stale WAITERS bit forces every
> subsequent locker into the slowpath to acquire the lock, so the
> WAITERS bit needs to be cleared when the wait queue is empty.

I'm still interested in knowing how you found this. Did you have an
actual problem, or were you just reading the code?

AFAICT, this needs:

Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")

> Suggested-by: Peter Zijlstra <[email protected]>
> Signed-off-by: Zqiang <[email protected]>

Thanks!

Updated patch below.

---
Subject: locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
From: Zqiang <[email protected]>
Date: Mon, 17 May 2021 11:40:05 +0800

From: Zqiang <[email protected]>

When an interruptible mutex locker is interrupted by a signal
without acquiring the lock, it is removed from the wait queue.
If the mutex then isn't contended enough for another waiter to
be put on the wait queue, the stale WAITERS bit forces every
subsequent locker into the slowpath to acquire the lock, so the
WAITERS bit needs to be cleared when the wait queue is empty.

Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/mutex-debug.c | 4 ++--
kernel/locking/mutex-debug.h | 2 +-
kernel/locking/mutex.c | 18 +++++++++++++-----
kernel/locking/mutex.h | 4 +---
4 files changed, 17 insertions(+), 11 deletions(-)

--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex
task->blocked_on = waiter;
}

-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *l
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;

- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}

--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(stru
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_fir
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, s
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock,
__ww_mutex_check_waiters(lock, ww_ctx);
}

- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);

debug_mutex_free_waiter(&waiter);

@@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock,

err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/

-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)


2021-05-19 19:01:30

by Zhang, Qiang

Subject: Re: [PATCH v3] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal



________________________________________
From: Peter Zijlstra <[email protected]>
Sent: 18 May 2021 18:49
To: Zhang, Qiang
Cc: [email protected]; [email protected]; [email protected]; [email protected]; [email protected]; [email protected]
Subject: Re: [PATCH v3] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal


On Mon, May 17, 2021 at 11:40:05AM +0800, [email protected] wrote:
> From: Zqiang <[email protected]>
>
> When an interruptible mutex locker is interrupted by a signal
> without acquiring the lock, it is removed from the wait queue.
> If the mutex then isn't contended enough for another waiter to
> be put on the wait queue, the stale WAITERS bit forces every
> subsequent locker into the slowpath to acquire the lock, so the
> WAITERS bit needs to be cleared when the wait queue is empty.
>
>I'm still interested in knowing how you found this. Did you have an
>actual problem, or were you just reading the code?

Thanks peterz,
I found it by reading the code; I've been learning about kernel locking recently.

Qiang

>
>AFAICT, this needs:
>
>Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
>
> Suggested-by: Peter Zijlstra <[email protected]>
> Signed-off-by: Zqiang <[email protected]>
>
>Thanks!
>
>Updated patch below.

---
Subject: locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
From: Zqiang <[email protected]>
Date: Mon, 17 May 2021 11:40:05 +0800

From: Zqiang <[email protected]>

When an interruptible mutex locker is interrupted by a signal
without acquiring the lock, it is removed from the wait queue.
If the mutex then isn't contended enough for another waiter to
be put on the wait queue, the stale WAITERS bit forces every
subsequent locker into the slowpath to acquire the lock, so the
WAITERS bit needs to be cleared when the wait queue is empty.

Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/mutex-debug.c | 4 ++--
kernel/locking/mutex-debug.h | 2 +-
kernel/locking/mutex.c | 18 +++++++++++++-----
kernel/locking/mutex.h | 4 +---
4 files changed, 17 insertions(+), 11 deletions(-)

--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex
task->blocked_on = waiter;
}

-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *l
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;

- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}

--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(stru
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_fir
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, s
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock,
__ww_mutex_check_waiters(lock, ww_ctx);
}

- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);

debug_mutex_free_waiter(&waiter);

@@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock,

err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/

-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)

Subject: [tip: locking/urgent] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal

The following commit has been merged into the locking/urgent branch of tip:

Commit-ID: 3a010c493271f04578b133de977e0e5dd2848cea
Gitweb: https://git.kernel.org/tip/3a010c493271f04578b133de977e0e5dd2848cea
Author: Zqiang <[email protected]>
AuthorDate: Mon, 17 May 2021 11:40:05 +08:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Tue, 18 May 2021 12:53:51 +02:00

locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal

When an interruptible mutex locker is interrupted by a signal
without acquiring the lock, it is removed from the wait queue.
If the mutex then isn't contended enough for another waiter to
be put on the wait queue, the stale WAITERS bit forces every
subsequent locker into the slowpath to acquire the lock, so the
WAITERS bit needs to be cleared when the wait queue is empty.

Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/locking/mutex-debug.c | 4 ++--
kernel/locking/mutex-debug.h | 2 +-
kernel/locking/mutex.c | 18 +++++++++++++-----
kernel/locking/mutex.h | 4 +---
4 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aa..db93015 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}

-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;

- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}

diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f4..53e631e 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112..013e1b0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
__ww_mutex_check_waiters(lock, ww_ctx);
}

- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);

debug_mutex_free_waiter(&waiter);

@@ -1080,7 +1088,7 @@ skip_wait:

err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d..f0c710b 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/

-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)


2021-05-19 20:15:06

by Waiman Long

Subject: Re: [PATCH v3] locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal

On 5/18/21 6:49 AM, Peter Zijlstra wrote:
> On Mon, May 17, 2021 at 11:40:05AM +0800, [email protected] wrote:
>> From: Zqiang <[email protected]>
>>
>> When an interruptible mutex locker is interrupted by a signal
>> without acquiring the lock, it is removed from the wait queue.
>> If the mutex then isn't contended enough for another waiter to
>> be put on the wait queue, the stale WAITERS bit forces every
>> subsequent locker into the slowpath to acquire the lock, so the
>> WAITERS bit needs to be cleared when the wait queue is empty.
> I'm still interested in knowing how you found this. Did you have an
> actual problem, or were you just reading the code?
>
> AFAICT, this needs:
>
> Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
>
>> Suggested-by: Peter Zijlstra <[email protected]>
>> Signed-off-by: Zqiang <[email protected]>
> Thanks!
>
> Updated patch below.
>
> ---
> Subject: locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
> From: Zqiang <[email protected]>
> Date: Mon, 17 May 2021 11:40:05 +0800
>
> From: Zqiang <[email protected]>
>
> When an interruptible mutex locker is interrupted by a signal
> without acquiring the lock, it is removed from the wait queue.
> If the mutex then isn't contended enough for another waiter to
> be put on the wait queue, the stale WAITERS bit forces every
> subsequent locker into the slowpath to acquire the lock, so the
> WAITERS bit needs to be cleared when the wait queue is empty.
>
> Fixes: 040a0a371005 ("mutex: Add support for wound/wait style locks")
> Suggested-by: Peter Zijlstra <[email protected]>
> Signed-off-by: Zqiang <[email protected]>
> Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
> Link: https://lkml.kernel.org/r/[email protected]
> ---
> kernel/locking/mutex-debug.c | 4 ++--
> kernel/locking/mutex-debug.h | 2 +-
> kernel/locking/mutex.c | 18 +++++++++++++-----
> kernel/locking/mutex.h | 4 +---
> 4 files changed, 17 insertions(+), 11 deletions(-)
>
> --- a/kernel/locking/mutex-debug.c
> +++ b/kernel/locking/mutex-debug.c
> @@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex
> task->blocked_on = waiter;
> }
>
> -void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> +void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> struct task_struct *task)
> {
> DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
> @@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *l
> DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
> task->blocked_on = NULL;
>
> - list_del_init(&waiter->list);
> + INIT_LIST_HEAD(&waiter->list);
> waiter->task = NULL;
> }
>
> --- a/kernel/locking/mutex-debug.h
> +++ b/kernel/locking/mutex-debug.h
> @@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(stru
> extern void debug_mutex_add_waiter(struct mutex *lock,
> struct mutex_waiter *waiter,
> struct task_struct *task);
> -extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> +extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> struct task_struct *task);
> extern void debug_mutex_unlock(struct mutex *lock);
> extern void debug_mutex_init(struct mutex *lock, const char *name,
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_fir
> * Add @waiter to a given location in the lock wait_list and set the
> * FLAG_WAITERS flag if it's the first waiter.
> */
> -static void __sched
> +static void
> __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> struct list_head *list)
> {
> @@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, s
> __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
> }
>
> +static void
> +__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
> +{
> + list_del(&waiter->list);
> + if (likely(list_empty(&lock->wait_list)))
> + __mutex_clear_flag(lock, MUTEX_FLAGS);
> +
> + debug_mutex_remove_waiter(lock, waiter, current);
> +}
> +
> /*
> * Give up ownership to a specific task, when @task = NULL, this is equivalent
> * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
> @@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock,
> __ww_mutex_check_waiters(lock, ww_ctx);
> }
>
> - mutex_remove_waiter(lock, &waiter, current);
> - if (likely(list_empty(&lock->wait_list)))
> - __mutex_clear_flag(lock, MUTEX_FLAGS);
> + __mutex_remove_waiter(lock, &waiter);
>
> debug_mutex_free_waiter(&waiter);
>
> @@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock,
>
> err:
> __set_current_state(TASK_RUNNING);
> - mutex_remove_waiter(lock, &waiter, current);
> + __mutex_remove_waiter(lock, &waiter);
> err_early_kill:
> spin_unlock(&lock->wait_lock);
> debug_mutex_free_waiter(&waiter);
> --- a/kernel/locking/mutex.h
> +++ b/kernel/locking/mutex.h
> @@ -10,12 +10,10 @@
> * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
> */
>
> -#define mutex_remove_waiter(lock, waiter, task) \
> - __list_del((waiter)->list.prev, (waiter)->list.next)
> -
> #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
> #define debug_mutex_free_waiter(waiter) do { } while (0)
> #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
> +#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
> #define debug_mutex_unlock(lock) do { } while (0)
> #define debug_mutex_init(lock, name, key) do { } while (0)
>
>
Reviewed-by: Waiman Long <[email protected]>