From: Waiman Long
To: Ingo Molnar
Cc: Peter Zijlstra, linux-kernel@vger.kernel.org, Linus Torvalds,
	Ding Tianhong, Jason Low, Davidlohr Bueso, "Paul E. McKenney",
	Thomas Gleixner, Will Deacon, Tim Chen, Waiman Long, Waiman Long
Subject: [PATCH v3 1/3] locking/mutex: Add waiter parameter to mutex_optimistic_spin()
Date: Tue, 22 Mar 2016 13:46:42 -0400
Message-Id: <1458668804-10138-2-git-send-email-Waiman.Long@hpe.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1458668804-10138-1-git-send-email-Waiman.Long@hpe.com>
References: <1458668804-10138-1-git-send-email-Waiman.Long@hpe.com>

This patch adds a new waiter parameter to the mutex_optimistic_spin()
function to prepare it for use by a waiter-spinner that doesn't need to
go into the OSQ, as there can only be one waiter-spinner, which is the
head of the wait queue.

Signed-off-by: Waiman Long
---
 kernel/locking/mutex.c |   66 +++++++++++++++++++++++++++++++++--------------
 1 files changed, 46 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0551c21..5dd6171 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -273,11 +273,15 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 
 /*
  * Atomically try to take the lock when it is available
+ *
+ * For waiter-spinner, the count needs to be set to -1 first which will be
+ * cleared to 0 later on if the list becomes empty. For regular spinner,
+ * the count will be set to 0.
  */
-static inline bool mutex_try_to_acquire(struct mutex *lock)
+static inline bool mutex_try_to_acquire(struct mutex *lock, int waiter)
 {
 	return !mutex_is_locked(lock) &&
-		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
+		(atomic_cmpxchg_acquire(&lock->count, 1, waiter ? -1 : 0) == 1);
 }
 
 /*
@@ -302,22 +306,42 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
  *
  * Returns true when the lock was taken, otherwise false, indicating
  * that we need to jump to the slowpath and sleep.
+ *
+ * The waiter flag is set to true if the spinner is a waiter in the wait
+ * queue. As the waiter has slept for a while, it should have priority to
+ * get the lock over the regular spinners. So going to wait at the end of
+ * the OSQ isn't fair to the waiter. Instead, it will spin on the lock
+ * directly and concurrently with the spinner at the head of the OSQ, if
+ * present. There may be a bit more cacheline contention in this case.
+ * The waiter also needs to set the lock to -1 instead of 0 on lock
+ * acquisition.
  */
 static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+				  struct ww_acquire_ctx *ww_ctx,
+				  const bool use_ww_ctx, int waiter)
 {
 	struct task_struct *task = current;
+	bool acquired = false;
 
-	if (!mutex_can_spin_on_owner(lock))
-		goto done;
+	if (!waiter) {
+		/*
+		 * The purpose of the mutex_can_spin_on_owner() function is
+		 * to eliminate the overhead of osq_lock() and osq_unlock()
+		 * in case spinning isn't possible. As a waiter-spinner
+		 * is not going to take OSQ lock anyway, there is no need
+		 * to call mutex_can_spin_on_owner().
+		 */
+		if (!mutex_can_spin_on_owner(lock))
+			goto done;
 
-	/*
-	 * In order to avoid a stampede of mutex spinners trying to
-	 * acquire the mutex all at once, the spinners need to take a
-	 * MCS (queued) lock first before spinning on the owner field.
-	 */
-	if (!osq_lock(&lock->osq))
-		goto done;
+		/*
+		 * In order to avoid a stampede of mutex spinners trying to
+		 * acquire the mutex all at once, the spinners need to take a
+		 * MCS (queued) lock first before spinning on the owner field.
+		 */
+		if (!osq_lock(&lock->osq))
+			goto done;
+	}
 
 	while (true) {
 		struct task_struct *owner;
@@ -347,7 +371,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			break;
 
 		/* Try to acquire the mutex if it is unlocked. */
-		if (mutex_try_to_acquire(lock)) {
+		if (mutex_try_to_acquire(lock, waiter)) {
 			lock_acquired(&lock->dep_map, ip);
 
 			if (use_ww_ctx) {
@@ -358,8 +382,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			}
 
 			mutex_set_owner(lock);
-			osq_unlock(&lock->osq);
-			return true;
+			acquired = true;
+			break;
 		}
 
 		/*
@@ -380,14 +404,15 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		cpu_relax_lowlatency();
 	}
 
-	osq_unlock(&lock->osq);
+	if (!waiter)
+		osq_unlock(&lock->osq);
 done:
 	/*
 	 * If we fell out of the spin path because of need_resched(),
 	 * reschedule now, before we try-lock the mutex. This avoids getting
 	 * scheduled out right after we obtained the mutex.
 	 */
-	if (need_resched()) {
+	if (!acquired && need_resched()) {
 		/*
 		 * We _should_ have TASK_RUNNING here, but just in case
 		 * we do not, make it so, otherwise we might get stuck.
@@ -396,11 +421,12 @@ done:
 		schedule_preempt_disabled();
 	}
 
-	return false;
+	return acquired;
 }
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+				  struct ww_acquire_ctx *ww_ctx,
+				  const bool use_ww_ctx, int waiter)
 {
 	return false;
 }
@@ -517,7 +543,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
-	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
 		/* got the lock, yay! */
 		preempt_enable();
 		return 0;
-- 
1.7.1
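
For readers who want to play with the count convention outside the kernel,
here is a minimal user-space sketch in C11, not the kernel code itself, that
mimics what mutex_try_to_acquire() does with the new waiter argument:
count == 1 means unlocked, 0 means locked with no waiters, and a negative
count means locked with possible waiters, so a waiter-spinner installs -1 to
keep the unlock slowpath waking the remaining waiters. The names used below
(demo_mutex, demo_try_to_acquire) are made up for illustration only.

/*
 * User-space model of the mutex count convention (illustrative only):
 *   1  -> unlocked
 *   0  -> locked, no waiters
 *  -1  -> locked, possible waiters (what a waiter-spinner must install)
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, -1: locked + waiters */
};

static bool demo_try_to_acquire(struct demo_mutex *lock, int waiter)
{
	int expected = 1;

	/*
	 * Only succeed when the lock is currently unlocked (count == 1).
	 * A waiter-spinner installs -1, a regular optimistic spinner
	 * installs 0, mirroring the waiter ? -1 : 0 in the patch above.
	 */
	return atomic_load_explicit(&lock->count, memory_order_relaxed) == 1 &&
	       atomic_compare_exchange_strong_explicit(&lock->count, &expected,
						       waiter ? -1 : 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	struct demo_mutex m = { .count = 1 };

	/* A waiter-spinner that wins the lock leaves the count at -1 ... */
	printf("waiter acquire: %d, count = %d\n",
	       demo_try_to_acquire(&m, 1), atomic_load(&m.count));

	/* ... while a regular spinner on a fresh lock sets it to 0. */
	atomic_store(&m.count, 1);
	printf("spinner acquire: %d, count = %d\n",
	       demo_try_to_acquire(&m, 0), atomic_load(&m.count));
	return 0;
}

Built with something like "gcc -std=c11 demo.c", this should report that the
waiter acquisition leaves the count at -1 while the regular spinner leaves it
at 0, which is the whole point of threading the waiter flag down into the
cmpxchg.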