From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra, Ingo Molnar, Maarten Lankhorst, Sebastian Andrzej Siewior, Mike Galbraith
Subject: [PATCH 2/3] locking: ww_mutex: Allow to use rt_mutex instead of mutex for the baselock
Date: Fri, 27 Feb 2015 17:57:08 +0100
Message-Id: <1425056229-22326-3-git-send-email-bigeasy@linutronix.de>
In-Reply-To: <1425056229-22326-1-git-send-email-bigeasy@linutronix.de>
References: <1425056229-22326-1-git-send-email-bigeasy@linutronix.de>

This patch makes it possible to replace the base mutex of a ww_mutex
with an rt_mutex. In general one would not do this. On -RT, however,
rt_mutex is used instead of the mutex by default, which means we need
the ww_mutex functionality based on rt_mutex. This patch contains a
slightly modified version of what we carry in the -RT tree, including
Mike Galbraith's latest lockdep annotation fixups and proper deadlock
detection, which was broken after the rt_mutex rework.
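
Nothing changes for users of the API: the usual acquire/backoff dance
keeps working unmodified on top of rt_mutex. For reference, the pattern
looks roughly like this (a sketch along the lines of
Documentation/ww-mutex-design.txt; the class name and the lock_both()
helper are made up for illustration):

  static DEFINE_WW_CLASS(example_ww_class);  /* illustrative name */

  /* Take two ww_mutexes deadlock-free, regardless of lock order. */
  static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
  {
          struct ww_mutex *m1 = a, *m2 = b;
          struct ww_acquire_ctx ctx;

          ww_acquire_init(&ctx, &example_ww_class);

          /* The first lock in a fresh context is never told to back off. */
          ww_mutex_lock(m1, &ctx);
          while (ww_mutex_lock(m2, &ctx) == -EDEADLK) {
                  /*
                   * Wounded by an older context: drop the lock we hold,
                   * sleep on the contended one, then retry the remaining
                   * lock with the roles swapped.
                   */
                  ww_mutex_unlock(m1);
                  ww_mutex_lock_slow(m2, &ctx);
                  swap(m1, m2);
          }
          ww_acquire_done(&ctx);

          /* ... both locks are held ... */

          ww_mutex_unlock(a);
          ww_mutex_unlock(b);
          ww_acquire_fini(&ctx);
  }

With CONFIG_WW_MUTEX_RTMUTEX the -EDEADLK/-EALREADY decisions above are
made by the new __mutex_lock_check_stamp() on the rt_mutex slowpath
instead of by the mutex based implementation.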

Cc: Mike Galbraith
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/ww_mutex.h |  87 ++++++++++++++++-
 kernel/locking/mutex.c   |  22 ++++-
 kernel/locking/rtmutex.c | 237 ++++++++++++++++++++++++++++++++++++++++++++---
 lib/Kconfig.debug        |   6 ++
 lib/locking-selftest.c   |   4 +
 5 files changed, 339 insertions(+), 17 deletions(-)

diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 6e5d5ee3138d..99793f00755c 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -15,6 +15,7 @@
 #define __LINUX_WW_MUTEX_H
 
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
 
 struct ww_class {
 	atomic_long_t stamp;
@@ -42,8 +43,19 @@ struct ww_acquire_ctx {
 #endif
 };
 
+struct rt_mutex_base_lock {
+	struct rt_mutex lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+};
+
 struct ww_mutex {
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+	struct rt_mutex_base_lock base;
+#else
 	struct mutex base;
+#endif
 	struct ww_acquire_ctx *ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
 	struct ww_class *ww_class;
@@ -62,9 +74,15 @@ struct ww_mutex {
 	, .acquire_name = #ww_class "_acquire" \
 	, .mutex_name = #ww_class "_mutex" }
 
-#define __WW_MUTEX_INITIALIZER(lockname, class) \
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+# define __WW_MUTEX_INITIALIZER(lockname, class) \
+	{ .base = { \__RT_MUTEX_INITIALIZER(lockname) } \
+	__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+#else
+# define __WW_MUTEX_INITIALIZER(lockname, class) \
 	{ .base = { \__MUTEX_INITIALIZER(lockname) } \
 	__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+#endif
 
 #define DEFINE_WW_CLASS(classname) \
 	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
@@ -72,6 +90,71 @@ struct ww_mutex {
 #define DEFINE_WW_MUTEX(mutexname, ww_class) \
 	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
 
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+
+static inline void __ww_mutex_init(struct rt_mutex_base_lock *lock,
+				   struct ww_class *ww_class)
+{
+	__rt_mutex_init(&lock->lock, ww_class->mutex_name);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed(lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, ww_class->mutex_name,
+			 &ww_class->mutex_key, 0);
+#endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	lock->lock.save_state = 0;
+#endif
+}
+
+static inline void __ww_mutex_lock_lock(struct rt_mutex_base_lock *lock)
+{
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+
+static inline int
+__ww_mutex_lock_interruptible_lock(struct rt_mutex_base_lock *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_interruptible(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+
+static inline int __ww_mutex_trylock_lock(struct rt_mutex_base_lock *lock)
+{
+	int ret;
+
+	ret = rt_mutex_trylock(&lock->lock);
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+
+static inline void __ww_mutex_destroy_lock(struct rt_mutex_base_lock *lock)
+{
+	rt_mutex_destroy(&lock->lock);
+}
+
+static inline bool __ww_mutex_is_locked_lock(struct rt_mutex_base_lock *lock)
+{
+	return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void __ww_mutex_unlock_lock(struct rt_mutex_base_lock *lock)
+{
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&lock->lock);
+}
+
+#else
+
 static inline void __ww_mutex_init(struct mutex *lock,
 				   struct ww_class *ww_class)
 {
@@ -108,6 +191,8 @@ static inline void __ww_mutex_unlock_lock(struct mutex *lock)
 	mutex_unlock(lock);
 }
 
+#endif
+
 /**
  * ww_mutex_init - initialize the w/w mutex
  * @lock: the mutex to be initialized
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 16b2d3cc88b0..0a652ba46081 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -106,6 +106,7 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 						   struct ww_acquire_ctx *ww_ctx)
 {
@@ -215,6 +216,7 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
 			wake_up_process(cur->task);
 	}
 }
+#endif
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
@@ -328,6 +330,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	while (true) {
 		struct task_struct *owner;
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
@@ -343,7 +346,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			if (READ_ONCE(ww->ctx))
 				break;
 		}
-
+#endif
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
@@ -356,12 +359,14 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		if (mutex_try_to_acquire(lock)) {
 			lock_acquired(&lock->dep_map, ip);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 			if (use_ww_ctx) {
 				struct ww_mutex *ww;
 
 				ww = container_of(lock, struct ww_mutex, base);
 				ww_mutex_set_context_fastpath(ww, ww_ctx);
 			}
+#endif
 
 			mutex_set_owner(lock);
 			osq_unlock(&lock->osq);
@@ -445,6 +450,7 @@ void __sched mutex_unlock(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_unlock);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 /**
  * ww_mutex_unlock - release the w/w mutex
  * @lock: the mutex to be released
@@ -506,6 +512,7 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 
 	return 0;
 }
+#endif
 
 /*
  * Lock a mutex (possibly interruptible), slowpath:
@@ -570,12 +577,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			ret = -EINTR;
 			goto err;
 		}
-
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
 		}
+#endif
 
 		__set_task_state(task, state);
 
@@ -597,11 +605,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
 	}
-
+#endif
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	preempt_enable();
 	return 0;
@@ -655,6 +664,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -711,7 +721,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
-
+#endif
 #endif
 
 /*
@@ -840,6 +850,7 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock)
 				       NULL, _RET_IP_, NULL, 0);
 }
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -854,6 +865,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
 				   NULL, _RET_IP_, ctx, 1);
 }
+#endif
 
 #endif
 
@@ -915,6 +927,7 @@ int __sched mutex_trylock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_trylock);
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 int __sched
 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -952,6 +965,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
 
 #endif
+#endif
 
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e16e5542bf13..6d7d72ffa619 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -16,6 +16,7 @@
 #include <linux/sched/rt.h>
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
@@ -738,6 +739,31 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	return ret;
 }
 
+static int __sched __mutex_lock_check_stamp(struct rt_mutex *lock,
+					    struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+#endif
+	return 0;
+}
+
 /*
  * Try to take an rt-mutex
  *
@@ -1097,7 +1123,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct hrtimer_sleeper *timeout,
-		    struct rt_mutex_waiter *waiter)
+		    struct rt_mutex_waiter *waiter,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	int ret = 0;
 
@@ -1120,6 +1147,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 			break;
 		}
 
+		if (ww_ctx && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+			if (ret)
+				break;
+		}
+
 		raw_spin_unlock(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
@@ -1154,13 +1187,84 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+static void ww_mutex_lock_acquired(struct ww_mutex *ww,
+				   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}
+
+	/*
+	 * Naughty, using a different class will lead to undefined behavior!
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+#endif
+
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct rt_mutex_waiter *waiter, *n;
+
+	/*
+	 * This branch gets optimized out for the common case,
+	 * and is only important for ww_mutex_lock.
+	 */
+	ww_mutex_lock_acquired(ww, ww_ctx);
+	ww->ctx = ww_ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
+					     tree_entry) {
+		/* XXX debug rt mutex waiter wakeup */
+
+		BUG_ON(waiter->lock != lock);
+		wake_up_process(waiter->task);
+	}
+#endif
+}
+
 /*
  * Slow path lock function:
  */
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  enum rtmutex_chainwalk chwalk)
+		  enum rtmutex_chainwalk chwalk,
+		  struct ww_acquire_ctx *ww_ctx)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -1173,6 +1277,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		if (ww_ctx)
+			ww_mutex_account_lock(lock, ww_ctx);
 		raw_spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@@ -1190,12 +1296,22 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	if (likely(!ret))
 		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
+					  ww_ctx);
+	else if (ww_ctx) {
+		/* ww_mutex may need to return -EALREADY instead of -EDEADLK */
+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		BUG_ON(!ret);
+	}
 
 	if (unlikely(ret)) {
 		if (rt_mutex_has_waiters(lock))
 			remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		/* ww_mutex needs the error reported */
+		if (!ww_ctx)
+			rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+	} else if (ww_ctx) {
+		ww_mutex_account_lock(lock, ww_ctx);
 	}
 
 	/*
@@ -1320,31 +1436,36 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
+		  struct ww_acquire_ctx *ww_ctx,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+				enum rtmutex_chainwalk chwalk,
+				struct ww_acquire_ctx *ww_ctx))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
+			      ww_ctx);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout,
 			enum rtmutex_chainwalk chwalk,
+			struct ww_acquire_ctx *ww_ctx,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk))
+				      enum rtmutex_chainwalk chwalk,
+				      struct ww_acquire_ctx *ww_ctx))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
 	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, chwalk);
+		return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }
 
 static inline int
@@ -1377,7 +1498,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
@@ -1394,7 +1515,8 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL,
+				 rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1407,7 +1529,7 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
 	might_sleep();
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       RT_MUTEX_FULL_CHAINWALK,
+				       RT_MUTEX_FULL_CHAINWALK, NULL,
 				       rt_mutex_slowlock);
 }
 
@@ -1431,6 +1553,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
+				       NULL,
 				       rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1628,7 +1751,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/* sleep on the mutex */
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
 
 	if (unlikely(ret))
 		remove_waiter(lock, waiter);
@@ -1643,3 +1766,93 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
 	return ret;
 }
+
+#ifdef CONFIG_WW_MUTEX_RTMUTEX
+static int ww_mutex_deadlock_injection(struct ww_mutex *lock,
+				       struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
+int __sched __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+					  struct ww_acquire_ctx *ww_ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map,
+			   _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
+				ww_ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+	else if (!ret && ww_ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ww_ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
+int __sched __ww_mutex_lock(struct ww_mutex *lock,
+			    struct ww_acquire_ctx *ww_ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map,
+			   _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
+				ww_ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+	else if (!ret && ww_ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ww_ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+	int nest = !!lock->ctx;
+
+	/*
+	 * The unlocking fastpath is the 0->1 transition from 'locked'
+	 * into 'unlocked' state:
+	 */
+	if (nest) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+		if (lock->ctx->acquired > 0)
+			lock->ctx->acquired--;
+		lock->ctx = NULL;
+	}
+
+	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+	rt_mutex_unlock(&lock->base.lock);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c5cefb3c009c..05b6a63f7218 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -921,6 +921,12 @@ config DEBUG_MUTEXES
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
 
+config WW_MUTEX_RTMUTEX
+	bool "Wait/wound mutex: use RT-Mutex instead of the regular mutex"
+	help
+	  This replaces the base mutex in ww_mutex with an rt_mutex. This is
+	  probably only useful for testing.
+
 config DEBUG_WW_MUTEX_SLOWPATH
 	bool "Wait/wound mutex debugging: Slowpath testing"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 2c35e38b4013..84c017ac9c39 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1192,6 +1192,7 @@ static void ww_test_normal(void)
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 	/* mutex_lock_killable (and *_nested) */
 	o.ctx = (void *)~0UL;
 	ret = mutex_lock_killable(&o.base);
@@ -1200,6 +1201,7 @@ static void ww_test_normal(void)
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
+#endif
 
 	/* trylock, succeeding */
 	o.ctx = (void *)~0UL;
@@ -1219,11 +1221,13 @@ static void ww_test_normal(void)
 	__ww_mutex_unlock_lock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
+#ifndef CONFIG_WW_MUTEX_RTMUTEX
 	/* nest_lock */
 	o.ctx = (void *)~0UL;
 	mutex_lock_nest_lock(&o.base, &t);
 	__ww_mutex_unlock_lock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
+#endif
 }
 
 static void ww_test_two_contexts(void)
-- 
2.1.4