We have a couple of places in the tree that really could do with a
down_timeout() function. I noticed it in qla2xxx and ACPI, but maybe
there are others that don't have the courtesy of putting "I wish we had
a down_timeout" comment beside their implementation.
I don't really want to poke at 25 different implementations of
down_timeout, but doing a mutex_lock_timeout() seems easy enough, and at
least qla2xxx can use it.
I mused on whether to put the timeout argument into the struct
mutex, but on the whole, I thought it better to somewhat penalise
mutex_lock_interruptible(), mutex_lock_nested and the mutex lock slowpaths
in time than it was to penalise all mutex users in space.
Here's the patch to add mutex_lock_timeout(). I booted the resulting
kernel on a parisc machine, but I wouldn't claim to have done any real
testing. It's fairly mechanical though.
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
index cb29d84..6fa4e5f 100644
--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -44,7 +44,8 @@ __mutex_fastpath_lock(atomic_t *count, f
}
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ fastcall int (*fail_fn)(atomic_t *, long jiffies))
{
int __ex_flag, __res;
@@ -60,7 +61,7 @@ __mutex_fastpath_lock_retval(atomic_t *c
__res |= __ex_flag;
if (unlikely(__res != 0))
- __res = fail_fn(count);
+ __res = fail_fn(count, jiffies);
return __res;
}
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 0134151..92a93df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -37,10 +37,11 @@ __mutex_fastpath_lock(atomic_t *count, f
* or anything the slow path function returns.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ fastcall int (*fail_fn)(atomic_t *, long))
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return fail_fn(count, jiffies);
else {
smp_mb();
return 0;
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc7..5a65210 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,8 @@ #ifndef _ASM_GENERIC_MUTEX_NULL_H
#define _ASM_GENERIC_MUTEX_NULL_H
#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
+#define __mutex_fastpath_lock_retval(count, jiffies, fail_fn) \
+ fail_fn(count, jiffies)
#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
#define __mutex_slowpath_needs_to_unlock() 1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 6a7e8c1..b5eee4e 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -42,10 +42,11 @@ __mutex_fastpath_lock(atomic_t *count, f
* or anything the slow path function returns
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ fastcall int (*fail_fn)(atomic_t *, long))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- return fail_fn(count);
+ return fail_fn(count, jiffies);
else {
smp_mb();
return 0;
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 7a17d9e..7e13ded 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -51,11 +51,11 @@ do { \
* or anything the slow path function returns
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int fastcall (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ int fastcall (*fail_fn)(atomic_t *, long))
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return fail_fn(count, jiffies);
else
return 0;
}
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index bed73a6..151f067 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -36,10 +36,11 @@ __mutex_fastpath_lock(atomic_t *count, v
* or anything the slow path function returns.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ int (*fail_fn)(atomic_t *, long))
{
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- return fail_fn(count);
+ return fail_fn(count, jiffies);
return 0;
}
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 16396b1..18668fa 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -46,11 +46,11 @@ do { \
* or anything the slow path function returns
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int fastcall (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies
+ int fastcall (*fail_fn)(atomic_t *, long))
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return fail_fn(count, jiffies);
else
return 0;
}
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 27c48da..b70caf2 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -122,6 +122,7 @@ static inline int fastcall mutex_is_lock
*/
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int fastcall mutex_lock_timeout(struct mutex *lock, long jiffies);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 8c71cf7..d2ec4d3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -122,7 +122,8 @@ EXPORT_SYMBOL(mutex_unlock);
* Lock a mutex (possibly interruptible), slowpath:
*/
static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ long jiffies)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -158,19 +159,19 @@ __mutex_lock_common(struct mutex *lock,
* TASK_UNINTERRUPTIBLE case.)
*/
if (unlikely(state == TASK_INTERRUPTIBLE &&
- signal_pending(task))) {
+ (signal_pending(task) || jiffies == 0))) {
mutex_remove_waiter(lock, &waiter, task->thread_info);
mutex_release(&lock->dep_map, 1, _RET_IP_);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
- return -EINTR;
+ return jiffies ? -EINTR : -ETIMEDOUT;
}
__set_task_state(task, state);
/* didnt get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- schedule();
+ jiffies = schedule_timeout(jiffies);
spin_lock_mutex(&lock->wait_lock, flags);
}
@@ -194,7 +195,8 @@ __mutex_lock_slowpath(atomic_t *lock_cou
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
+ MAX_SCHEDULE_TIMEOUT);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -202,7 +204,8 @@ void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass,
+ MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -256,10 +259,10 @@ __mutex_unlock_slowpath(atomic_t *lock_c
/*
* Here come the less common (and hence less performance-critical) APIs:
- * mutex_lock_interruptible() and mutex_trylock().
+ * mutex_lock_interruptible(), mutex_lock_timeout and mutex_trylock().
*/
static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count, long jiffies);
/***
* mutex_lock_interruptible - acquire the mutex, interruptable
@@ -275,18 +278,39 @@ __mutex_lock_interruptible_slowpath(atom
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
might_sleep();
- return __mutex_fastpath_lock_retval
- (&lock->count, __mutex_lock_interruptible_slowpath);
+ return __mutex_fastpath_lock_retval(&lock->count,
+ MAX_SCHEDULE_TIMEOUT, __mutex_lock_interruptible_slowpath);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
+/**
+ * mutex_lock_timeout - try to acquire the mutex and fail if it takes too long
+ * @lock: the mutex to be acquired
+ * @timeout: the number of jiffies to wait
+ *
+ * Lock the mutex like mutex_lock(), and return 0 if the mutex has
+ * been acquired or sleep until the mutex becomes available. If a
+ * signal arrives while waiting for the lock then this function
+ * returns -EINTR. If the timeout expires, it returns -ETIMEDOUT.
+ *
+ * This function is similar to (but not equivalent to) down_interruptible().
+ */
+int fastcall __sched mutex_lock_timeout(struct mutex *lock, long jiffies)
+{
+ might_sleep();
+ return __mutex_fastpath_lock_retval(&lock->count, jiffies,
+ __mutex_lock_interruptible_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_lock_timeout);
+
static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count, long jiffies)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, jiffies);
}
/*
qla2xxx can use a mutex instead of a semaphore for mailbox serialisation.
It can also use the new mutex_lock_timeout() function I introduced in
patch 1/2.
Compile-tested only (I don't have a qlogic card conveniently available
right now).
Signed-off-by: Matthew Wilcox <[email protected]>
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index c4fc40f..f78b058 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2316,7 +2316,7 @@ #define MBX_UPDATE_FLASH_ACTIVE 3
spinlock_t mbx_reg_lock; /* Mbx Cmd Register Lock */
- struct semaphore mbx_cmd_sem; /* Serialialize mbx access */
+ struct mutex mbx_cmd_mtx; /* Serialialize mbx access */
struct semaphore mbx_intr_sem; /* Used for completion notification */
uint32_t mbx_flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 32ebeec..21cbd6c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern char *qla2x00_get_fw_version_str(
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
-extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
-
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4cde76c..fbd5822 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -69,7 +69,7 @@ qla2x00_mailbox_command(scsi_qla_host_t
* non ISP abort time.
*/
if (!abort_active) {
- if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) {
+ if (mutex_lock_timeout(&ha->mbx_cmd_mtx, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
"Exiting.\n", __func__, ha->host_no));
@@ -311,7 +311,7 @@ #endif
/* Allow next mbx cmd to come in. */
if (!abort_active)
- up(&ha->mbx_cmd_sem);
+ mutex_unlock(&ha->mbx_cmd_mtx);
if (rval) {
DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 208607b..212f392 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1530,7 +1530,7 @@ qla2x00_probe_one(struct pci_dev *pdev,
/* load the F/W, read paramaters, and init the H/W */
ha->instance = num_hosts;
- init_MUTEX(&ha->mbx_cmd_sem);
+ mutex_init(&ha->mbx_cmd_mtx);
init_MUTEX_LOCKED(&ha->mbx_intr_sem);
INIT_LIST_HEAD(&ha->list);
@@ -2580,23 +2580,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
qla2x00_restart_timer(ha, WATCH_INTERVAL);
}
-/* XXX(hch): crude hack to emulate a down_timeout() */
-int
-qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
-{
- const unsigned int step = 100; /* msecs */
- unsigned int iterations = jiffies_to_msecs(timeout)/100;
-
- do {
- if (!down_trylock(sema))
- return 0;
- if (msleep_interruptible(step))
- break;
- } while (--iterations >= 0);
-
- return -ETIMEDOUT;
-}
-
/* Firmware interface routines. */
#define FW_BLOBS 5
On Thu, 09 Nov 2006, Matthew Wilcox wrote:
> We have a couple of places in the tree that really could do with a
> down_timeout() function. I noticed it in qla2xxx and ACPI, but maybe
> there are others that don't have the courtesy of putting "I wish we had
> a down_timeout" comment beside their implementation.
I'm testing this with qla2xxx... btw: there's a minor cut-n-paste
error in the x86_64 variant where you forgot a ',' (comma).
> diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
> index 16396b1..18668fa 100644
> --- a/include/asm-x86_64/mutex.h
> +++ b/include/asm-x86_64/mutex.h
> @@ -46,11 +46,11 @@ do { \
> * or anything the slow path function returns
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count,
> - int fastcall (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies
> + int fastcall (*fail_fn)(atomic_t *, long))
should be:
+__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
+ int fastcall (*fail_fn)(atomic_t *, long))
On Thu, 09 Nov 2006, Matthew Wilcox wrote:
> qla2xxx can use a mutex instead of a semaphore for mailbox serialisation.
> It can also use the new mutex_down_timeout function I introduced in
> patch 1/2.
>
> Compile-tested only (I don't have a qlogic card conveniently available
> right now).
>
> Signed-off-by: Matthew Wilcox <[email protected]>
Initial testing appears promising.
Ack-by: Andrew Vasquez <[email protected]>
No comment on this?
On Thu, Nov 09, 2006 at 11:27:21AM -0700, Matthew Wilcox wrote:
>
> We have a couple of places in the tree that really could do with a
> down_timeout() function. I noticed it in qla2xxx and ACPI, but maybe
> there are others that don't have the courtesy of putting "I wish we had
> a down_timeout" comment beside their implementation.
>
> I don't really want to poke at 25 different implementations of
> down_timeout, but doing a mutex_timeout() seems easy enough, and at
> least qla2xxx can use it.
>
> I mused on whether to put the timeout argument into the struct
> mutex, but on the whole, I thought it better to somewhat penalise
> mutex_lock_interruptible(), mutex_lock_nested and the mutex lock slowpaths
> in time than it was to penalise all mutex users in space.
>
> Here's the patch to add mutex_lock_timeout(). I booted the resulting
> kernel on a parisc machine, but I wouldn't claim to have done any real
> testing. It's fairly mechanical though.
>
> diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
> index cb29d84..6fa4e5f 100644
> --- a/include/asm-arm/mutex.h
> +++ b/include/asm-arm/mutex.h
> @@ -44,7 +44,8 @@ __mutex_fastpath_lock(atomic_t *count, f
> }
>
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
> + fastcall int (*fail_fn)(atomic_t *, long jiffies))
> {
> int __ex_flag, __res;
>
> @@ -60,7 +61,7 @@ __mutex_fastpath_lock_retval(atomic_t *c
>
> __res |= __ex_flag;
> if (unlikely(__res != 0))
> - __res = fail_fn(count);
> + __res = fail_fn(count, jiffies);
> return __res;
> }
>
> diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
> index 0134151..92a93df 100644
> --- a/include/asm-generic/mutex-dec.h
> +++ b/include/asm-generic/mutex-dec.h
> @@ -37,10 +37,11 @@ __mutex_fastpath_lock(atomic_t *count, f
> * or anything the slow path function returns.
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
> + fastcall int (*fail_fn)(atomic_t *, long))
> {
> if (unlikely(atomic_dec_return(count) < 0))
> - return fail_fn(count);
> + return fail_fn(count, jiffies);
> else {
> smp_mb();
> return 0;
> diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
> index e1bbbc7..5a65210 100644
> --- a/include/asm-generic/mutex-null.h
> +++ b/include/asm-generic/mutex-null.h
> @@ -11,7 +11,8 @@ #ifndef _ASM_GENERIC_MUTEX_NULL_H
> #define _ASM_GENERIC_MUTEX_NULL_H
>
> #define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
> -#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
> +#define __mutex_fastpath_lock_retval(count, jiffies, fail_fn) \
> + fail_fn(count, jiffies)
> #define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
> #define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
> #define __mutex_slowpath_needs_to_unlock() 1
> diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
> index 6a7e8c1..b5eee4e 100644
> --- a/include/asm-generic/mutex-xchg.h
> +++ b/include/asm-generic/mutex-xchg.h
> @@ -42,10 +42,11 @@ __mutex_fastpath_lock(atomic_t *count, f
> * or anything the slow path function returns
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
> + fastcall int (*fail_fn)(atomic_t *, long))
> {
> if (unlikely(atomic_xchg(count, 0) != 1))
> - return fail_fn(count);
> + return fail_fn(count, jiffies);
> else {
> smp_mb();
> return 0;
> diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
> index 7a17d9e..7e13ded 100644
> --- a/include/asm-i386/mutex.h
> +++ b/include/asm-i386/mutex.h
> @@ -51,11 +51,11 @@ do { \
> * or anything the slow path function returns
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count,
> - int fastcall (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
> + int fastcall (*fail_fn)(atomic_t *, long))
> {
> if (unlikely(atomic_dec_return(count) < 0))
> - return fail_fn(count);
> + return fail_fn(count, jiffies);
> else
> return 0;
> }
> diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
> index bed73a6..151f067 100644
> --- a/include/asm-ia64/mutex.h
> +++ b/include/asm-ia64/mutex.h
> @@ -36,10 +36,11 @@ __mutex_fastpath_lock(atomic_t *count, v
> * or anything the slow path function returns.
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies,
> + int (*fail_fn)(atomic_t *, long))
> {
> if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
> - return fail_fn(count);
> + return fail_fn(count, jiffies);
> return 0;
> }
>
> diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
> index 16396b1..18668fa 100644
> --- a/include/asm-x86_64/mutex.h
> +++ b/include/asm-x86_64/mutex.h
> @@ -46,11 +46,11 @@ do { \
> * or anything the slow path function returns
> */
> static inline int
> -__mutex_fastpath_lock_retval(atomic_t *count,
> - int fastcall (*fail_fn)(atomic_t *))
> +__mutex_fastpath_lock_retval(atomic_t *count, long jiffies
> + int fastcall (*fail_fn)(atomic_t *, long))
> {
> if (unlikely(atomic_dec_return(count) < 0))
> - return fail_fn(count);
> + return fail_fn(count, jiffies);
> else
> return 0;
> }
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 27c48da..b70caf2 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -122,6 +122,7 @@ static inline int fastcall mutex_is_lock
> */
> extern void fastcall mutex_lock(struct mutex *lock);
> extern int fastcall mutex_lock_interruptible(struct mutex *lock);
> +extern int fastcall mutex_lock_timeout(struct mutex *lock, long jiffies);
>
> #ifdef CONFIG_DEBUG_LOCK_ALLOC
> extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
> diff --git a/kernel/mutex.c b/kernel/mutex.c
> index 8c71cf7..d2ec4d3 100644
> --- a/kernel/mutex.c
> +++ b/kernel/mutex.c
> @@ -122,7 +122,8 @@ EXPORT_SYMBOL(mutex_unlock);
> * Lock a mutex (possibly interruptible), slowpath:
> */
> static inline int __sched
> -__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
> +__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> + long jiffies)
> {
> struct task_struct *task = current;
> struct mutex_waiter waiter;
> @@ -158,19 +159,19 @@ __mutex_lock_common(struct mutex *lock,
> * TASK_UNINTERRUPTIBLE case.)
> */
> if (unlikely(state == TASK_INTERRUPTIBLE &&
> - signal_pending(task))) {
> + (signal_pending(task) || jiffies == 0))) {
> mutex_remove_waiter(lock, &waiter, task->thread_info);
> mutex_release(&lock->dep_map, 1, _RET_IP_);
> spin_unlock_mutex(&lock->wait_lock, flags);
>
> debug_mutex_free_waiter(&waiter);
> - return -EINTR;
> + return jiffies ? -EINTR : -ETIMEDOUT;
> }
> __set_task_state(task, state);
>
> /* didnt get the lock, go to sleep: */
> spin_unlock_mutex(&lock->wait_lock, flags);
> - schedule();
> + jiffies = schedule_timeout(jiffies);
> spin_lock_mutex(&lock->wait_lock, flags);
> }
>
> @@ -194,7 +195,8 @@ __mutex_lock_slowpath(atomic_t *lock_cou
> {
> struct mutex *lock = container_of(lock_count, struct mutex, count);
>
> - __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
> + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
> + MAX_SCHEDULE_TIMEOUT);
> }
>
> #ifdef CONFIG_DEBUG_LOCK_ALLOC
> @@ -202,7 +204,8 @@ void __sched
> mutex_lock_nested(struct mutex *lock, unsigned int subclass)
> {
> might_sleep();
> - __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
> + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass,
> + MAX_SCHEDULE_TIMEOUT);
> }
>
> EXPORT_SYMBOL_GPL(mutex_lock_nested);
> @@ -256,10 +259,10 @@ __mutex_unlock_slowpath(atomic_t *lock_c
>
> /*
> * Here come the less common (and hence less performance-critical) APIs:
> - * mutex_lock_interruptible() and mutex_trylock().
> + * mutex_lock_interruptible(), mutex_lock_timeout and mutex_trylock().
> */
> static int fastcall noinline __sched
> -__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
> +__mutex_lock_interruptible_slowpath(atomic_t *lock_count, long jiffies);
>
> /***
> * mutex_lock_interruptible - acquire the mutex, interruptable
> @@ -275,18 +278,39 @@ __mutex_lock_interruptible_slowpath(atom
> int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
> {
> might_sleep();
> - return __mutex_fastpath_lock_retval
> - (&lock->count, __mutex_lock_interruptible_slowpath);
> + return __mutex_fastpath_lock_retval(&lock->count,
> + MAX_SCHEDULE_TIMEOUT, __mutex_lock_interruptible_slowpath);
> }
>
> EXPORT_SYMBOL(mutex_lock_interruptible);
>
> +/**
> + * mutex_lock_timeout - try to acquire the mutex and fail if it takes too long
> + * @lock: the mutex to be acquired
> + * @timeout: the number of jiffies to wait
> + *
> + * Lock the mutex like mutex_lock(), and return 0 if the mutex has
> + * been acquired or sleep until the mutex becomes available. If a
> + * signal arrives while waiting for the lock then this function
> + * returns -EINTR. If the timeout expires, it returns -ETIMEDOUT.
> + *
> + * This function is similar to (but not equivalent to) down_interruptible().
> + */
> +int fastcall __sched mutex_lock_timeout(struct mutex *lock, long jiffies)
> +{
> + might_sleep();
> + return __mutex_fastpath_lock_retval(&lock->count, jiffies,
> + __mutex_lock_interruptible_slowpath);
> +}
> +
> +EXPORT_SYMBOL(mutex_lock_timeout);
> +
> static int fastcall noinline __sched
> -__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
> +__mutex_lock_interruptible_slowpath(atomic_t *lock_count, long jiffies)
> {
> struct mutex *lock = container_of(lock_count, struct mutex, count);
>
> - return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
> + return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, jiffies);
> }
>
> /*
Hi Matthew,
On Saturday, 25. November 2006 04:55, Matthew Wilcox wrote:
> No comment on this?
Ok, I will comment it. But I'll NOT comment on the implementation.
I'll prove to you instead that timeout-based locking is nonsense.
What should the timeout in mutex_lock_timeout() prevent? Usually the answer is
"if somebody hangs on a mutex forever ...?" and people tell you "ok, that is
a deadlock -> fix your code not to deadlock instead."
Then they tell you "Ok, but if somebody takes a mutex too long?"
and people will answer "ok, then this is a livelock -> fix your code not to
livelock."
Another answer is "I like to block until sth. happens within a specific time frame"
-> fine, this is accomplished by wait_event_timeout() (which blocks only you and
not every other user of the mutex).
So you see there is no real need for this, and having it would introduce an
instrument for hiding such bugs.
When you are dealing with untrusted binary-only code, such locks with timeouts
are sometimes needed, since you don't know the locking rules within
such code.
As this is not the case for kernel code, it is simply not needed.
I know why ACPI needs it (API requirement) and I think the qla???-driver
just needs to be fixed to work without it and nobody did it yet.
Regards
Ingo Oeser, who actually IS the implementation of mutex_lock_timeout()[1]
[1] Every time (since 2001) someone suggests it, I discuss it away :-)
On Sat, Nov 25, 2006 at 05:00:27PM +0100, Ingo Oeser wrote:
> Ok, I will comment it. But I'll NOT comment on the implementation.
> I'll prove you instead, that timout based locking is non-sense.
Your proof misses a case and is thus invalid.
> What should the timout mutex_timeout() prevent? Usually the answer is
> "if sombody hangs on a mutex forever ...?" and people tell you "ok, that is
> a deadlock -> fix your code not to deadlock instead."
> Then they tell you "Ok, but if somebody takes a mutex too long?"
> and people will answer "ok, then this is a livelock -> fix your code not to
> livelock."
>
> Another answer is "I like to block until sth. happens wihin a specific time frame"
> -> fine, this is accomplished by wait_event_timout (which blocks only you and
> not every other user of the mutex).
In the qla case, the mutex can be acquired by a thread which then waits
for the hardware to do something. If the hardware locks up, it is
preferable that the system not hang.
> I know why ACPI needs it (API requirement) and I think the qla???-driver
> just needs to be fixed to work without it and nobody did it yet.
Since Christoph is the one who has his name on it:
/* XXX(hch): crude hack to emulate a down_timeout() */
I assumed that he'd spent enough time thinking about it that fixing it
really wasn't feasible.
Hi Matthew,
On Saturday, 25. November 2006 17:32, Matthew Wilcox wrote:
> In the qla case, the mutex can be acquired by a thread which then waits
> for the hardware to do something. If the hardware locks up, it is
> preferable that the system not hang.
Ok, I looked at it (drivers/scsi/qla2xxx/qla_mbx.c)
and the solution seems simple:
- Introduce a busy flag, check that BEFORE this mutex_lock(),
and don't protect it by that mutex.
- return -EBUSY to the upper layers, if mailbox still busy
- upper layers can either queue the command or use a retry mechanism
There are many examples of this in the kernel. NICs have the same problems
(transmitter busy or stuck) and have had no problem handling that gracefully
for ages.
> I assumed that he'd spent enough time thinking about it that fixing it
> really wasn't feasible.
That doesn't depend on time, just whether you get the right idea or not.
Anyway I CCed the current maintainers.
So my point still stands: Timeout-based locking is evil and hides bugs.
In this case the bugs are:
1. That mutex protects a code path (mailbox command submission
and retrieval) instead of data.
2. "Mailbox is free" is an event, so you should use wait_event_timeout()
for that
Regards
Ingo Oeser