2005-12-29 21:04:39

by Ingo Molnar

[permalink] [raw]
Subject: [patch 03/13] mutex subsystem, add include/asm-i386/mutex.h

add the i386 version of mutex.h, optimized in assembly.

Signed-off-by: Arjan van de Ven <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>

----

include/asm-i386/mutex.h | 140 +++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 140 insertions(+)

Index: linux/include/asm-i386/mutex.h
===================================================================
--- /dev/null
+++ linux/include/asm-i386/mutex.h
@@ -0,0 +1,140 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar <[email protected]>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+/*
+ * Implementation note: the fastpath is a single LOCK-prefixed "decl"
+ * of *count, with <count> passed in %eax.  If the decrement result is
+ * negative ("js"), control jumps to an out-of-line text section
+ * (LOCK_SECTION_START) that calls the slowpath <fn_name> and jumps
+ * back to label 1.  "memory", %ecx and %edx are declared clobbered
+ * across the slowpath call.
+ */
+#define __mutex_fastpath_lock(count, fn_name) \
+do { \
+ /* type-check the function too: */ \
+ void fastcall (*__tmp)(atomic_t *) = fn_name; \
+ unsigned int dummy; \
+ \
+ (void)__tmp; \
+ typecheck(atomic_t *, count); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%eax) \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fn_name" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fn_name)(atomic_t *))
+{
+ /* Uncontended case: the decrement took the count from 1 to 0. */
+ if (likely(atomic_dec_return(count) >= 0))
+ return 0;
+ /* Contended: the slowpath supplies the return value. */
+ return fn_name(count);
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fn: function to call if the original value was not 0
+ *
+ * try to promote the mutex from 0 to 1. if it wasn't 0, call <fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+/*
+ * Implementation note: mirror image of the lock fastpath -- a single
+ * LOCK-prefixed "incl" of *count (passed in %eax).  If the result is
+ * still <= 0 ("jle"), the out-of-line section calls the slowpath
+ * <fn_name> before resuming at label 1.  "memory", %ecx and %edx are
+ * declared clobbered across the slowpath call.
+ */
+#define __mutex_fastpath_unlock(count, fn_name) \
+do { \
+ /* type-check the function too: */ \
+ void fastcall (*__tmp)(atomic_t *) = fn_name; \
+ unsigned int dummy; \
+ \
+ (void)__tmp; \
+ typecheck(atomic_t *, count); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%eax) \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fn_name" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+/*
+ * Per the contract documented above __mutex_fastpath_unlock(): this
+ * implementation may leave the count at a value lower than 1 in the
+ * unlock failure case, so the slowpath must perform the final unlock
+ * itself -- hence always 1 here.
+ */
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg based one is the best one
+ * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not we fall back to the spinlock based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ /*
+ * The "== 1" must be inside likely(): likely(x) is
+ * __builtin_expect(!!(x), 1) and evaluates to 0/1, so comparing
+ * likely()'s result would treat any nonzero old value (e.g. a
+ * negative, contended count) as a successful trylock.
+ */
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ return 0;
+#else
+ return fn(count);
+#endif
+}
+
+#endif


2005-12-31 06:42:34

by Chuck Ebbert

[permalink] [raw]
Subject: Re: [patch 03/13] mutex subsystem, add include/asm-i386/mutex.h

In-Reply-To: <[email protected]>

On Thu, 29 Dec 2005 at 22:03:36 +0100, Ingo Molnar wrote:

> +#define __mutex_fastpath_lock(count, fn_name) \
> +do { \
> + /* type-check the function too: */ \
> + void fastcall (*__tmp)(atomic_t *) = fn_name; \
> + unsigned int dummy; \
> + \
> + (void)__tmp; \
> + typecheck(atomic_t *, count); \

The function type checking is ugly. Wouldn't this be better?

Signed-off-by: Chuck Ebbert <[email protected]>

include/asm-arm/mutex.h | 12 +++---------
include/asm-i386/mutex.h | 8 ++------
include/asm-x86_64/mutex.h | 8 ++------
include/linux/kernel.h | 9 +++++++++
include/linux/mutex.h | 4 ++++
5 files changed, 20 insertions(+), 21 deletions(-)

--- 2.6.15-rc7b.orig/include/asm-arm/mutex.h
+++ 2.6.15-rc7b/include/asm-arm/mutex.h
@@ -25,11 +25,9 @@
*/
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
- /* type-check the function too: */ \
- void fastcall (*__tmp)(atomic_t *) = fail_fn; \
int __ex_flag, __res; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fail_fn); \
typecheck(atomic_t *, count); \
\
__asm__ ( \
@@ -47,11 +45,9 @@ do { \

#define __mutex_fastpath_lock_retval(count, fail_fn) \
({ \
- /* type-check the function too: */ \
- int fastcall (*__tmp)(atomic_t *) = fail_fn; \
int __ex_flag, __res; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_int_fail_fn_t, fail_fn); \
typecheck(atomic_t *, count); \
\
__asm__ ( \
@@ -76,11 +72,9 @@ do { \
*/
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
- /* type-check the function too: */ \
- void fastcall (*__tmp)(atomic_t *) = fail_fn; \
int __ex_flag, __res, __orig; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fail_fn); \
typecheck(atomic_t *, count); \
\
__asm__ ( \
--- 2.6.15-rc7b.orig/include/asm-i386/mutex.h
+++ 2.6.15-rc7b/include/asm-i386/mutex.h
@@ -21,11 +21,9 @@
*/
#define __mutex_fastpath_lock(count, fn_name) \
do { \
- /* type-check the function too: */ \
- void fastcall (*__tmp)(atomic_t *) = fn_name; \
unsigned int dummy; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fn_name); \
typecheck(atomic_t *, count); \
\
__asm__ __volatile__( \
@@ -79,11 +77,9 @@ __mutex_fastpath_lock_retval(atomic_t *c
*/
#define __mutex_fastpath_unlock(count, fn_name) \
do { \
- /* type-check the function too: */ \
- void fastcall (*__tmp)(atomic_t *) = fn_name; \
unsigned int dummy; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fn_name); \
typecheck(atomic_t *, count); \
\
__asm__ __volatile__( \
--- 2.6.15-rc7b.orig/include/asm-x86_64/mutex.h
+++ 2.6.15-rc7b/include/asm-x86_64/mutex.h
@@ -18,11 +18,9 @@
*/
#define __mutex_fastpath_lock(v, fn_name) \
do { \
- /* type-check the function too: */ \
- fastcall void (*__tmp)(atomic_t *) = fn_name; \
unsigned long dummy; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fn_name); \
typecheck(atomic_t *, v); \
\
__asm__ __volatile__( \
@@ -50,11 +48,9 @@ do { \
*/
#define __mutex_fastpath_unlock(v, fn_name) \
do { \
- /* type-check the function too: */ \
- fastcall void (*__tmp)(atomic_t *) = fn_name; \
unsigned long dummy; \
\
- (void)__tmp; \
+ typecheck_fn(mutex_void_fail_fn_t, fn_name); \
typecheck(atomic_t *, v); \
\
__asm__ __volatile__( \
--- 2.6.15-rc7b.orig/include/linux/mutex.h
+++ 2.6.15-rc7b/include/linux/mutex.h
@@ -69,6 +69,10 @@ struct mutex_waiter {
#endif
};

+/* mutex functions called when extra work needs to be done have these types */
+typedef void fastcall mutex_void_fail_fn_t(atomic_t *);
+typedef int fastcall mutex_int_fail_fn_t(atomic_t *);
+
#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
--- 2.6.15-rc7b.orig/include/linux/kernel.h
+++ 2.6.15-rc7b/include/linux/kernel.h
@@ -286,6 +286,15 @@ extern void dump_stack(void);
1; \
})

+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs to use typedef for the function type.)
+ */
+#define typecheck_fn(type,function) \
+({ type *__dummy = function; \
+ (void)__dummy; \
+})
+
#endif /* __KERNEL__ */

#define SI_LOAD_SHIFT 16
--
Chuck

2006-01-02 13:30:19

by Ingo Molnar

[permalink] [raw]
Subject: Re: [patch 03/13] mutex subsystem, add include/asm-i386/mutex.h


* Chuck Ebbert <[email protected]> wrote:

> In-Reply-To: <[email protected]>
>
> On Thu, 29 Dec 2005 at 22:03:36 +0100, Ingo Molnar wrote:
>
> > +#define __mutex_fastpath_lock(count, fn_name) \
> > +do { \
> > + /* type-check the function too: */ \
> > + void fastcall (*__tmp)(atomic_t *) = fn_name; \
> > + unsigned int dummy; \
> > + \
> > + (void)__tmp; \
> > + typecheck(atomic_t *, count); \
>
> The function type checking is ugly. Wouldn't this be better?
>
> Signed-off-by: Chuck Ebbert <[email protected]>

thanks, applied.

Ingo

2006-01-02 13:37:40

by Ingo Molnar

[permalink] [raw]
Subject: Re: [patch 03/13] mutex subsystem, add include/asm-i386/mutex.h


* Chuck Ebbert <[email protected]> wrote:

> +/* mutex functions called when extra work needs to be done have these types */
> +typedef void fastcall mutex_void_fail_fn_t(atomic_t *);
> +typedef int fastcall mutex_int_fail_fn_t(atomic_t *);

i didnt apply this bit: there's not much to be won and readability
suffers.

Ingo