2005-01-20 11:43:34

by Ingo Molnar

Subject: [patch 1/3] spinlock fix #1


I've split up spinlocking-fixes.patch into 3 parts and reworked them.
This is the first one, against BK-curr:

It fixes the BUILD_LOCK_OPS() bug by introducing the following three new
locking primitives:

spin_trylock_test(lock)
read_trylock_test(lock)
write_trylock_test(lock)

This is what BUILD_LOCK_OPS() needs: a nonintrusive test to check
whether the real (intrusive) trylock op would succeed. Semantics and
naming are completely symmetric to the trylock counterparts. No
changes to exit.c.

Build/boot-tested on x86. Architectures that want to support PREEMPT
need to add these definitions.
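
The generic spin_trylock_test() builds on spin_is_locked(); for the
rwlock side, an architecture with an i386-style rwlock (a counter that
starts at RW_LOCK_BIAS and is decremented by 1 per reader and by
RW_LOCK_BIAS per writer) provides tests like these - annotated versions
of the definitions in the patch below:

#define spin_trylock_test(lock)	(!spin_is_locked(lock))

/* readers can still get in while the counter is positive: */
#define read_trylock_test(x)	(atomic_read((atomic_t *)&(x)->lock) > 0)

/* a writer needs the lock to be completely uncontended: */
#define write_trylock_test(x)	((x)->lock == RW_LOCK_BIAS)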

Ingo

Signed-off-by: Ingo Molnar <[email protected]>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
* (We do this in a function because inlining it would be excessive.)
*/

-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn) \
-void __lockfunc _##op##_lock(locktype *lock) \
+#define BUILD_LOCK_OPS(op, locktype) \
+void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
preempt_disable(); \
for (;;) { \
@@ -183,7 +183,7 @@ void __lockfunc _##op##_lock(locktype *l
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (is_locked_fn(lock) && (lock)->break_lock) \
+ while (!op##_trylock_test(lock) && (lock)->break_lock) \
cpu_relax(); \
preempt_disable(); \
} \
@@ -191,7 +191,7 @@ void __lockfunc _##op##_lock(locktype *l
\
EXPORT_SYMBOL(_##op##_lock); \
\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock) \
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -205,7 +205,7 @@ unsigned long __lockfunc _##op##_lock_ir
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (is_locked_fn(lock) && (lock)->break_lock) \
+ while (!op##_trylock_test(lock) && (lock)->break_lock) \
cpu_relax(); \
preempt_disable(); \
} \
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
\
EXPORT_SYMBOL(_##op##_lock_irqsave); \
\
-void __lockfunc _##op##_lock_irq(locktype *lock) \
+void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
_##op##_lock_irqsave(lock); \
} \
\
EXPORT_SYMBOL(_##op##_lock_irq); \
\
-void __lockfunc _##op##_lock_bh(locktype *lock) \
+void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
* _[spin|read|write]_lock_irqsave()
* _[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);

#endif /* CONFIG_PREEMPT */

--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -584,4 +584,10 @@ static inline int bit_spin_is_locked(int
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED

+/**
+ * spin_trylock_test - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_trylock_test(lock) (!spin_is_locked(lock))
+
#endif /* __LINUX_SPINLOCK_H */
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -188,6 +188,18 @@ typedef struct {

#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)

+/**
+ * read_trylock_test - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+
+/**
+ * write_trylock_test - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+
/*
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.


2005-01-20 12:00:49

by Ingo Molnar

Subject: [patch 2/3] spinlock fix #2: generalize [spin|rw]lock yielding


this patch generalizes the targeted lock-yielding facility originally
implemented for ppc64. The facility lets a virtual CPU indicate to the
hypervisor which other virtual CPU it is 'blocked on', and hence which
CPU the hypervisor should yield the current timeslice to, so that the
lock holder can make progress towards releasing the lock.
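
Conceptually, a virtual platform's spinlock_yield() does something like
the sketch below. vcpu_is_preempted() and hypervisor_confer() are
hypothetical stand-ins for the platform-specific calls; the real ppc64
version lives in arch/ppc64/lib/locks.c (touched further down):

static void spinlock_yield_sketch(spinlock_t *lock)
{
	unsigned int lock_value = lock->lock;
	unsigned int holder_cpu;

	if (lock_value == 0)
		return;			/* lock was just released */
	/* ppc64 encodes the holder CPU in the lock word: */
	holder_cpu = lock_value & 0xffff;
	if (!vcpu_is_preempted(holder_cpu))
		return;			/* holder is running, plain spinning is fine */
	/* donate the rest of our timeslice to the holder: */
	hypervisor_confer(holder_cpu);
}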

On physical platforms these functions default to cpu_relax(). Since
physical platforms are in the overwhelming majority, I've added the two
new functions to a new asm-generic/spinlock.h include file - I hope we
can later move more generic spinlock bits there as well.
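
Hand-expanded for the spinlock case (and modulo whitespace), the
contention path in BUILD_LOCK_OPS() now reads roughly:

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	for (;;) {
		if (likely(_raw_spin_trylock(lock)))
			break;
		/* contended: re-enable preemption and spin nonintrusively */
		preempt_enable();
		if (!lock->break_lock)
			lock->break_lock = 1;
		while (!spin_trylock_test(lock) && lock->break_lock)
			spinlock_yield(lock);	/* cpu_relax() on physical platforms */
		preempt_disable();
	}
}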

This patch solves the ppc64/PREEMPT functionality regression reported
by Paul Mackerras. I have only tested it on x86; Paul, would you mind
testing it on ppc64?

Ingo

Signed-off-by: Ingo Molnar <[email protected]>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -184,7 +184,7 @@ void __lockfunc _##op##_lock(locktype##_
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
while (!op##_trylock_test(lock) && (lock)->break_lock) \
- cpu_relax(); \
+ locktype##_yield(lock); \
preempt_disable(); \
} \
} \
@@ -206,7 +206,7 @@ unsigned long __lockfunc _##op##_lock_ir
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
while (!op##_trylock_test(lock) && (lock)->break_lock) \
- cpu_relax(); \
+ locktype##_yield(lock); \
preempt_disable(); \
} \
return flags; \
--- linux/arch/ppc64/lib/locks.c.orig
+++ linux/arch/ppc64/lib/locks.c
@@ -23,7 +23,7 @@
/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)

-void __spin_yield(spinlock_t *lock)
+void spinlock_yield(spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
struct paca_struct *holder_paca;
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(rwlock_t *rw)
+void rwlock_yield(rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
@@ -87,7 +87,7 @@ void spin_unlock_wait(spinlock_t *lock)
while (lock->lock) {
HMT_low();
if (SHARED_PROCESSOR)
- __spin_yield(lock);
+ spinlock_yield(lock);
}
HMT_medium();
}
--- linux/include/asm-ia64/spinlock.h.orig
+++ linux/include/asm-ia64/spinlock.h
@@ -17,6 +17,8 @@
#include <asm/intrinsics.h>
#include <asm/system.h>

+#include <asm-generic/spinlock.h>
+
typedef struct {
volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
--- linux/include/asm-generic/spinlock.h.orig
+++ linux/include/asm-generic/spinlock.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_SPINLOCK_H
+#define _ASM_GENERIC_SPINLOCK_H
+
+/*
+ * Virtual platforms might use these to
+ * yield to specific virtual CPUs:
+ */
+#define spinlock_yield(lock) cpu_relax()
+#define rwlock_yield(lock) cpu_relax()
+
+#endif /* _ASM_GENERIC_SPINLOCK_H */
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -206,6 +206,8 @@ typedef struct {
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
#endif /* CONFIG_DEBUG_SPINLOCK */

+#define spinlock_yield(lock) (void)(lock)
+
/* RW spinlocks: No debug version */

#if (__GNUC__ > 2)
@@ -224,6 +226,8 @@ typedef struct {
#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })

+#define rwlock_yield(lock) (void)(lock)
+
#define _spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
1 : ({preempt_enable(); 0;});})

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -7,6 +7,8 @@
#include <linux/config.h>
#include <linux/compiler.h>

+#include <asm-generic/spinlock.h>
+
asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));

--- linux/include/asm-ppc64/spinlock.h.orig
+++ linux/include/asm-ppc64/spinlock.h
@@ -64,11 +64,11 @@ static __inline__ void _raw_spin_unlock(
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void spinlock_yield(spinlock_t *lock);
+extern void rwlock_yield(rwlock_t *lock);
#else /* SPLPAR || ISERIES */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
+#define spinlock_yield(x) barrier()
+#define rwlock_yield(x) barrier()
#define SHARED_PROCESSOR 0
#endif
extern void spin_unlock_wait(spinlock_t *lock);
@@ -109,7 +109,7 @@ static void __inline__ _raw_spin_lock(sp
do {
HMT_low();
if (SHARED_PROCESSOR)
- __spin_yield(lock);
+ spinlock_yield(lock);
} while (likely(lock->lock != 0));
HMT_medium();
}
@@ -127,7 +127,7 @@ static void __inline__ _raw_spin_lock_fl
do {
HMT_low();
if (SHARED_PROCESSOR)
- __spin_yield(lock);
+ spinlock_yield(lock);
} while (likely(lock->lock != 0));
HMT_medium();
local_irq_restore(flags_dis);
@@ -201,7 +201,7 @@ static void __inline__ _raw_read_lock(rw
do {
HMT_low();
if (SHARED_PROCESSOR)
- __rw_yield(rw);
+ rwlock_yield(rw);
} while (likely(rw->lock < 0));
HMT_medium();
}
@@ -258,7 +258,7 @@ static void __inline__ _raw_write_lock(r
do {
HMT_low();
if (SHARED_PROCESSOR)
- __rw_yield(rw);
+ rwlock_yield(rw);
} while (likely(rw->lock != 0));
HMT_medium();
}
--- linux/include/asm-x86_64/spinlock.h.orig
+++ linux/include/asm-x86_64/spinlock.h
@@ -6,6 +6,8 @@
#include <asm/page.h>
#include <linux/config.h>

+#include <asm-generic/spinlock.h>
+
extern int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));

2005-01-20 12:09:20

by Ingo Molnar

Subject: [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86


This patch would have caught the bug in BK-curr (the one patch #1
fixes) via a compiler warning. Test-built/booted on x86.
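
A one-liner shows the difference. With the old macro the statement
below compiled silently even though it applies the spinlock test to a
rwlock_t - exactly the BK-curr bug; with the inline function gcc flags
the incompatible pointer type:

rwlock_t rw = RW_LOCK_UNLOCKED;

/* old macro: silently accepted; new inline: pointer-type warning */
int bogus = spin_is_locked(&rw);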

Ingo

Signed-off-by: Ingo Molnar <[email protected]>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -36,7 +36,10 @@ typedef struct {

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+static inline void spin_lock_init(spinlock_t *lock)
+{
+ *lock = SPIN_LOCK_UNLOCKED;
+}

/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -45,8 +48,17 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/

-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return *(volatile signed char *)(&lock->lock) <= 0;
+}
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ do {
+ barrier();
+ } while (spin_is_locked(lock));
+}

#define spin_lock_string \
"\n1:\t" \

2005-01-20 12:19:34

by Ingo Molnar

Subject: [patch] stricter type-checking rwlock primitives, x86


Turn the x86 rwlock macros into inline functions to get stricter
type-checking. Test-built/booted on x86. (This patch applies on top of
all previous spinlock patches.)
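
The same kind of mix-up is now caught on the rwlock side as well, for
example:

spinlock_t s = SPIN_LOCK_UNLOCKED;

/* both lock types have a ->lock field, so the old macro compiled this;
 * the inline version makes gcc warn about the mismatched pointer type */
int wrong = rwlock_is_locked(&s);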

Ingo

Signed-off-by: Ingo Molnar <[email protected]>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -198,21 +198,33 @@ typedef struct {

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+static inline void rwlock_init(rwlock_t *rw)
+{
+ *rw = RW_LOCK_UNLOCKED;
+}

-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+static inline int rwlock_is_locked(rwlock_t *rw)
+{
+ return rw->lock != RW_LOCK_BIAS;
+}

/**
* read_trylock_test - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+static inline int read_trylock_test(rwlock_t *rw)
+{
+ return atomic_read((atomic_t *)&rw->lock) > 0;
+}

/**
* write_trylock_test - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+static inline int write_trylock_test(rwlock_t *rw)
+{
+ return atomic_read((atomic_t *)&rw->lock) == RW_LOCK_BIAS;
+}

/*
* On x86, we implement read-write locks as a 32-bit counter
@@ -241,8 +253,16 @@ static inline void _raw_write_lock(rwloc
__build_write_lock(rw, "__write_lock_failed");
}

-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+ asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+ asm volatile("lock ; addl $" RW_LOCK_BIAS_STR
+ ",%0":"=m" (rw->lock) : : "memory");
+}

static inline int _raw_read_trylock(rwlock_t *lock)
{

2005-01-20 12:23:08

by Ingo Molnar

Subject: [patch] minor spinlock cleanups


Cleanup: remove a stale semicolon from linux/spinlock.h and a stale
space from asm-i386/spinlock.h.
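
The semicolon is more than cosmetic: with the trailing ';' in the
macro, spin_unlock_wait(lock); expands to "(void)(lock);;", and the
extra empty statement breaks an unbraced if/else around it (can_wait
and do_something_else() below are just placeholders):

if (can_wait)
	spin_unlock_wait(lock);		/* old macro: stray ';' ends the 'if' */
else					/* error: 'else' without a previous 'if' */
	do_something_else();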

Ingo

Signed-off-by: Ingo Molnar <[email protected]>

--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -202,7 +202,7 @@ typedef struct {
#define _raw_spin_lock(lock) do { (void)(lock); } while(0)
#define spin_is_locked(lock) ((void)(lock), 0)
#define _raw_spin_trylock(lock) (((void)(lock), 1))
-#define spin_unlock_wait(lock) (void)(lock);
+#define spin_unlock_wait(lock) (void)(lock)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
#endif /* CONFIG_DEBUG_SPINLOCK */

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -92,7 +92,7 @@ static inline void spin_unlock_wait(spin
* (except on PPro SMP or if we are using OOSTORE)
* (PPro errata 66, 92)
*/
-
+
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \

2005-01-20 22:51:57

by J.A. Magallon

Subject: Re: [patch 3/3] spinlock fix #3: type-checking spinlock primitives, x86


On 2005.01.20, Ingo Molnar wrote:
>
> This patch would have caught the bug in BK-curr (the one patch #1
> fixes) via a compiler warning. Test-built/booted on x86.
>
> Ingo
>
> Signed-off-by: Ingo Molnar <[email protected]>
>
> --- linux/include/asm-i386/spinlock.h.orig
> +++ linux/include/asm-i386/spinlock.h
> @@ -36,7 +36,10 @@ typedef struct {
>
> #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
>
> -#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
> +static inline void spin_lock_init(spinlock_t *lock)

Will it have any real effect if you add things like:

+static inline void spin_lock_init(spinlock_t *lock) __attribute__((__pure__));

??

TIA

--
J.A. Magallon <jamagallon()able!es> \ Software is like sex:
werewolf!able!es \ It's better when it's free
Mandrakelinux release 10.2 (Cooker) for i586
Linux 2.6.10-jam4 (gcc 3.4.3 (Mandrakelinux 10.2 3.4.3-3mdk)) #2

