2001-02-20 03:09:11

by Brian J. Watson

Subject: [PATCH] trylock for rw_semaphores: 2.4.1

diff -Nar -u4 2.4.1/arch/i386/kernel/semaphore.c 2.4.1-trylock/arch/i386/kernel/semaphore.c
--- 2.4.1/arch/i386/kernel/semaphore.c Sat Nov 18 17:31:25 2000
+++ 2.4.1-trylock/arch/i386/kernel/semaphore.c Fri Feb 16 18:13:16 2001
@@ -382,8 +382,41 @@

return sem;
}

+/* We have the bias, but we can't sleep. We have to get rid of it
+ * as gracefully as we can.
+ *
+ * This routine does have the unfortunate side-effect that we could
+ * spin for a while if there's a lot of contention for this lock. If
+ * that's the case, however, then it's less likely that we would hold
+ * the bias and be running this code.
+ */
+void __up_biased(int val, struct rw_semaphore *sem)
+{
+	int count, newcount;
+repeat:
+	/* Does it look like we're racing with another contender? */
+	count = atomic_read(&sem->count);
+	newcount = count + val;
+	if (newcount < 0)
+		/* Yes: Try again. */
+		goto repeat;
+	else
+		/* No: Bump the count while no one's looking. Did we race? */
+		if (cmpxchg((volatile int *)&sem->count, count, newcount)
+		    != count)
+			/* Yes: Try again. */
+			goto repeat;
+		else
+			/* No: Let the real waiters duke it out for the bias.
+			 * FIXME: This has the same potential stampede problem
+			 * as down_write_failed_biased().
+			 */
+			if (atomic_read(&sem->count) >= 0)
+				wake_up(&sem->wait);
+}
+
asm(
"
.align 4
.globl __rwsem_wake
diff -Nar -u4 2.4.1/include/asm-i386/atomic.h 2.4.1-trylock/include/asm-i386/atomic.h
--- 2.4.1/include/asm-i386/atomic.h Thu Jan 4 14:50:46 2001
+++ 2.4.1-trylock/include/asm-i386/atomic.h Fri Feb 16 18:13:16 2001
@@ -52,8 +52,21 @@
:"ir" (i), "m" (v->counter) : "memory");
return c;
}

+#define ATOMIC_SUB_SIGN_BIT 0x1
+#define ATOMIC_SUB_CARRY_BIT 0x2
+static __inline__ int atomic_sub_sign_and_carry(int i, atomic_t *v)
+{
+	unsigned char s, c;
+
+	__asm__ __volatile__(
+		LOCK "subl %3,%0; sets %1; setc %2"
+		:"=m" (v->counter), "=qm" (s), "=qm" (c)
+		:"ir" (i), "m" (v->counter) : "memory");
+	return s | c<<1;
+}
+
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK "incl %0"
diff -Nar -u4 2.4.1/include/asm-i386/semaphore.h 2.4.1-trylock/include/asm-i386/semaphore.h
--- 2.4.1/include/asm-i386/semaphore.h Thu Jan 4 14:50:46 2001
+++ 2.4.1-trylock/include/asm-i386/semaphore.h Fri Feb 16 18:13:16 2001
@@ -381,6 +381,76 @@
#endif
__up_write(sem);
}

+extern void __up_biased(int val, struct rw_semaphore *sem);
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int retval;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	retval = atomic_sub_sign_and_carry(1, &sem->count);
+	/* Did we get the lock? */
+	if (retval & ATOMIC_SUB_SIGN_BIT) {
+		/* No: Does someone else have the bias? */
+		if (retval & ATOMIC_SUB_CARRY_BIT)
+			/* No: Guess we have to do this the hard way. */
+			__up_biased(1, sem);
+		else
+			/* Yes: Fix the count and pretend nothing happened. */
+			__up_read(sem);
+		return 1;
+	}
+	else {
+		/* Yes: We got the lock!! */
+#if WAITQUEUE_DEBUG
+		if (sem->write_bias_granted)
+			BUG();
+		if (atomic_read(&sem->writers))
+			BUG();
+		atomic_inc(&sem->readers);
+#endif
+		return 0;
+	}
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int retval;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	retval = atomic_sub_sign_and_carry(RW_LOCK_BIAS, &sem->count);
+	/* Did we get the lock? */
+	if (retval & ATOMIC_SUB_SIGN_BIT) {
+		/* No: Does someone else have the bias? */
+		if (retval & ATOMIC_SUB_CARRY_BIT)
+			/* No: Guess we have to do this the hard way. */
+			__up_biased(RW_LOCK_BIAS, sem);
+		else
+			/* Yes: Fix the count and pretend nothing happened. */
+			__up_write(sem);
+		return 1;
+	}
+	else {
+		/* Yes: We got the lock!! */
+#if WAITQUEUE_DEBUG
+		if (atomic_read(&sem->writers))
+			BUG();
+		if (atomic_read(&sem->readers))
+			BUG();
+		if (sem->read_bias_granted)
+			BUG();
+		if (sem->write_bias_granted)
+			BUG();
+		atomic_inc(&sem->writers);
+#endif
+		return 0;
+	}
+}
+
#endif
#endif
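
A minimal user-space sketch of what atomic_sub_sign_and_carry() reports (an
illustration, not part of the patch): bit 0 is the sign flag of the new count
and bit 1 is the carry flag, i.e. an unsigned borrow, following the usual x86
SUB flag semantics. The flags are recomputed in plain C rather than with
sets/setc, purely to show the encoding that the trylock routines decode.

#include <stdio.h>

#define ATOMIC_SUB_SIGN_BIT	0x1
#define ATOMIC_SUB_CARRY_BIT	0x2

/* Non-atomic model of the locked "subl; sets; setc" sequence. */
static int model_sub_sign_and_carry(int *counter, int i)
{
	unsigned int old = (unsigned int)*counter;
	int new = *counter - i;
	int s = (new < 0);			/* SF: result went negative */
	int c = (old < (unsigned int)i);	/* CF: unsigned borrow occurred */

	*counter = new;
	return s | (c << 1);
}

int main(void)
{
	int count = 0;	/* e.g. an rwsem held by a writer, with no waiters */
	int r = model_sub_sign_and_carry(&count, 1);

	/* Prints sign=1 carry=1: the trylock failed, and we are the one who
	 * pushed the count negative, so we would take the __up_biased() path. */
	printf("sign=%d carry=%d\n",
	       !!(r & ATOMIC_SUB_SIGN_BIT), !!(r & ATOMIC_SUB_CARRY_BIT));
	return 0;
}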




Attachments:
patch-2.4.1-trylock (4.00 kB)

2001-02-20 04:53:54

by Benjamin LaHaise

Subject: Re: [PATCH] trylock for rw_semaphores: 2.4.1

On Mon, 19 Feb 2001, Brian J. Watson wrote:

> Here is an x86 implementation of down_read_trylock() and
> down_write_trylock() for read/write semaphores. As with down_trylock()
> for exclusive semaphores, they don't block if they fail to get the
> lock; they simply return 1, as opposed to the 0 returned on success.

How about the following instead? Warning: compiled, not tested.

-ben

diff -ur v2.4.2-pre3/include/asm-i386/semaphore.h trylock/include/asm-i386/semaphore.h
--- v2.4.2-pre3/include/asm-i386/semaphore.h Mon Feb 12 16:04:59 2001
+++ trylock/include/asm-i386/semaphore.h Mon Feb 19 23:50:03 2001
@@ -382,5 +382,32 @@
__up_write(sem);
}

+/* returns 1 if it successfully obtained the semaphore for write */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int old = RW_LOCK_BIAS, new = 0;
+	int res;
+
+	res = cmpxchg(&sem->count.counter, old, new);
+	return (res == RW_LOCK_BIAS);
+}
+
+/* returns 1 if it successfully obtained the semaphore for read */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret = 1;
+	asm volatile(
+		LOCK "subl $1,%0
+	js 2f
+1:
+	.section .text.lock,\"ax\"
+2:" LOCK "inc %0
+	subl %1,%1
+	jmp 1b
+	.previous"
+		:"=m" (*(volatile int *)sem), "=r" (ret) : "1" (ret) : "memory");
+	return ret;
+}
+
#endif
#endif
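
The write path above reduces to a single compare-and-exchange: the semaphore
is free only when the count is exactly RW_LOCK_BIAS, so one cmpxchg from
RW_LOCK_BIAS to 0 either takes the lock or fails without disturbing the count.
A small user-space model of that idea (not kernel code;
__sync_val_compare_and_swap stands in for the kernel's cmpxchg(), and the
names are made up for illustration):

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

struct model_rwsem {
	int count;	/* RW_LOCK_BIAS when the lock is free */
};

/* Returns 1 on success, matching the convention in the patch above. */
static int model_down_write_trylock(struct model_rwsem *sem)
{
	int old = RW_LOCK_BIAS, new = 0;

	return __sync_val_compare_and_swap(&sem->count, old, new) == old;
}

int main(void)
{
	struct model_rwsem sem = { RW_LOCK_BIAS };

	printf("first try:  %d\n", model_down_write_trylock(&sem));	/* 1 */
	printf("second try: %d\n", model_down_write_trylock(&sem));	/* 0 */
	return 0;
}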

2001-02-20 22:58:42

by Brian J. Watson

Subject: Re: [PATCH] trylock for rw_semaphores: 2.4.1

diff -ru4 2.4.1/include/asm-i386/semaphore.h 2.4.1-ben/include/asm-i386/semaphore.h
--- 2.4.1/include/asm-i386/semaphore.h Fri Feb 16 18:47:23 2001
+++ 2.4.1-ben/include/asm-i386/semaphore.h Tue Feb 20 14:23:19 2001
@@ -381,6 +381,61 @@
#endif
__up_write(sem);
}

+/* returns 0 if it successfully obtained the semaphore for write */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int old = RW_LOCK_BIAS, new = 0;
+
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	if (cmpxchg(&sem->count.counter, old, new) == RW_LOCK_BIAS) {
+#if WAITQUEUE_DEBUG
+		if (atomic_read(&sem->writers))
+			BUG();
+		if (atomic_read(&sem->readers))
+			BUG();
+		if (sem->read_bias_granted)
+			BUG();
+		if (sem->write_bias_granted)
+			BUG();
+		atomic_inc(&sem->writers);
+#endif
+		return 0;
+	}
+	else
+		return 1;
+}
+
+/* returns 0 if it successfully obtained the semaphore for read */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int old, new;
+
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+repeat:
+	old = atomic_read(&sem->count);
+	if (old <= 0)
+		return 1;
+	new = old - 1;
+	if (cmpxchg(&sem->count.counter, old, new) == old) {
+#if WAITQUEUE_DEBUG
+		if (sem->write_bias_granted)
+			BUG();
+		if (atomic_read(&sem->writers))
+			BUG();
+		atomic_inc(&sem->readers);
+#endif
+		return 0;
+	}
+	else
+		goto repeat;
+}
+
#endif
#endif
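
With the 0-on-success convention restored here, callers can use the same
pattern as the existing down_trylock() for exclusive semaphores. A
hypothetical caller sketch (do_read_side_work() and try_read_side_work() are
made-up names, shown only to illustrate the calling convention):

extern void do_read_side_work(void);

static int try_read_side_work(struct rw_semaphore *sem)
{
	if (down_read_trylock(sem))
		return 1;		/* non-zero: lock not taken, and we never slept */

	do_read_side_work();		/* read-side critical section */
	up_read(sem);
	return 0;
}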


Attachments:
patch-2.4.1-trylock (1.42 kB)