New in V2:
* get rid of ugly #ifdef's in kernel/spinlock.h
* convert __raw_{read|write}_lock_flags to an inline func
SGI has observed that on large systems, interrupts are not serviced for
a long period of time when waiting for a rwlock. The following patch
series re-enables irqs while waiting for the lock, resembling the code
which is already there for spinlocks.
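For illustration only, here is a rough sketch of what such a flags-aware
slow path does. This is not the actual ia64 code (that lives in a separate
patch), and the generic helpers are used loosely for clarity:

static inline void __raw_read_lock_flags(raw_rwlock_t *lock,
					 unsigned long flags)
{
	while (!__raw_read_trylock(lock)) {
		/* if interrupts were on before local_irq_save(),
		 * re-enable them while we spin ... */
		if (!irqs_disabled_flags(flags))
			local_irq_enable();
		while (!__raw_read_can_lock(lock))
			cpu_relax();
		/* ... and disable them again before retrying */
		local_irq_disable();
	}
}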
I have only implemented the ia64 version, because the patch adds some
overhead to the fast path. I assume there is currently no demand for this
on other architectures, because those systems are not as large. Of course,
the possibility to implement raw_{read|write}_lock_flags for any
architecture is still there.
Petr Tesarik
The new macro LOCK_CONTENDED_FLAGS expands to the correct implementation
depending on the config options, so that IRQs are re-enabled when
possible, but they remain disabled if CONFIG_LOCKDEP is set.
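For reference, with these definitions the converted call site in
_spin_lock_irqsave_nested() below expands roughly like this (illustration
only):

#ifdef CONFIG_LOCKDEP
	/* interrupts stay disabled while the lock is contended */
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	/* the architecture may re-enable interrupts while spinning */
	_raw_spin_lock_flags(lock, &flags);
#endif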
Signed-off-by: Petr Tesarik <[email protected]>
---
include/linux/lockdep.h | 17 +++++++++++++++++
kernel/spinlock.c | 12 ++----------
2 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 331e5f1..1e6e578 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -376,6 +376,23 @@ do { \
#endif /* CONFIG_LOCK_STAT */
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 29ab207..cf41b87 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -299,16 +299,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
- _raw_spin_lock_flags(lock, &flags);
-#endif
+ LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+ _raw_spin_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
Pass the original flags to rwlock arch-code, so that it can re-enable
interrupts if implemented for that architecture.
Initially, make __raw_read_lock_flags and __raw_write_lock_flags
stubs which just do the same thing as non-flags variants.
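To see how the pieces fit together: on SMP without lockdep, a contended
read_lock_irqsave() now goes through roughly this chain (illustration only):

	_read_lock_irqsave(lock)
	  LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			       _raw_read_lock_flags, &flags)
	    _raw_read_lock_flags(lock, &flags)
	      __raw_read_lock_flags(&lock->raw_lock, flags)
		__raw_read_lock(&lock->raw_lock)   /* the stub ignores flags */

so behaviour is unchanged until an architecture replaces its stub with a
real __raw_read_lock_flags() that re-enables interrupts while it spins.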
Signed-off-by: Petr Tesarik <[email protected]>
---
arch/alpha/include/asm/spinlock.h | 3 +++
arch/arm/include/asm/spinlock.h | 3 +++
arch/ia64/include/asm/spinlock.h | 3 +++
arch/mips/include/asm/spinlock.h | 2 ++
arch/powerpc/include/asm/spinlock.h | 3 +++
arch/s390/include/asm/spinlock.h | 3 +++
arch/sh/include/asm/spinlock.h | 3 +++
arch/sparc/include/asm/spinlock_32.h | 2 ++
arch/sparc/include/asm/spinlock_64.h | 2 ++
include/asm-cris/arch-v32/spinlock.h | 2 ++
include/asm-m32r/spinlock.h | 3 +++
include/asm-parisc/spinlock.h | 3 +++
include/asm-x86/spinlock.h | 3 +++
include/linux/spinlock.h | 6 ++++++
kernel/spinlock.c | 6 ++++--
15 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aeeb125..e38fb95 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -166,6 +166,9 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
lock->lock = 0;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 2b41ebb..c13681a 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -217,6 +217,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 0229fb9..0a61961 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -213,6 +213,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 1a1f320..de03e47 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -479,6 +479,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return ret;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f56a843..f6491ae 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -287,6 +287,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
rw->lock = 0;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) __spin_yield(lock)
#define _raw_read_relax(lock) __rw_yield(lock)
#define _raw_write_relax(lock) __rw_yield(lock)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index df84ae9..f3861b0 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -172,6 +172,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return _raw_write_trylock_retry(rw);
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index e793181..6028356 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -216,6 +216,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (oldval > (RW_LOCK_BIAS - 1));
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index bf2d532..46f91ab 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -177,6 +177,8 @@ static inline int __read_trylock(raw_rwlock_t *rw)
#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 120cfe4..e856cd0 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -230,9 +230,11 @@ static int inline __write_trylock(raw_rwlock_t *lock)
}
#define __raw_read_lock(p) __read_lock(p)
+#define __raw_read_lock_flags(p, f) __read_lock(p)
#define __raw_read_trylock(p) __read_trylock(p)
#define __raw_read_unlock(p) __read_unlock(p)
#define __raw_write_lock(p) __write_lock(p)
+#define __raw_write_lock_flags(p, f) __write_lock(p)
#define __raw_write_unlock(p) __write_unlock(p)
#define __raw_write_trylock(p) __write_trylock(p)
diff --git a/include/asm-cris/arch-v32/spinlock.h b/include/asm-cris/arch-v32/spinlock.h
index 0d5709b..129756b 100644
--- a/include/asm-cris/arch-v32/spinlock.h
+++ b/include/asm-cris/arch-v32/spinlock.h
@@ -121,6 +121,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return 1;
}
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
index f5cfba8..dded923 100644
--- a/include/asm-m32r/spinlock.h
+++ b/include/asm-m32r/spinlock.h
@@ -316,6 +316,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index f3d2090..fae03e1 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -187,6 +187,9 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
return !rw->counter;
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 157ff7f..132f5d3 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -357,6 +357,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fcc..9e3fe36 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -148,9 +148,11 @@ do { \
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern void _raw_read_lock(rwlock_t *lock);
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
extern int _raw_read_trylock(rwlock_t *lock);
extern void _raw_read_unlock(rwlock_t *lock);
extern void _raw_write_lock(rwlock_t *lock);
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
@@ -160,9 +162,13 @@ do { \
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock_flags(lock, flags) \
+ __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock_flags(lock, flags) \
+ __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cf41b87..7c2f5b5 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
@@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
On Thu, 2008-10-23 at 18:08 +0200, Petr Tesarik wrote:
> New in V2:
> * get rid of ugly #ifdef's in kernel/spinlock.h
> * convert __raw_{read|write}_lock_flags to an inline func
>
> SGI has observed that on large systems, interrupts are not serviced for
> a long period of time when waiting for a rwlock. The following patch
> series re-enables irqs while waiting for the lock, resembling the code
> which is already there for spinlocks.
>
> I have only implemented the ia64 version, because the patch adds some
> overhead to the fast path. I assume there is currently no demand for
> this on other architectures, because those systems are not as large.
> Of course, the possibility to implement raw_{read|write}_lock_flags
> for any architecture is still there.
>
> Petr Tesarik
Any comments on my second patch series? Not even an Acked-by? Dislike of
the concept? Should I post it again?
I feel a bit uncertain about what the status is...
Petr
> Any comments on my second patch series? Not even an Acked-by? Dislike of
> the concept? Should I post it again?
Better response to interrupts is good, but it comes at the
cost of longer latency acquiring the lock (in the case where
an interrupt happens while we are waiting for the lock, and
the lock is freed while we are off executing the interrupt
handler).
Any suggestions on how to measure the trade-off here? Possibly
it doesn't matter because this may only be significant when
the lock is heavily contended and you are probably already
hosed in this case.
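CONFIG_LOCK_STAT (which the lockdep patch already touches) can report
per-lock wait times via /proc/lock_stat, which would at least show how
long readers sit on contended rwlocks. Beyond that, here is a purely
illustrative kernel-module sketch (all names are made up, it is not part
of this series, and it only measures raw wait time, not the extra latency
from servicing an interrupt mid-wait):

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static DEFINE_RWLOCK(test_lock);

/* Writer thread: grab the write lock and sit on it for a while. */
static int writer_fn(void *unused)
{
	unsigned long flags;

	write_lock_irqsave(&test_lock, flags);
	mdelay(10);		/* hold the lock ~10ms */
	write_unlock_irqrestore(&test_lock, flags);
	return 0;
}

static int __init rwlock_wait_test_init(void)
{
	unsigned long flags;
	unsigned long long t0, t1;

	kthread_run(writer_fn, NULL, "rwlock-writer");
	msleep(1);		/* give the writer time to take the lock */

	t0 = sched_clock();
	read_lock_irqsave(&test_lock, flags);
	t1 = sched_clock();
	read_unlock_irqrestore(&test_lock, flags);

	printk(KERN_INFO "waited %llu ns for a contended read lock\n", t1 - t0);
	return 0;
}

static void __exit rwlock_wait_test_exit(void)
{
}

module_init(rwlock_wait_test_init);
module_exit(rwlock_wait_test_exit);
MODULE_LICENSE("GPL");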
-Tony
On Fri, Oct 31, 2008 at 01:22:56PM -0700, Luck, Tony wrote:
> > Any comments on my second patch series? Not even an Acked-by? Dislike of
> > the concept? Should I post it again?
>
> Better response to interrupts is good, but it comes at the
> cost of longer latency acquiring the lock (in the case where
> an interrupt happens while we are waiting for the lock, and
> the lock is freed while we are off executing the interrupt
> handler).
>
> Any suggestions on how to measure the trade-off here? Possibly
> it doesn't matter because this may only be significant when
> the lock is heavily contended and you are probably already
> hosed in this case.
Just a few years of experiential evidence. The equivalent of this patch
has been in the SuSE SLES10 kernel for years and has never been found
to cause problems.
Sorry I don't have more detail. The person at SGI who first detected
this problem has long since left, and actually passed away from an
aneurysm a couple of years ago. The first version of the patch was in our
one-off kernel based on Red Hat's 2.4 kernel. The patch was not pushed
to SuSE and the community for SLES9. It was in SLES10, but I cannot
find our internal tracking tool's record of it (searching is failing me
this morning). Without that, I have not been able to find out why it was
not pushed to the community.
Thanks,
Robin
> Just a few years of experiential evidence. The equivalent of this patch
> has been in the SuSE SLES10 kernel for years and has never been found
> to cause problems.
That's good to know.
Petr: I tried to apply these ... but the patches seem corrupted
(leading spaces dropped? ... I checked in the marc.info archive
and they seem broken there too). Can you regenerate them and
repost (attachments are ok).
-Tony
On Mon, 2008-11-03 at 13:47 -0800, Luck, Tony wrote:
> > Just a few years of experiential evidence. The equivalent of this patch
> > has been in the SuSE SLES10 kernel for years and has never been found
> > to cause problems.
>
> That's good to know.
>
> Petr: I tried to apply these ... but the patches seem corrupted
> (leading spaces dropped? ... I checked in the marc.info archive
> and they seem broken there too). Can you regenerate them and
> repost (attachments are ok).
Oops. Interesting.
Anyway, I can see that Robin Holt has already regenerated the patches
against the latest directory structure, so I assume there's nothing more
to be done.
Petr
On Thu, 2008-10-23 at 18:06 +0200, Petr Tesarik wrote:
> The new macro LOCK_CONTENDED_FLAGS expands to the correct implementation
> depending on the config options, so that IRQs are re-enabled when
> possible, but they remain disabled if CONFIG_LOCKDEP is set.
>
> Signed-off-by: Petr Tesarik <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Sorry, got lost in the inbox...
> ---
> include/linux/lockdep.h | 17 +++++++++++++++++
> kernel/spinlock.c | 12 ++----------
> 2 files changed, 19 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> index 331e5f1..1e6e578 100644
> --- a/include/linux/lockdep.h
> +++ b/include/linux/lockdep.h
> @@ -376,6 +376,23 @@ do { \
>
> #endif /* CONFIG_LOCK_STAT */
>
> +#ifdef CONFIG_LOCKDEP
> +
> +/*
> + * On lockdep we dont want the hand-coded irq-enable of
> + * _raw_*_lock_flags() code, because lockdep assumes
> + * that interrupts are not re-enabled during lock-acquire:
> + */
> +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
> + LOCK_CONTENDED((_lock), (try), (lock))
> +
> +#else /* CONFIG_LOCKDEP */
> +
> +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
> + lockfl((_lock), (flags))
> +
> +#endif /* CONFIG_LOCKDEP */
> +
> #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
> extern void early_init_irq_lock_class(void);
> #else
> diff --git a/kernel/spinlock.c b/kernel/spinlock.c
> index 29ab207..cf41b87 100644
> --- a/kernel/spinlock.c
> +++ b/kernel/spinlock.c
> @@ -299,16 +299,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
> local_irq_save(flags);
> preempt_disable();
> spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
> - /*
> - * On lockdep we dont want the hand-coded irq-enable of
> - * _raw_spin_lock_flags() code, because lockdep assumes
> - * that interrupts are not re-enabled during lock-acquire:
> - */
> -#ifdef CONFIG_LOCKDEP
> - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
> -#else
> - _raw_spin_lock_flags(lock, &flags);
> -#endif
> + LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
> + _raw_spin_lock_flags, &flags);
> return flags;
> }
> EXPORT_SYMBOL(_spin_lock_irqsave_nested);
>
>
On Thu, 2008-10-23 at 18:06 +0200, Petr Tesarik wrote:
> Pass the original flags to rwlock arch-code, so that it can re-enable
> interrupts if implemented for that architecture.
>
> Initially, make __raw_read_lock_flags and __raw_write_lock_flags
> stubs which just do the same thing as non-flags variants.
>
> Signed-off-by: Petr Tesarik <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
> diff --git a/kernel/spinlock.c b/kernel/spinlock.c
> index cf41b87..7c2f5b5 100644
> --- a/kernel/spinlock.c
> +++ b/kernel/spinlock.c
> @@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
> local_irq_save(flags);
> preempt_disable();
> rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
> - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
> + LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
> + _raw_read_lock_flags, &flags);
> return flags;
> }
> EXPORT_SYMBOL(_read_lock_irqsave);
> @@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
> local_irq_save(flags);
> preempt_disable();
> rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
> - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
> + LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
> + _raw_write_lock_flags, &flags);
> return flags;
> }
> EXPORT_SYMBOL(_write_lock_irqsave);
>
>