One type of bug steadily being fought is the following one:
unsigned int flags;
spin_lock_irqsave(&lock, flags);
where "flags" should be "unsigned long". Here is a far-from-complete list of
commits fixing such bugs:
099575b6cb7eaf18211ba72de56264f67651b90b
5efee174f8a101cafb1607d2b629bed353701457
c53421b18f205c5f97c604ae55c6a921f034b0f6 (many)
ca7e235f5eb960d83b45cef4384b490672538cd9
361f6ed1d00f666a1a7c33f3e9aaccb713f9b9e4
So far remedies were:
a) grep(1) -- obviously fragile. I tried at some point grepping for
spin_lock_irqsave(), found quite a few, but it became booooring quickly.
b) BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)) -- was tried,
brutally broke some arches, survived one commit before revert :^)
Doesn't work on i386 where sizeof(unsigned int) == sizeof(unsigned long).
So it would be nice to have something more robust.
irq_flags_t
New type for use with spin_lock_irqsave() and friends.
* irq_flags_t is unsigned long which shouldn't change any code
(except broken one)
* irq_flags_t is marked with __bitwise__ which means sparse(1) will warn
developer when he accidentally uses wrong type or totally wrong variable.
* irq_flags_t allows conversion to struct instead of typedef without flag day.
This will give compile-time breakage of buggy users later.
* irq_flags_t allows arch maintainers to eventually switch to something
smaller than "unsigned long" if they want to.
Typical code after conversion:
irq_flags_t flags;
spin_lock_irqsave(&lock, flags);
[stuff]
spin_unlock_irqrestore(&lock, flags);
This patch adds type itself, annotates core locking functions in generic
headers and i386 arch.
P.S.: Anyone checking for differences in sparse logs -- don't panic,
just remove __bitwise__ .
Signed-off-by: Alexey Dobriyan <[email protected]>
---
include/asm-x86/irqflags_32.h | 20 ++++++++++----------
include/asm-x86/paravirt.h | 14 +++++++-------
include/linux/interrupt.h | 10 +++++-----
include/linux/irqflags.h | 2 +-
include/linux/spinlock_api_smp.h | 14 +++++++-------
include/linux/spinlock_up.h | 2 +-
include/linux/types.h | 1 +
kernel/spinlock.c | 24 ++++++++++++------------
8 files changed, 44 insertions(+), 43 deletions(-)
--- a/include/asm-x86/irqflags_32.h
+++ b/include/asm-x86/irqflags_32.h
@@ -12,14 +12,14 @@
#include <asm/processor-flags.h>
#ifndef __ASSEMBLY__
-static inline unsigned long native_save_fl(void)
+static inline irq_flags_t native_save_fl(void)
{
- unsigned long f;
+ irq_flags_t f;
asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
return f;
}
-static inline void native_restore_fl(unsigned long f)
+static inline void native_restore_fl(irq_flags_t f)
{
asm volatile("pushl %0 ; popfl": /* no output */
:"g" (f)
@@ -52,12 +52,12 @@ static inline void native_halt(void)
#else
#ifndef __ASSEMBLY__
-static inline unsigned long __raw_local_save_flags(void)
+static inline irq_flags_t __raw_local_save_flags(void)
{
return native_save_fl();
}
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void raw_local_irq_restore(irq_flags_t flags)
{
native_restore_fl(flags);
}
@@ -93,9 +93,9 @@ static inline void halt(void)
/*
* For spinlocks, etc:
*/
-static inline unsigned long __raw_local_irq_save(void)
+static inline irq_flags_t __raw_local_irq_save(void)
{
- unsigned long flags = __raw_local_save_flags();
+ irq_flags_t flags = __raw_local_save_flags();
raw_local_irq_disable();
@@ -118,14 +118,14 @@ static inline unsigned long __raw_local_irq_save(void)
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline int raw_irqs_disabled_flags(irq_flags_t flags)
{
- return !(flags & X86_EFLAGS_IF);
+ return !((unsigned long __force)flags & X86_EFLAGS_IF);
}
static inline int raw_irqs_disabled(void)
{
- unsigned long flags = __raw_local_save_flags();
+ irq_flags_t flags = __raw_local_save_flags();
return raw_irqs_disabled_flags(flags);
}
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -136,8 +136,8 @@ struct pv_irq_ops {
* returned from save_fl are undefined, and may be ignored by
* restore_fl.
*/
- unsigned long (*save_fl)(void);
- void (*restore_fl)(unsigned long);
+ irq_flags_t (*save_fl)(void);
+ void (*restore_fl)(irq_flags_t);
void (*irq_disable)(void);
void (*irq_enable)(void);
void (*safe_halt)(void);
@@ -1014,9 +1014,9 @@ struct paravirt_patch_site {
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
-static inline unsigned long __raw_local_save_flags(void)
+static inline irq_flags_t __raw_local_save_flags(void)
{
- unsigned long f;
+ irq_flags_t f;
asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
PARAVIRT_CALL
@@ -1028,7 +1028,7 @@ static inline unsigned long __raw_local_save_flags(void)
return f;
}
-static inline void raw_local_irq_restore(unsigned long f)
+static inline void raw_local_irq_restore(irq_flags_t f)
{
asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
PARAVIRT_CALL
@@ -1062,9 +1062,9 @@ static inline void raw_local_irq_enable(void)
: "memory", "eax", "cc");
}
-static inline unsigned long __raw_local_irq_save(void)
+static inline irq_flags_t __raw_local_irq_save(void)
{
- unsigned long f;
+ irq_flags_t f;
f = __raw_local_save_flags();
raw_local_irq_disable();
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -122,7 +122,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
#endif
}
-static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, irq_flags_t *flags)
{
disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
@@ -146,7 +146,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
enable_irq(irq);
}
-static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+static inline void enable_irq_lockdep_irqrestore(unsigned int irq, irq_flags_t *flags)
{
#ifdef CONFIG_LOCKDEP
local_irq_restore(*flags);
@@ -211,17 +211,17 @@ static inline void __deprecated sti(void)
{
local_irq_enable();
}
-static inline void __deprecated save_flags(unsigned long *x)
+static inline void __deprecated save_flags(irq_flags_t *x)
{
local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
+static inline void __deprecated restore_flags(irq_flags_t x)
{
local_irq_restore(x);
}
-static inline void __deprecated save_and_cli(unsigned long *x)
+static inline void __deprecated save_and_cli(irq_flags_t *x)
{
local_irq_save(*x);
}
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -84,7 +84,7 @@
#define irqs_disabled() \
({ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
raw_local_save_flags(flags); \
raw_irqs_disabled_flags(flags); \
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -30,13 +30,13 @@ void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+irq_flags_t __lockfunc _spin_lock_irqsave(spinlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+irq_flags_t __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
@@ -51,11 +51,11 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, irq_flags_t flags)
__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
__releases(lock);
#endif /* __LINUX_SPINLOCK_API_SMP_H */
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,7 +26,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(raw_spinlock_t *lock, irq_flags_t flags)
{
local_irq_save(flags);
lock->slock = 0;
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -188,6 +188,7 @@ typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
+typedef unsigned long __bitwise__ irq_flags_t;
#ifdef CONFIG_RESOURCES_64BIT
typedef u64 resource_size_t;
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -76,9 +76,9 @@ void __lockfunc _read_lock(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+irq_flags_t __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -115,9 +115,9 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
}
EXPORT_SYMBOL(_spin_lock_bh);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -145,9 +145,9 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_lock_bh);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -222,9 +222,9 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
\
EXPORT_SYMBOL(_##op##_lock); \
\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+irq_flags_t __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
for (;;) { \
preempt_disable(); \
@@ -254,7 +254,7 @@ EXPORT_SYMBOL(_##op##_lock_irq); \
\
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
/* */ \
/* Careful: we must exclude softirqs too, hence the */ \
@@ -341,7 +341,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_unlock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, irq_flags_t flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
@@ -368,7 +368,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
}
EXPORT_SYMBOL(_spin_unlock_bh);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
_raw_read_unlock(lock);
@@ -395,7 +395,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_unlock_bh);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
_raw_write_unlock(lock);
On Sun, Oct 21, 2007 at 03:55:46AM +0400, Alexey Dobriyan wrote:
> * irq_flags_t is marked with __bitwise__ which means sparse(1) will warn
> developer when he accidently uses wrong type or totally wrong variable.
> * irq_flags_t allows conversion to struct instead of typedef without flag day.
> This will give compile-time breakage of buggy users later.
> * irq_flags_t allows arch maintainers to eventually switch to something
> smaller than "unsigned long" if they want to.
> P.S.: Anyone checking for differences in sparse logs -- don't panic,
> just remove __bitwise__ .
Umm... Could you make that conditional on something, so that it could
be done with e.g. -D__CHECK_IRQFLAGS__? We could make that unconditional
closer to the end of conversion.
On Sun, Oct 21, 2007 at 01:54:18AM +0100, Al Viro wrote:
> On Sun, Oct 21, 2007 at 03:55:46AM +0400, Alexey Dobriyan wrote:
> > * irq_flags_t is marked with __bitwise__ which means sparse(1) will warn
> > developer when he accidently uses wrong type or totally wrong variable.
> > * irq_flags_t allows conversion to struct instead of typedef without flag day.
> > This will give compile-time breakage of buggy users later.
> > * irq_flags_t allows arch maintainers to eventually switch to something
> > smaller than "unsigned long" if they want to.
>
> > P.S.: Anyone checking for differences in sparse logs -- don't panic,
> > just remove __bitwise__ .
>
> Umm... Could you make that conditional on something, so that it could
> be done with e.g. -D__CHECK_IRQFLAGS__? We could make that unconditional
> closer to the end of conversion.
OK! Resending patch #1, patch #2 stays the same.
[PATCH 1/2] irq_flags_t: intro and core annotations
One type of bug steadily being fought is the following one:
unsigned int flags;
spin_lock_irqsave(&lock, flags);
where "flags" should be "unsigned long". Here is a far-from-complete list of
commits fixing such bugs:
099575b6cb7eaf18211ba72de56264f67651b90b
5efee174f8a101cafb1607d2b629bed353701457
c53421b18f205c5f97c604ae55c6a921f034b0f6 (many)
ca7e235f5eb960d83b45cef4384b490672538cd9
361f6ed1d00f666a1a7c33f3e9aaccb713f9b9e4
So far remedies were:
a) grep(1) -- obviously fragile. I tried at some point grepping for
spin_lock_irqsave(), found quite a few, but it became booooring quickly.
b) BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)) -- was tried,
brutally broke some arches, survived one commit before revert :^)
Doesn't work on i386 where sizeof(unsigned int) == sizeof(unsigned long).
So it would be nice to have something more robust.
irq_flags_t
New type for use with spin_lock_irqsave() and friends.
* irq_flags_t is unsigned long which shouldn't change any code
(except broken one)
* irq_flags_t is marked with __bitwise__ which means sparse(1) will warn
developer when he accidentally uses wrong type or totally wrong variable.
* irq_flags_t allows conversion to struct instead of typedef without flag day.
This will give compile-time breakage of buggy users later.
* irq_flags_t allows arch maintainers to eventually switch to something
smaller than "unsigned long" if they want to.
Typical code after conversion:
irq_flags_t flags;
spin_lock_irqsave(&lock, flags);
[stuff]
spin_unlock_irqrestore(&lock, flags);
This patch adds type itself, annotates core locking functions in generic
headers and i386 arch. Warnings become visible by passing __CHECK_IRQFLAGS__
to sparse:
make C=2 CF="-D__CHECK_IRQFLAGS__"
Signed-off-by: Alexey Dobriyan <[email protected]>
---
include/asm-x86/irqflags_32.h | 20 ++++++++++----------
include/asm-x86/paravirt.h | 14 +++++++-------
include/linux/interrupt.h | 10 +++++-----
include/linux/irqflags.h | 2 +-
include/linux/spinlock_api_smp.h | 14 +++++++-------
include/linux/spinlock_up.h | 2 +-
include/linux/types.h | 5 +++++
kernel/spinlock.c | 24 ++++++++++++------------
8 files changed, 48 insertions(+), 43 deletions(-)
--- a/include/asm-x86/irqflags_32.h
+++ b/include/asm-x86/irqflags_32.h
@@ -12,14 +12,14 @@
#include <asm/processor-flags.h>
#ifndef __ASSEMBLY__
-static inline unsigned long native_save_fl(void)
+static inline irq_flags_t native_save_fl(void)
{
- unsigned long f;
+ irq_flags_t f;
asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
return f;
}
-static inline void native_restore_fl(unsigned long f)
+static inline void native_restore_fl(irq_flags_t f)
{
asm volatile("pushl %0 ; popfl": /* no output */
:"g" (f)
@@ -52,12 +52,12 @@ static inline void native_halt(void)
#else
#ifndef __ASSEMBLY__
-static inline unsigned long __raw_local_save_flags(void)
+static inline irq_flags_t __raw_local_save_flags(void)
{
return native_save_fl();
}
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void raw_local_irq_restore(irq_flags_t flags)
{
native_restore_fl(flags);
}
@@ -93,9 +93,9 @@ static inline void halt(void)
/*
* For spinlocks, etc:
*/
-static inline unsigned long __raw_local_irq_save(void)
+static inline irq_flags_t __raw_local_irq_save(void)
{
- unsigned long flags = __raw_local_save_flags();
+ irq_flags_t flags = __raw_local_save_flags();
raw_local_irq_disable();
@@ -118,14 +118,14 @@ static inline unsigned long __raw_local_irq_save(void)
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline int raw_irqs_disabled_flags(irq_flags_t flags)
{
- return !(flags & X86_EFLAGS_IF);
+ return !((unsigned long __force)flags & X86_EFLAGS_IF);
}
static inline int raw_irqs_disabled(void)
{
- unsigned long flags = __raw_local_save_flags();
+ irq_flags_t flags = __raw_local_save_flags();
return raw_irqs_disabled_flags(flags);
}
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -136,8 +136,8 @@ struct pv_irq_ops {
* returned from save_fl are undefined, and may be ignored by
* restore_fl.
*/
- unsigned long (*save_fl)(void);
- void (*restore_fl)(unsigned long);
+ irq_flags_t (*save_fl)(void);
+ void (*restore_fl)(irq_flags_t);
void (*irq_disable)(void);
void (*irq_enable)(void);
void (*safe_halt)(void);
@@ -1014,9 +1014,9 @@ struct paravirt_patch_site {
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
-static inline unsigned long __raw_local_save_flags(void)
+static inline irq_flags_t __raw_local_save_flags(void)
{
- unsigned long f;
+ irq_flags_t f;
asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
PARAVIRT_CALL
@@ -1028,7 +1028,7 @@ static inline unsigned long __raw_local_save_flags(void)
return f;
}
-static inline void raw_local_irq_restore(unsigned long f)
+static inline void raw_local_irq_restore(irq_flags_t f)
{
asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
PARAVIRT_CALL
@@ -1062,9 +1062,9 @@ static inline void raw_local_irq_enable(void)
: "memory", "eax", "cc");
}
-static inline unsigned long __raw_local_irq_save(void)
+static inline irq_flags_t __raw_local_irq_save(void)
{
- unsigned long f;
+ irq_flags_t f;
f = __raw_local_save_flags();
raw_local_irq_disable();
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -122,7 +122,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
#endif
}
-static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, irq_flags_t *flags)
{
disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
@@ -146,7 +146,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
enable_irq(irq);
}
-static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+static inline void enable_irq_lockdep_irqrestore(unsigned int irq, irq_flags_t *flags)
{
#ifdef CONFIG_LOCKDEP
local_irq_restore(*flags);
@@ -211,17 +211,17 @@ static inline void __deprecated sti(void)
{
local_irq_enable();
}
-static inline void __deprecated save_flags(unsigned long *x)
+static inline void __deprecated save_flags(irq_flags_t *x)
{
local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
+static inline void __deprecated restore_flags(irq_flags_t x)
{
local_irq_restore(x);
}
-static inline void __deprecated save_and_cli(unsigned long *x)
+static inline void __deprecated save_and_cli(irq_flags_t *x)
{
local_irq_save(*x);
}
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -84,7 +84,7 @@
#define irqs_disabled() \
({ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
raw_local_save_flags(flags); \
raw_irqs_disabled_flags(flags); \
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -30,13 +30,13 @@ void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+irq_flags_t __lockfunc _spin_lock_irqsave(spinlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+irq_flags_t __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
@@ -51,11 +51,11 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, irq_flags_t flags)
__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
__releases(lock);
#endif /* __LINUX_SPINLOCK_API_SMP_H */
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,7 +26,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(raw_spinlock_t *lock, irq_flags_t flags)
{
local_irq_save(flags);
lock->slock = 0;
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -188,6 +188,11 @@ typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
+#ifdef __CHECK_IRQFLAGS__
+typedef unsigned long __bitwise__ irq_flags_t;
+#else
+typedef unsigned long irq_flags_t;
+#endif
#ifdef CONFIG_RESOURCES_64BIT
typedef u64 resource_size_t;
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -76,9 +76,9 @@ void __lockfunc _read_lock(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+irq_flags_t __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -115,9 +115,9 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
}
EXPORT_SYMBOL(_spin_lock_bh);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -145,9 +145,9 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_lock_bh);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+irq_flags_t __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
+ irq_flags_t flags;
local_irq_save(flags);
preempt_disable();
@@ -222,9 +222,9 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
\
EXPORT_SYMBOL(_##op##_lock); \
\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+irq_flags_t __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
for (;;) { \
preempt_disable(); \
@@ -254,7 +254,7 @@ EXPORT_SYMBOL(_##op##_lock_irq); \
\
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
- unsigned long flags; \
+ irq_flags_t flags; \
\
/* */ \
/* Careful: we must exclude softirqs too, hence the */ \
@@ -341,7 +341,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_unlock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, irq_flags_t flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
@@ -368,7 +368,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
}
EXPORT_SYMBOL(_spin_unlock_bh);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
_raw_read_unlock(lock);
@@ -395,7 +395,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
}
EXPORT_SYMBOL(_read_unlock_bh);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, irq_flags_t flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
_raw_write_unlock(lock);
On Sun, Oct 21, 2007 at 01:30:42PM +0400, Alexey Dobriyan wrote:
> irq_flags_t
>
> New type for use with spin_lock_irqsave() and friends.
Talking about it, why did we ever require this to be a long anyway? I could
get away with a single bit for MIPS; the rest of this variable is pure
bloat. An abstract datatype could help finally fix this.
Ralf
On Mon, 22 Oct 2007 16:29:12 +0100 Ralf Baechle <[email protected]> wrote:
> On Sun, Oct 21, 2007 at 01:30:42PM +0400, Alexey Dobriyan wrote:
>
> > irq_flags_t
> >
> > New type for use with spin_lock_irqsave() and friends.
>
> Talking about it, why did we ever require this to be a long anyway? I could
> get away with a single bit for MIPS; the rest of this variable is pure
> bloat. An abstract datatype could help finally fix this.
>
Yes, it's always been ugly that we use unsigned long for this rather than
abstracting it properly.
However I'd prefer that we have some really good reason for introducing
irq_flags_t now. Simply so that I don't needlessly spend the next two
years wrestling with literally thousands of convert-to-irq_flags_t patches
and having to type "please use irq_flags_t here" in hundreds of patch
reviews. (snivel, wimper)
On Mon, 22 Oct 2007, Andrew Morton wrote:
> On Mon, 22 Oct 2007 16:29:12 +0100 Ralf Baechle <[email protected]> wrote:
> > On Sun, Oct 21, 2007 at 01:30:42PM +0400, Alexey Dobriyan wrote:
> >
> > > irq_flags_t
> > >
> > > New type for use with spin_lock_irqsave() and friends.
> >
> > Talking about it, why did we ever require this to be a long anyway? I could
> > get away with a single bit for MIPS; the rest of this variable is pure
> > bloat. An abstract datatype could help finally fix this.
> >
>
> Yes, it's always been ugly that we use unsigned long for this rather than
> abstracting it properly.
>
> However I'd prefer that we have some really good reason for introducing
> irq_flags_t now. Simply so that I don't needlessly spend the next two
> years wrestling with literally thousands of convert-to-irq_flags_t patches
> and having to type "please use irq_flags_t here" in hundreds of patch
> reviews. (snivel, wimper)
The second part can be fixed easily using
`typedef struct { long x; } irq_flags_t' ;-)
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
On Monday 22 October 2007, Andrew Morton wrote:
> Yes, it's always been ugly that we use unsigned long for this rather than
> abstracting it properly.
>
> However I'd prefer that we have some really good reason for introducing
> irq_flags_t now. Simply so that I don't needlessly spend the next two
> years wrestling with literally thousands of convert-to-irq_flags_t patches
> and having to type "please use irq_flags_t here" in hundreds of patch
> reviews. (snivel, wimper)
On a related note, should we encourage the use of spin_lock() and
spin_lock_irq() instead of spin_lock_irqsave() where possible?
On some architectures, accessing the interrupt flag is a heavyweight
operation, especially when running under a hypervisor, so a number
of drivers could benefit from being converted to not save the flags
at all instead of just changing the type of the flags variable.
Arnd <><
On Mon, Oct 22, 2007 at 09:10:34PM +0200, Arnd Bergmann wrote:
> On a related note, should we encourage the use of spin_lock() and
> spin_lock_irq() instead of spin_lock_irqsave() where possible?
spin_lock(), certainly. On PowerPC, I'm reliably informed it's fewer
instructions to save/restore than it is to disable/enable. So no clear
advantage there.
> On some architectures, accessing the interrupt flag is a heavyweight
> operation, especially when running under a hypervisor, so a number
> of drivers could benefit from being converted to not save the flags
> at all instead of just changing the type of the flags variable.
We certainly don't want to encourage people to blindly make those
conversions ... and I've seen the results of encouraging kernel janitors
to do things a certain way.
--
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."
On Mon, 22 Oct 2007, Matthew Wilcox wrote:
>
> We certainly don't want to encourage people to blindly make those
> conversions ... and I've seen the results of encouraging kernel janitors
> to do things a certain way.
There's another issue: the "irqsave/irqrestore" versions are much safer
than the plain "irq" versions, in case the caller already has interrupts
disabled.
So anybody making the change not only would need to make the performance
argument, he'd better not be a janitor that blindly does the change
without thinking about all call-sites etc..
Linus
On Mon, 22 Oct 2007 12:56:29 -0700 (PDT)
Linus Torvalds <[email protected]> wrote:
>
>
> On Mon, 22 Oct 2007, Matthew Wilcox wrote:
> >
> > We certainly don't want to encourage people to blindly make those
> > conversions ... and I've seen the results of encouraging kernel janitors
> > to do things a certain way.
>
> There's another issue: the "irqsave/irqrestore" versions are much safer
> than the plain "irq" versions, in case the caller already has interrupts
> disabled.
>
> So anybody making the change not only would need to make the performance
> argument, he'd better not be a janitor that blindly does the change
> without thinking about all call-sites etc..
>
It's almost always a bug to do spin_lock_irq() when local interrupts are
disabled. However iirc when we've tried to add runtime debugging to catch
that, it triggered false-positives which made the idea unworkable. I forget
where.
However what we could do is to add a new
spin_lock_irq_tell_me_if_i_goofed() which would perform that runtime check.
Andrew Morton wrote:
> Linus Torvalds <[email protected]> wrote:
>> > On Mon, 22 Oct 2007, Matthew Wilcox wrote:
>>> > > We certainly don't want to encourage people to blindly make those
>>> > > conversions ... and I've seen the results of encouraging kernel janitors
>>> > > to do things a certain way.
>> > There's another issue: the "irqsave/irqrestore" versions are much safer
>> > than the plain "irq" versions, in case the caller already has interrupts
>> > disabled.
> It's almost always a bug to do spin_lock_irq() when local interrupts are
> disabled.
Let me add to the chorus of voices: I continually see two cases where
real bugs crop up:
1) hacker uses spin_lock_irq() in incorrect context (where it is not
safe to do a blind enable/disable)
2) hacker uses spin_lock_irq() correctly, but the surrounding code
changes, thus invalidating prior assumptions.
I would even go so far as to support the drastic measure of deleting
spin_lock_irq().
spin_lock_irqsave() generates fewer bugs, is more future-proof, and by
virtue of 'flags' permits architectures a bit more flexibility.
Jeff
On Monday 22 October 2007, Andrew Morton wrote:
> It's almost always a bug to do spin_lock_irq() when local interrupts are
> disabled. ?However iirc when we've tried to add runtime debugging to catch
> that, it triggered false-positives which made the idea unworkable. ?I forget
> where.
I tried this as well a few years ago, and I think I hit a few places in
the early initialization, but nothing unfixable.
> However what we could do is to add a new
> spin_lock_irq_tell_me_if_i_goofed() which would perform that runtime check.
How about the opposite? We could have a raw_spin_lock_irq() in places where
there are valid uses of spin_lock_irq() with irqs disabled and the same
for spin_unlock_irq with interrupts already enabled.
I can try to come up with a new implementation, including some rate-limiting,
which I think my first attempt was missing.
Arnd <><
On Mon, 22 Oct 2007, Arnd Bergmann wrote:
> On Monday 22 October 2007, Andrew Morton wrote:
> > It's almost always a bug to do spin_lock_irq() when local interrupts are
> > disabled. However iirc when we've tried to add runtime debugging to catch
> > that, it triggered false-positives which made the idea unworkable. I forget
> > where.
>
> I tried this as well a few years ago, and I think I hit a few places in
> the early initialization, but nothing unfixable.
Hmm, lockdep checks this already. If it does not catch it, we need to fix it.
tglx
From: Jeff Garzik <[email protected]>
Date: Mon, 22 Oct 2007 16:47:16 -0400
> Let me add to the chorus of voices: I continually see two cases where
> real bugs crop up:
>
> 1) hacker uses spin_lock_irq() in incorrect context (where it is not
> safe to do a blind enable/disable)
>
> 2) hacker uses spin_lock_irq() correctly, but the surrounding code
> changes, thus invalidating prior assumptions.
>
> I would even go so far as to support the drastic measure of deleting
> spin_lock_irq().
>
> spin_lock_irqsave() generates fewer bugs, is more future-proof, and by
> virtue of 'flags' permits architectures a bit more flexibility.
Whilst I agree with you fully on the error-prone'ness argument,
reading the processor interrupt level to fill in the 'flags'
can have non-trivial cost. I think it's about 9 cycles and
a full pipeline flush on most sparc64 chips for example.
The write to turn off interrupts costs about the same, so you'd
essentially be doubling the execution cost in every case that
you convert as you propose.
I seem to recall that on modern x86 chips the cost of dorking
around with eflags like this is even higher.
What's the relative cost of pushfl/popl/pushl/popfl vs. cli/sti on
like a core2 duo or a k8?
For 64-bit powerpc's software interrupt disabling scheme it seems
to cost is about equal for both cases.
On Monday 22 October 2007, Thomas Gleixner wrote:
> On Mon, 22 Oct 2007, Arnd Bergmann wrote:
>
> > I tried this as well a few years ago, and I think I hit a few places in
> > the early initialization, but nothing unfixable.
>
> Hmm, lockdep checks this already. If it does not catch it, we need to fix it.
I've looked at the lockdep code for some time but couldn't find out how
it is trying to catch this kind of bug. AFAICS, there are all sorts of
really complex spinlock/irqflags interaction problems that lockdep checks
for, but not this really simple one ;-)
I tried the trivial annotation below and (with lockdep enabled) got a few
warnings at boot time, but only one that I could still find in the log
buffer:
[ 10.298910] WARNING: at /home/arnd/linux/linux-2.6/kernel/signal.c:1799 get_signal_to_deliver()
[ 10.298978] [<c0105218>] show_trace_log_lvl+0x1a/0x2f
[ 10.299072] [<c0105c06>] show_trace+0x12/0x14
[ 10.299160] [<c0105d19>] dump_stack+0x15/0x17
[ 10.299248] [<c0129602>] get_signal_to_deliver+0x73/0x636
[ 10.299338] [<c0103574>] do_notify_resume+0x91/0x728
[ 10.299427] [<c010439c>] work_notifysig+0x13/0x1b
This one looks like in the syscall exit path, after disabling the interrupts,
we call back into a C function that first disables and then enables
interrupts again unconditionally, which seems to do the right thing here,
but still feels wrong.
I suppose lockdep should be extended to catch this kind of bug as well,
instead of the hack I used to find this.
Arnd <><
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index c376f3b..3ece198 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -207,13 +207,8 @@ do { \
#endif
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
-
-#define read_lock_irq(lock) _read_lock_irq(lock)
#define read_lock_bh(lock) _read_lock_bh(lock)
-
-#define write_lock_irq(lock) _write_lock_irq(lock)
#define write_lock_bh(lock) _write_lock_bh(lock)
/*
@@ -221,13 +216,25 @@ do { \
*/
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
!defined(CONFIG_SMP)
+# define spin_lock_irq(lock) \
+ do { WARN_ON_ONCE(irqs_disabled()); _spin_lock_irq(lock); } while (0)
+# define read_lock_irq(lock) \
+ do { WARN_ON_ONCE(irqs_disabled()); _read_lock_irq(lock); } while (0)
+# define write_lock_irq(lock) \
+ do { WARN_ON_ONCE(irqs_disabled()); _write_lock_irq(lock); } while (0)
# define spin_unlock(lock) _spin_unlock(lock)
# define read_unlock(lock) _read_unlock(lock)
# define write_unlock(lock) _write_unlock(lock)
-# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-# define read_unlock_irq(lock) _read_unlock_irq(lock)
-# define write_unlock_irq(lock) _write_unlock_irq(lock)
+# define spin_unlock_irq(lock) \
+ do { WARN_ON_ONCE(!irqs_disabled()); _spin_unlock_irq(lock); } while (0)
+# define read_unlock_irq(lock) \
+ do { WARN_ON_ONCE(!irqs_disabled()); _read_unlock_irq(lock); } while (0)
+# define write_unlock_irq(lock) \
+ do { WARN_ON_ONCE(!irqs_disabled()); _write_unlock_irq(lock); } while (0)
#else
+# define spin_lock_irq(lock) _spin_lock_irq(lock)
+# define read_lock_irq(lock) _read_lock_irq(lock)
+# define write_lock_irq(lock) _write_lock_irq(lock)
# define spin_unlock(lock) \
do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define read_unlock(lock) \
usb_hcd_flush_endpoint() has a retry loop that starts with a spin_lock_irq(),
but only gives up the spinlock, not the irq_disable before jumping to the
rescan label.
Split the spin_lock_irq into the retryable part and the local_irq_disable()
that is only done once as a micro-optimization and slight cleanup.
Signed-off-by: Arnd Bergmann <[email protected]>
---
On Tuesday 23 October 2007, I wrote:
> I tried the trivial annotation below and (with lockdep enabled) got a few
> warnings at boot time, but only one that I could still find in the log
> buffer:
One more such example that was not found by lockdep. I guess this counts
as a false positive, as it is clearly harmless, but working around
it is a small optimization for the case where local_irq_disable()
is a hypervisor call.
Should we try to fix this class of (non-)problem in other places?
Will this patch cause a different warning with lockdep since now we
are pairing spin_lock() with spin_unlock_irq()?
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1312,8 +1312,9 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
hcd = bus_to_hcd(udev->bus);
/* No more submits can occur */
+ local_irq_disable();
rescan:
- spin_lock_irq(&hcd_urb_list_lock);
+ spin_lock(&hcd_urb_list_lock);
list_for_each_entry (urb, &ep->urb_list, urb_list) {
int is_in;
Jeff Garzik <[email protected]> wrote:
>
> Let me add to the chorus of voices: I continually see two cases where
> real bugs crop up:
>
> 1) hacker uses spin_lock_irq() in incorrect context (where it is not
> safe to do a blind enable/disable)
>
> 2) hacker uses spin_lock_irq() correctly, but the surrounding code
> changes, thus invalidating prior assumptions.
>
> I would even go so far as to support the drastic measure of deleting
> spin_lock_irq().
>
> spin_lock_irqsave() generates fewer bugs, is more future-proof, and by
> virtue of 'flags' permits architectures a bit more flexibility.
Could we add a debug option that warned if spin_lock_irq is
executed with IRQs turned off already?
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Tue, 23 Oct 2007, Arnd Bergmann wrote:
> usb_hcd_flush_endpoint() has a retry loop that starts with a spin_lock_irq(),
> but only gives up the spinlock, not the irq_disable before jumping to the
> rescan label.
>
> Split the spin_lock_irq into the retryable part and the local_irq_disable()
> that is only done once as a micro-optimization and slight cleanup.
I agree with your sentiment, but it would be better to solve this
problem without using local_irq_disable(). The patch below does this.
---
Signed-off-by: Alan Stern <[email protected]>
Index: usb-2.6/drivers/usb/core/hcd.c
===================================================================
--- usb-2.6.orig/drivers/usb/core/hcd.c
+++ usb-2.6/drivers/usb/core/hcd.c
@@ -1312,8 +1312,8 @@ void usb_hcd_flush_endpoint(struct usb_d
hcd = bus_to_hcd(udev->bus);
/* No more submits can occur */
-rescan:
spin_lock_irq(&hcd_urb_list_lock);
+rescan:
list_for_each_entry (urb, &ep->urb_list, urb_list) {
int is_in;
@@ -1346,6 +1346,7 @@ rescan:
usb_put_urb (urb);
/* list contents may have changed */
+ spin_lock(&hcd_urb_list_lock);
goto rescan;
}
spin_unlock_irq(&hcd_urb_list_lock);
Herbert Xu wrote:
> Jeff Garzik <[email protected]> wrote:
>> Let me add to the chorus of voices: I continually see two cases where
>> real bugs crop up:
>>
>> 1) hacker uses spin_lock_irq() in incorrect context (where it is not
>> safe to do a blind enable/disable)
>>
>> 2) hacker uses spin_lock_irq() correctly, but the surrounding code
>> changes, thus invalidating prior assumptions.
>>
>> I would even go so far as to support the drastic measure of deleting
>> spin_lock_irq().
>>
>> spin_lock_irqsave() generates fewer bugs, is more future-proof, and by
>> virtue of 'flags' permits architectures a bit more flexibility.
>
> Could we add a debug option that warned if spin_lock_irq is
> executed with IRQs turned off already?
Seems reasonable but perhaps arch-specific?
Also, I think someone (akpm?) mentioned an effort had been made before,
and run into some problems. I don't have details...
Jeff
On Wednesday 24 October 2007, Jeff Garzik wrote:
> >
> > Could we add a debug option that warned if spin_lock_irq is
> > executed with IRQs turned off already?
>
> Seems reasonable but perhaps arch-specific?
>
> Also, I think someone (akpm?) mentioned an effort had been made before,
> and run into some problems. I don't have details...
I already posted a patch in this thread that does exactly that (and
warn about spin_unlock_irq with IRQs enabled), see my earlier reply.
The patch works fine, but my feeling is that it belongs into lockdep
instead.
Arnd <><
Hi,
On Oct 21 2007, Alexey Dobriyan wrote:
>
>One of type of bugs steadily being fought is the following one:
>
> unsigned int flags;
> spin_lock_irqsave(&lock, flags);
>
>where "flags" should be "unsigned long". Here is far from complete list
>of commits fixing such bugs:
>
How about making spin_lock_irqsave actually take a pointer to flags?
(Which would be the logical choice if it were a function and not a
macro...) That would flag up all violations ("without cast to different
pointer" or so) while usually not breaking compilation.
Of course, irq_flags_t is probably the best long-term solution if one
wants to hide a struct. (Even then perhaps, use a pointer instead?)
On Tue, Oct 23, 2007 at 12:01:37AM -0400, Alan Stern wrote:
> On Tue, 23 Oct 2007, Arnd Bergmann wrote:
>
> > usb_hcd_flush_endpoint() has a retry loop that starts with a spin_lock_irq(),
> > but only gives up the spinlock, not the irq_disable before jumping to the
> > rescan label.
> >
> > Split the spin_lock_irq into the retryable part and the local_irq_disable()
> > that is only done once as a micro-optimization and slight cleanup.
>
> I agree with your sentiment, but it would be better to solve this
> problem without using local_irq_disable(). The patch below does this.
>
> ---
>
> Signed-off-by: Alan Stern <[email protected]>
Alan, is this something you want added to the tree and in before 2.6.24
is out?
thanks,
greg k-h
On Thu, 25 Oct 2007, Greg KH wrote:
> On Tue, Oct 23, 2007 at 12:01:37AM -0400, Alan Stern wrote:
> > On Tue, 23 Oct 2007, Arnd Bergmann wrote:
> >
> > > usb_hcd_flush_endpoint() has a retry loop that starts with a spin_lock_irq(),
> > > but only gives up the spinlock, not the irq_disable before jumping to the
> > > rescan label.
> > >
> > > Split the spin_lock_irq into the retryable part and the local_irq_disable()
> > > that is only done once as a micro-optimization and slight cleanup.
> >
> > I agree with your sentiment, but it would be better to solve this
> > problem without using local_irq_disable(). The patch below does this.
> >
> > ---
> >
> > Signed-off-by: Alan Stern <[email protected]>
>
> Alan, is this something you want added to the tree and in before 2.6.24
> is out?
Yes. It's a small thing, but we're better off keeping IRQ
enable/disable calls properly balanced.
Alan Stern
Hi,
On Sun, 21 Oct 2007, Alexey Dobriyan wrote:
> So far remedies were:
> a) grep(1) -- obviously fragile. I tried at some point grepping for
> spin_lock_irqsave(), found quite a few, but it became booooring quickly.
> b) BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)) -- was tried,
> brutally broke some arches, survived one commit before revert :^)
> Doesn't work on i386 where sizeof(unsigned int) == sizeof(unsigned long).
>
> So it would be nice to have something more robust.
If it's just about the type checking, something like below should pretty
much do the same.
> * irq_flags_t allows arch maintainers to eventually switch to something
> smaller than "unsigned long" if they want to.
Considering how painful this conversion could be, the question is whether
this is really needed. Comments so far suggest some archs don't need all
of it, but does someone need more?
bye, Roman
---
include/linux/irqflags.h | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
Index: linux-2.6/include/linux/irqflags.h
===================================================================
--- linux-2.6.orig/include/linux/irqflags.h
+++ linux-2.6/include/linux/irqflags.h
@@ -41,6 +41,10 @@
# define INIT_TRACE_IRQFLAGS
#endif
+static __always_inline void __irq_flags_check(unsigned long *flags)
+{
+}
+
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#include <asm/irqflags.h>
@@ -50,10 +54,11 @@
#define local_irq_disable() \
do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
#define local_irq_save(flags) \
- do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
+ do { __irq_flags_check(&flags); raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
#define local_irq_restore(flags) \
do { \
+ __irq_flags_check(&flags); \
if (raw_irqs_disabled_flags(flags)) { \
raw_local_irq_restore(flags); \
trace_hardirqs_off(); \
@@ -69,8 +74,8 @@
*/
# define raw_local_irq_disable() local_irq_disable()
# define raw_local_irq_enable() local_irq_enable()
-# define raw_local_irq_save(flags) local_irq_save(flags)
-# define raw_local_irq_restore(flags) local_irq_restore(flags)
+# define raw_local_irq_save(flags) ({ __irq_flags_check(&flags); local_irq_save(flags); })
+# define raw_local_irq_restore(flags) ({ __irq_flags_check(&flags); local_irq_restore(flags); })
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
On Sat, Oct 27, 2007 at 09:20:43PM +0200, Roman Zippel wrote:
> On Sun, 21 Oct 2007, Alexey Dobriyan wrote:
>
> > So far remedies were:
> > a) grep(1) -- obviously fragile. I tried at some point grepping for
> > spin_lock_irqsave(), found quite a few, but it became booooring quickly.
> > b) BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)) -- was tried,
> > brutally broke some arches, survived one commit before revert :^)
> > Doesn't work on i386 where sizeof(unsigned int) == sizeof(unsigned long).
> >
> > So it would be nice to have something more robust.
>
> If it's just about the type checking, something like below should pretty
> much do the same.
It won't catch the following if both variables are unsigned long:
spin_lock_irqsave(&lock, flags);
[stuff]
spin_unlock_irqrestore(&lock, foo->flags);
It won't catch "static unsigned long flags;". With sparse, we can
eventually mark type as "on-stack only".
> > * irq_flags_t allows arch maintainers to eventually switch to something
> > smaller than "unsigned long" if they want to.
>
> Considering how painful this conversion could be, the question is whether
> this is really needed.
In the long run, I believe, this is needed. We want more bugs to be
found automatically, so we can have more time for non-trivial bugs.
> Comments so far suggest some archs don't need all
> of it, but does someone need more?
I don't know.
> --- linux-2.6.orig/include/linux/irqflags.h
> +++ linux-2.6/include/linux/irqflags.h
> @@ -41,6 +41,10 @@
> # define INIT_TRACE_IRQFLAGS
> #endif
>
> +static __always_inline void __irq_flags_check(unsigned long *flags)
> +{
> +}
> +
> #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
>
> #include <asm/irqflags.h>
> @@ -50,10 +54,11 @@
> #define local_irq_disable() \
> do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
> #define local_irq_save(flags) \
> - do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
> + do { __irq_flags_check(&flags); raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
>
> #define local_irq_restore(flags) \
> do { \
> + __irq_flags_check(&flags); \
> if (raw_irqs_disabled_flags(flags)) { \
> raw_local_irq_restore(flags); \
> trace_hardirqs_off(); \
> @@ -69,8 +74,8 @@
> */
> # define raw_local_irq_disable() local_irq_disable()
> # define raw_local_irq_enable() local_irq_enable()
> -# define raw_local_irq_save(flags) local_irq_save(flags)
> -# define raw_local_irq_restore(flags) local_irq_restore(flags)
> +# define raw_local_irq_save(flags) ({ __irq_flags_check(&flags); local_irq_save(flags); })
> +# define raw_local_irq_restore(flags) ({ __irq_flags_check(&flags); local_irq_restore(flags); })
> #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
>
> #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
On Thu, Oct 25, 2007 at 05:40:03PM +0200, Jan Engelhardt wrote:
> On Oct 21 2007, Alexey Dobriyan wrote:
> >
> >One of type of bugs steadily being fought is the following one:
> >
> > unsigned int flags;
> > spin_lock_irqsave(&lock, flags);
> >
> >where "flags" should be "unsigned long". Here is far from complete list
> >of commits fixing such bugs:
> >
>
> How about making spin_lock_irqsave actually take a pointer to flags?
How would you do it without flag day?
> (Which would be the logical choice if it were a function and not a
> macro...) That would flag up all violations ("without cast to different
> pointer" or so) while usually not breaking compilation.
>
> Of course, irq_flags_t is probably the best long-term solution if one
> wants to hide a struct. (Even then perhaps, use a pointer instead?)
IIRC, Christoph mentioned:
irq_flags_t flags;
flags = spin_lock_irqXXX(&lock);
spin_unlock_irqYYY(&lock, flags);
where XXX and YYY are still to be found good names :^) It's also a solution
without flag day and with more sane lock part -- "how flags are modified
if they are passed by value?"
I start to like this proposal but I can't come up with good names.
On Oct 28 2007 00:24, Alexey Dobriyan wrote:
>
>> (Which would be the logical choice if it were a function and not a
>> macro...) That would flag up all violations ("without cast to different
>> pointer" or so) while usually not breaking compilation.
>>
>> Of course, irq_flags_t is probably the best long-term solution if one
>> wants to hide a struct. (Even then perhaps, use a pointer instead?)
>
>IIRC, Christoph mentioned:
>
> irq_flags_t flags;
>
> flags = spin_lock_irqXXX(&lock);
> spin_unlock_irqYYY(&lock, flags);
>
>where XXX and YYY are still to be found good names :^) It's also a solution
>without flag day and with more sane lock part -- "how flags are modified
>if they are passed by value?"
>
>I start to like this proposal but I can't come up with good names.
>
The BSD way: just add a number -- spin_lock_irq2()
Of course names are preferable.
irq_spin_lock and irq_spin_unlock?
spinirq_lock, spinirq_unlock?
spin_lock_irq_disable, spin_lock_irq_enable (a bit verbose...)
Maybe the 'irq' part should be completely dropped from the name,
as with -rt extensions, it behaves differently, doesn't it?
(Or was it preemption?)
If it was a home project, I'd just flag it, if it was a business project,
I'd add the number, for Linux - ha, too big for me :-)
On Sun, 2007-10-28 at 00:14 +0400, Alexey Dobriyan wrote:
> On Sat, Oct 27, 2007 at 09:20:43PM +0200, Roman Zippel wrote:
> > On Sun, 21 Oct 2007, Alexey Dobriyan wrote:
> >
> > > So far remedies were:
> > > a) grep(1) -- obviously fragile. I tried at some point grepping for
> > > spin_lock_irqsave(), found quite a few, but it became booooring quickly.
> > > b) BUILD_BUG_ON(sizeof(flags) != sizeof(unsigned long)) -- was tried,
> > > brutally broke some arches, survived one commit before revert :^)
> > > Doesn't work on i386 where sizeof(unsigned int) == sizeof(unsigned long).
> > >
> > > So it would be nice to have something more robust.
> >
> > If it's just about the type checking, something like below should pretty
> > much do the same.
>
> It won't catch the following if both variables are unsigned long:
>
> spin_lock_irqsave(&lock, flags);
> [stuff]
> spin_unlock_irqrestore(&lock, foo->flags);
>
> It won't catch "static unsigned long flags;". With sparse, we can
> eventually mark type as "on-stack only".
> > +static __always_inline void __irq_flags_check(unsigned long *flags)
> > +{
+ BUILD_BUG_ON(!__builtin_stack_addr(flags));
> > +}
> > +
obviously gcc doesn't (yet) support that __builtin function, but you
could make it work for sparse and define a dummy for gcc.
Hi,
On Sun, 28 Oct 2007, Alexey Dobriyan wrote:
> > If it's just about the type checking, something like below should pretty
> > much do the same.
>
> It won't catch the following if both variables are unsigned long:
>
> spin_lock_irqsave(&lock, flags);
> [stuff]
> spin_unlock_irqrestore(&lock, foo->flags);
>
> It won't catch "static unsigned long flags;". With sparse, we can
> eventually mark type as "on-stack only".
It should be on the stack, but we have cases where a pointer to it is used
(e.g. lock_timer_base). How do you want to deal with this?
bye, Roman
Alexey Dobriyan <[email protected]> wrote:
>
> irq_flags_t flags;
>
> flags = spin_lock_irqXXX(&lock);
> spin_unlock_irqYYY(&lock, flags);
>
> where XXX and YYY are still to be found good names :^) It's also a solution
How about flags?
flags = spin_lock_irqflags(&lock);
spin_unlock_irqflags(&lock, flags);
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt