2013-06-18 15:37:09

by Kirill Tkhai

Subject: [PATCH 1/2] sched: Add schedule_(raw_)spin_unlock and schedule_(raw_)spin_unlock_irq

Helpers to replace the repeated patterns:

1) spin_unlock(lock);
   schedule();
2) spin_unlock_irq(lock);
   schedule();

(The same for raw_spinlock_t.)

This prevents an excess preempt_schedule() call, which can happen on a preemptible kernel: there, plain spin_unlock() re-enables preemption and may enter the scheduler once before the explicit schedule() runs.
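
For illustration, a converted call site would look like this (the lock name is made up for the example):

	/* before: spin_unlock() may preempt-schedule, then we schedule again */
	set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(&dev->lock);
	schedule();

	/* after: one direct schedule(), no intermediate preempt_schedule() */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_spin_unlock(&dev->lock);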

Signed-off-by: Kirill Tkhai <[email protected]>
CC: Steven Rostedt <[email protected]>
CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
---
 include/linux/sched.h            |   37 +++++++++++++++++++++++++++++++++++++
 include/linux/spinlock_api_smp.h |   21 +++++++++++++++++++++
 include/linux/spinlock_api_up.h  |    8 ++++++++
 kernel/spinlock.c                |   13 +++++++++++++
4 files changed, 79 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8b10339..2020071 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,43 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

+/*
+ * schedule_raw_spin_unlock() -- should be used instead of the pattern:
+ *
+ *	raw_spin_unlock(lock);
+ *	schedule();
+ *
+ * It's the same, but avoids a preempt_schedule() call during the unlock.
+ */
+static inline void schedule_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	_raw_spin_unlock_no_resched(lock);
+	schedule();
+}
+
+/*
+ * schedule_raw_spin_unlock_irq() -- should be used instead of the pattern:
+ *
+ *	raw_spin_unlock_irq(lock);
+ *	schedule();
+ *
+ * It's the same, but avoids a preempt_schedule() call during the unlock.
+ */
+static inline void schedule_raw_spin_unlock_irq(raw_spinlock_t *lock)
+{
+	_raw_spin_unlock_irq_no_resched(lock);
+	schedule();
+}
+
+static inline void schedule_spin_unlock(spinlock_t *lock)
+{
+	schedule_raw_spin_unlock(&lock->rlock);
+}
+static inline void schedule_spin_unlock_irq(spinlock_t *lock)
+{
+	schedule_raw_spin_unlock_irq(&lock->rlock);
+}
+
struct nsproxy;
struct user_namespace;

diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 51df117..42dbdfe 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -42,6 +42,10 @@ void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);
+void __lockfunc
+_raw_spin_unlock_no_resched(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irq_no_resched(raw_spinlock_t *lock) __releases(lock);

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
@@ -69,6 +73,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
+#define _raw_spin_unlock_no_resched(lock) __raw_spin_unlock_no_resched(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
@@ -77,6 +82,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
+#define _raw_spin_unlock_irq_no_resched(lock) __raw_spin_unlock_irq_no_resched(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
@@ -153,6 +159,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
	preempt_enable();
}

+static inline void __raw_spin_unlock_no_resched(raw_spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	preempt_enable_no_resched();
+}
+
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
@@ -170,6 +183,14 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
	preempt_enable();
}

+static inline void __raw_spin_unlock_irq_no_resched(raw_spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	local_irq_enable();
+	preempt_enable_no_resched();
+}
+
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f472..82280da 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -39,6 +39,9 @@
#define __UNLOCK(lock) \
  do { preempt_enable(); __release(lock); (void)(lock); } while (0)

+#define __UNLOCK_NO_RESCHED(lock) \
+  do { preempt_enable_no_resched(); __release(lock); (void)(lock); } while (0)
+
#define __UNLOCK_BH(lock) \
  do { preempt_enable_no_resched(); local_bh_enable(); \
  __release(lock); (void)(lock); } while (0)
@@ -46,6 +49,9 @@
#define __UNLOCK_IRQ(lock) \
  do { local_irq_enable(); __UNLOCK(lock); } while (0)

+#define __UNLOCK_IRQ_NO_RESCHED(lock) \
+  do { local_irq_enable(); __UNLOCK_NO_RESCHED(lock); } while (0)
+
#define __UNLOCK_IRQRESTORE(lock, flags) \
  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)

@@ -67,12 +73,14 @@
#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_no_resched(lock) __UNLOCK_NO_RESCHED(lock)
#define _raw_read_unlock(lock) __UNLOCK(lock)
#define _raw_write_unlock(lock) __UNLOCK(lock)
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irq_no_resched(lock) __UNLOCK_IRQ_NO_RESCHED(lock)
#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd806..218058f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -169,6 +169,13 @@ void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
+
+void __lockfunc _raw_spin_unlock_no_resched(raw_spinlock_t *lock)
+{
+	__raw_spin_unlock_no_resched(lock);
+}
+EXPORT_SYMBOL(_raw_spin_unlock_no_resched);
+
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
@@ -185,6 +192,12 @@ void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
+
+void __lockfunc _raw_spin_unlock_irq_no_resched(raw_spinlock_t *lock)
+{
+	__raw_spin_unlock_irq_no_resched(lock);
+}
+EXPORT_SYMBOL(_raw_spin_unlock_irq_no_resched);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH


2013-06-18 17:28:11

by Peter Zijlstra

Subject: Re: [PATCH 1/2] sched: Add schedule_(raw_)spin_unlock and schedule_(raw_)spin_unlock_irq

On Tue, Jun 18, 2013 at 07:36:52PM +0400, Kirill Tkhai wrote:
> Helpers to replace the repeated patterns:
>
> 1) spin_unlock(lock);
>    schedule();
> 2) spin_unlock_irq(lock);
>    schedule();
>

I just noticed this; the existing schedule_preempt_disabled() is
equivalent to:

  preempt_enable()
  schedule()
  preempt_disable()

So I somewhat expected these new primitives to be:

  spin_unlock()
  schedule()
  spin_lock()

Now I haven't actually looked at the usage patch to see what the
converted sites look like (thanks for adding that one though!).

My OCD just triggered on the preemption and locked schedule calls having
different semantics.

2013-06-18 17:46:20

by Kirill Tkhai

Subject: Re: [PATCH 1/2] sched: Add schedule_(raw_)spin_unlock and schedule_(raw_)spin_unlock_irq



18.06.2013, 21:28, "Peter Zijlstra" <[email protected]>:
> On Tue, Jun 18, 2013 at 07:36:52PM +0400, Kirill Tkhai wrote:
>
>> Helpers to replace the repeated patterns:
>>
>> 1) spin_unlock(lock);
>>    schedule();
>> 2) spin_unlock_irq(lock);
>>    schedule();
>
> I just noticed this; the existing schedule_preempt_disabled() is
> equivalent to:
>
>   preempt_enable()
>   schedule()
>   preempt_disable()
>
> So I somewhat expected these new primitives to be:
>
>   spin_unlock()
>   schedule()
>   spin_lock()
>
> Now I haven't actually looked at the usage patch to see what the
> converted sites look like (thanks for adding that one though!).
>
> My OCD just triggered on the preemption and locked schedule calls having
> different semantics.

They have different semantics and a different ending.

Many places (as you can see from the second patch) need additional actions
between schedule() and the next spin_lock(). Several places don't take the
lock a second time at all.
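
For example, a converted site from that patch typically has this shape (the names here are illustrative, not taken from the patch):

	schedule_spin_unlock_irq(&foo->lock);
	/* extra work that must happen without the lock */
	finish_wait(&foo->wq, &wait);
	spin_lock_irq(&foo->lock);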

Kirill

2013-06-24 12:54:46

by Thomas Gleixner

Subject: Re: [PATCH 1/2] sched: Add schedule_(raw_)spin_unlock and schedule_(raw_)spin_unlock_irq

On Tue, 18 Jun 2013, Kirill Tkhai wrote:
> +/*
> + * schedule_raw_spin_unlock() -- should be used instead of the pattern:
> + *
> + *	raw_spin_unlock(lock);
> + *	schedule();
> + *
> + * It's the same, but avoids a preempt_schedule() call during the unlock.
> + */
> +static inline void schedule_raw_spin_unlock(raw_spinlock_t *lock)

This should be raw_spin_unlock_and_schedule().

schedule_raw_spin_unlock() sounds like we schedule a raw_spin_unlock()
for some point in the future.

> +{
> +	_raw_spin_unlock_no_resched(lock);

No, please do not expose such an interface. Instead, implement it as:

raw_spin_unlock_and_schedule()
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	schedule();
}

And this goes into the spinlock header and not into sched.h.
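
The irq variant would follow the same pattern (a sketch, mirroring __raw_spin_unlock_irq_no_resched() from the patch):

raw_spin_unlock_irq_and_schedule()
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable_no_resched();
	schedule();
}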

Thanks,

tglx