2019-05-21 10:08:42

by Daniel Vetter

[permalink] [raw]
Subject: [PATCH] kernel.h: Add non_block_start/end()

In some special cases we must not block, but there's no spinlock,
preempt-off, irqs-off or similar critical section already in place that
arms the might_sleep() debug checks. Add a non_block_start/end() pair to
annotate these.

This will be used in the oom paths of mmu-notifiers, where blocking is
not allowed to make sure there's forward progress. Quoting Michal:

"The notifier is called from quite a restricted context - oom_reaper -
which shouldn't depend on any locks or sleepable conditionals. The code
should be swift as well but we mostly do care about it to make forward
progress. Checking for sleepable context is the best thing we could come
up with that would describe these demands at least partially."

Peter also asked whether we want to catch spinlocks on top, but Michal
said those are less of a problem because spinlocks can't have an
indirect dependency upon the page allocator and hence close the loop
with the oom reaper.
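
As a usage sketch (illustration only, not part of the diff below;
do_nonblocking_work() is a hypothetical placeholder):

	non_block_start();
	/*
	 * Anything in here that calls might_sleep() or actually
	 * schedules will splat when CONFIG_DEBUG_ATOMIC_SLEEP is
	 * enabled; in non-debug builds the annotations compile away.
	 */
	do_nonblocking_work();
	non_block_end();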

Suggested by Michal Hocko.

v2:
- Improve commit message (Michal)
- Also check in schedule, not just might_sleep (Peter)

v3: It works better when I actually squash in the fixup I had lying
around :-/

Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: "Christian König" <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: "Jérôme Glisse" <[email protected]>
Cc: [email protected]
Cc: Masahiro Yamada <[email protected]>
Cc: Wei Wang <[email protected]>
Cc: Andy Shevchenko <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Feng Tang <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: [email protected]
Acked-by: Christian König <[email protected]> (v1)
Signed-off-by: Daniel Vetter <[email protected]>
---
include/linux/kernel.h | 10 +++++++++-
include/linux/sched.h | 4 ++++
kernel/sched/core.c | 19 ++++++++++++++-----
3 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 74b1ee9027f5..b5f2c2ff0eab 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -214,7 +214,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
@@ -230,6 +232,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
+# define non_block_start() \
+ do { current->non_block_count++; } while (0)
+# define non_block_end() \
+ do { WARN_ON(current->non_block_count-- == 0); } while (0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
@@ -238,6 +244,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
#endif

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 11837410690f..7f5b293e72df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -908,6 +908,10 @@ struct task_struct {
struct mutex_waiter *blocked_on;
#endif

+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ int non_block_count;
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 102dfcf0a29a..ed7755a28465 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3264,13 +3264,22 @@ static noinline void __schedule_bug(struct task_struct *prev)
/*
* Various schedule()-time debugging checks and statistics:
*/
-static inline void schedule_debug(struct task_struct *prev)
+static inline void schedule_debug(struct task_struct *prev, bool preempt)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
if (task_stack_end_corrupted(prev))
panic("corrupted stack end detected inside scheduler\n");
#endif

+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ if (!preempt && prev->state && prev->non_block_count) {
+ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
+ prev->comm, prev->pid, prev->non_block_count);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ }
+#endif
+
if (unlikely(in_atomic_preempt_off())) {
__schedule_bug(prev);
preempt_count_set(PREEMPT_DISABLED);
@@ -3377,7 +3386,7 @@ static void __sched notrace __schedule(bool preempt)
rq = cpu_rq(cpu);
prev = rq->curr;

- schedule_debug(prev);
+ schedule_debug(prev, preempt);

if (sched_feat(HRTICK))
hrtick_clear(rq);
@@ -6102,7 +6111,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
rcu_sleep_check();

if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
- !is_idle_task(current)) ||
+ !is_idle_task(current) && !current->non_block_count) ||
system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
oops_in_progress)
return;
@@ -6118,8 +6127,8 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
printk(KERN_ERR
- "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
- in_atomic(), irqs_disabled(),
+ "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), current->non_block_count,
current->pid, current->comm);

if (task_stack_end_corrupted(current))
--
2.20.1
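
A sketch of how the two detection paths added by the patch fire
(illustration only; some_mutex is a hypothetical placeholder):

	non_block_start();
	might_sleep();			/* ___might_sleep() sees non_block_count != 0
					 * and prints "BUG: sleeping function called
					 * from invalid context" */
	mutex_lock(&some_mutex);	/* if this actually blocks, schedule_debug()
					 * prints "BUG: scheduling in a non-blocking
					 * section" */
	non_block_end();

Note that non_block_end() WARNs when the counter underflows, which catches
unbalanced start/end pairs.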



2019-05-21 10:48:09

by Michal Hocko

[permalink] [raw]
Subject: Re: [PATCH] kernel.h: Add non_block_start/end()

On Tue 21-05-19 12:06:11, Daniel Vetter wrote:
> [...]

I like this in general. The implementation looks reasonable to me but I
didn't check deeply enough to give my R-by or A-by.

> [...]

--
Michal Hocko
SUSE Labs

2019-05-21 14:43:38

by Christopher Lameter

[permalink] [raw]
Subject: Re: [PATCH] kernel.h: Add non_block_start/end()

On Tue, 21 May 2019, Daniel Vetter wrote:

> In some special cases we must not block, but there's no spinlock,
> preempt-off, irqs-off or similar critical section already in place that
> arms the might_sleep() debug checks. Add a non_block_start/end() pair to
> annotate these.

Just putting preempt on/off around these is not sufficient?

If not, and you need to add another type of critical section, then would
this not need to be added to the preempt counters? See
include/linux/preempt.h; it looks like there are sufficient bits left to
put the counter in there.
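
For reference, this is the existing preempt_count() bit layout from
include/linux/preempt.h; the NON_BLOCK_* lines are a purely hypothetical
sketch of the suggestion above, not code from this thread:

	/*
	 * Existing layout:
	 *         PREEMPT_MASK: 0x000000ff
	 *         SOFTIRQ_MASK: 0x0000ff00
	 *         HARDIRQ_MASK: 0x000f0000
	 *             NMI_MASK: 0x00100000
	 * PREEMPT_NEED_RESCHED: 0x80000000
	 */

	/* Hypothetical: carve a counter out of the free bits between
	 * NMI_MASK and PREEMPT_NEED_RESCHED, e.g.:
	 */
	#define NON_BLOCK_BITS		4
	#define NON_BLOCK_SHIFT		(NMI_SHIFT + NMI_BITS)
	#define NON_BLOCK_OFFSET	(1UL << NON_BLOCK_SHIFT)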



2019-05-21 14:50:09

by Michal Hocko

[permalink] [raw]
Subject: Re: [PATCH] kernel.h: Add non_block_start/end()

On Tue 21-05-19 14:43:38, Christopher Lameter wrote:
> On Tue, 21 May 2019, Daniel Vetter wrote:
>
> > In some special cases we must not block, but there's not a
> > spinlock, preempt-off, irqs-off or similar critical section already
> > that arms the might_sleep() debug checks. Add a non_block_start/end()
> > pair to annotate these.
>
> Just putting preempt on/off around these is not sufficient?

It is not a critical section. It is a _debugging_ facility to help
discover blocking contexts.
--
Michal Hocko
SUSE Labs
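
The distinction in code form (a sketch, assuming the patch above is
applied):

	preempt_disable();	/* a real critical section: preemption is
				 * actually disabled, in every build */
	/* ... */
	preempt_enable();

	non_block_start();	/* debug annotation only: bumps a per-task
				 * counter when CONFIG_DEBUG_ATOMIC_SLEEP is
				 * set, compiles to nothing otherwise */
	/* ... */
	non_block_end();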

2019-05-21 14:51:59

by Daniel Vetter

[permalink] [raw]
Subject: Re: [PATCH] kernel.h: Add non_block_start/end()

On Tue, May 21, 2019 at 12:46:38PM +0200, Michal Hocko wrote:
> On Tue 21-05-19 12:06:11, Daniel Vetter wrote:
> > [...]
>
> I like this in general. The implementation looks reasonable to me but I
> didn't check deeply enough to give my R-by or A-by.

Thanks for all your comments. I'll ask Jerome Glisse to look into this; I
think it could be useful for all the HMM work too.

And I sent this out without a reply-to reference to the patch it's supposed
to replace, so I'll need to resend for patchwork and 0day to pick up the
correct series. Sorry about that noise :-/
-Daniel


--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch