2021-04-01 22:31:57

by Sergei Trofimovich

Subject: [PATCH] mm: page_owner: detect page_owner recursion via task_struct

Before the change, page_owner recursion was detected by fetching a
backtrace and inspecting it for the current instruction pointer.
That approach has a few problems:
- it is slightly slow, as it requires an extra backtrace and a linear
stack scan of the result
- it is too late to catch the case where backtrace fetching itself
requires memory allocation (ia64's unwinder does).

To simplify recursion tracking, let's use a page_owner recursion depth
counter in 'struct task_struct'.

The change makes page_owner=on work on ia64 by avoiding infinite
recursion in:
kmalloc()
-> __set_page_owner()
-> save_stack()
-> unwind() [ia64-specific]
-> build_script()
-> kmalloc()
-> __set_page_owner() [we short-circuit here]
-> save_stack()
-> unwind() [recursion]
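
In short, the guard amounts to this (illustrative sketch only; the
real hunk is in the mm/page_owner.c diff below):

	if (current->page_owner_depth >= PAGE_OWNER_MAX_RECURSION_DEPTH)
		return dummy_handle;	/* reentered: short-circuit */
	current->page_owner_depth++;
	/* ... fetch and depot-save the backtrace; both may allocate ... */
	current->page_owner_depth--;
	return handle;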

CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
CC: Juri Lelli <[email protected]>
CC: Vincent Guittot <[email protected]>
CC: Dietmar Eggemann <[email protected]>
CC: Steven Rostedt <[email protected]>
CC: Ben Segall <[email protected]>
CC: Mel Gorman <[email protected]>
CC: Daniel Bristot de Oliveira <[email protected]>
CC: Andrew Morton <[email protected]>
CC: [email protected]
Signed-off-by: Sergei Trofimovich <[email protected]>
---
include/linux/sched.h | 9 +++++++++
init/init_task.c | 3 +++
mm/page_owner.c | 41 +++++++++++++++++------------------------
3 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ef00bb22164c..35771703fd89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1371,6 +1371,15 @@ struct task_struct {
struct llist_head kretprobe_instances;
#endif

+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * Used by page_owner=on to detect recursion in page tracking.
+ * Is it fine to have non-atomic ops here if we ever access
+ * this variable via current->page_owner_depth?
+ */
+ unsigned int page_owner_depth;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
diff --git a/init/init_task.c b/init/init_task.c
index 3711cdaafed2..f579f2b2eca8 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -213,6 +213,9 @@ struct task_struct init_task
#ifdef CONFIG_SECCOMP
.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
+#ifdef CONFIG_PAGE_OWNER
+ .page_owner_depth = 0,
+#endif
};
EXPORT_SYMBOL(init_task);

diff --git a/mm/page_owner.c b/mm/page_owner.c
index 7147fd34a948..422558605fcc 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -20,6 +20,16 @@
*/
#define PAGE_OWNER_STACK_DEPTH (16)

+/*
+ * How many reentries into page_owner we allow.
+ *
+ * Sometimes metadata allocation tracking requires more memory to be allocated:
+ * - when a new stack trace is saved to the stack depot
+ * - when the backtrace itself is calculated (ia64)
+ * Instead of falling into infinite recursion, give it a chance to recover.
+ */
+#define PAGE_OWNER_MAX_RECURSION_DEPTH (1)
+
struct page_owner {
unsigned short order;
short last_migrate_reason;
@@ -97,42 +107,25 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
return (void *)page_ext + page_owner_ops.offset;
}

-static inline bool check_recursive_alloc(unsigned long *entries,
- unsigned int nr_entries,
- unsigned long ip)
-{
- unsigned int i;
-
- for (i = 0; i < nr_entries; i++) {
- if (entries[i] == ip)
- return true;
- }
- return false;
-}
-
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
depot_stack_handle_t handle;
unsigned int nr_entries;

- nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
- /*
- * We need to check recursion here because our request to
- * stackdepot could trigger memory allocation to save new
- * entry. New memory allocation would reach here and call
- * stack_depot_save_entries() again if we don't catch it. There is
- * still not enough memory in stackdepot so it would try to
- * allocate memory again and loop forever.
- */
- if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+ /* Avoid recursion: stack trace generation may itself allocate. */
+ if (current->page_owner_depth >= PAGE_OWNER_MAX_RECURSION_DEPTH)
return dummy_handle;

+ current->page_owner_depth++;
+
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
+
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;

+ current->page_owner_depth--;
return handle;
}

--
2.31.1


2021-04-02 00:06:18

by Andrew Morton

Subject: Re: [PATCH] mm: page_owner: detect page_owner recursion via task_struct

On Thu, 1 Apr 2021 23:30:10 +0100 Sergei Trofimovich <[email protected]> wrote:

> Before the change, page_owner recursion was detected by fetching a
> backtrace and inspecting it for the current instruction pointer.
> That approach has a few problems:
> - it is slightly slow, as it requires an extra backtrace and a linear
> stack scan of the result
> - it is too late to catch the case where backtrace fetching itself
> requires memory allocation (ia64's unwinder does).
>
> To simplify recursion tracking, let's use a page_owner recursion depth
> counter in 'struct task_struct'.

Seems like a better approach.

> The change makes page_owner=on work on ia64 by avoiding infinite
> recursion in:
> kmalloc()
> -> __set_page_owner()
> -> save_stack()
> -> unwind() [ia64-specific]
> -> build_script()
> -> kmalloc()
> -> __set_page_owner() [we short-circuit here]
> -> save_stack()
> -> unwind() [recursion]
>
> ...
>
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1371,6 +1371,15 @@ struct task_struct {
> struct llist_head kretprobe_instances;
> #endif
>
> +#ifdef CONFIG_PAGE_OWNER
> + /*
> + * Used by page_owner=on to detect recursion in page tracking.
> + * Is it fine to have non-atomic ops here if we ever access
> + * this variable via current->page_owner_depth?

Yes, it is fine. This part of the comment can be removed.

> + */
> + unsigned int page_owner_depth;
> +#endif

Adding to the task_struct has a cost. But I don't expect that
PAGE_OWNER is commonly used in production builds (correct?).
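
If anyone wants to measure that cost: given a vmlinux with debug info,
something like

  $ pahole -C task_struct vmlinux | grep 'size:'

shows the resulting struct size.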

> --- a/init/init_task.c
> +++ b/init/init_task.c
> @@ -213,6 +213,9 @@ struct task_struct init_task
> #ifdef CONFIG_SECCOMP
> .seccomp = { .filter_count = ATOMIC_INIT(0) },
> #endif
> +#ifdef CONFIG_PAGE_OWNER
> + .page_owner_depth = 0,
> +#endif
> };
> EXPORT_SYMBOL(init_task);

It will be initialized to zero by the compiler. We can omit this hunk
entirely.

> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -20,6 +20,16 @@
> */
> #define PAGE_OWNER_STACK_DEPTH (16)
>
> +/*
> + * How many reentries into page_owner we allow.
> + *
> + * Sometimes metadata allocation tracking requires more memory to be allocated:
> + * - when a new stack trace is saved to the stack depot
> + * - when the backtrace itself is calculated (ia64)
> + * Instead of falling into infinite recursion, give it a chance to recover.
> + */
> +#define PAGE_OWNER_MAX_RECURSION_DEPTH (1)

So this is presently a boolean. Is there any expectation that
PAGE_OWNER_MAX_RECURSION_DEPTH will ever be greater than 1? If not, we
could use a single bit in the task_struct. Add it to the
"Unserialized, strictly 'current'" bitfields. We could make it a 2-bit
field if we ever want to permit a larger PAGE_OWNER_MAX_RECURSION_DEPTH.
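
Roughly this (sketch only; the bit name is illustrative):

#ifdef CONFIG_PAGE_OWNER
	/* Unserialized, only ever touched via current: no atomics needed. */
	unsigned			in_page_owner:1;
#endif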


2021-04-02 11:52:02

by Sergei Trofimovich

Subject: Re: [PATCH] mm: page_owner: detect page_owner recursion via task_struct

On Thu, 1 Apr 2021 17:05:19 -0700
Andrew Morton <[email protected]> wrote:

> On Thu, 1 Apr 2021 23:30:10 +0100 Sergei Trofimovich <[email protected]> wrote:
>
> > Before the change, page_owner recursion was detected by fetching a
> > backtrace and inspecting it for the current instruction pointer.
> > That approach has a few problems:
> > - it is slightly slow, as it requires an extra backtrace and a linear
> > stack scan of the result
> > - it is too late to catch the case where backtrace fetching itself
> > requires memory allocation (ia64's unwinder does).
> >
> > To simplify recursion tracking, let's use a page_owner recursion depth
> > counter in 'struct task_struct'.
>
> Seems like a better approach.
>
> > The change makes page_owner=on work on ia64 by avoiding infinite
> > recursion in:
> > kmalloc()
> > -> __set_page_owner()
> > -> save_stack()
> > -> unwind() [ia64-specific]
> > -> build_script()
> > -> kmalloc()
> > -> __set_page_owner() [we short-circuit here]
> > -> save_stack()
> > -> unwind() [recursion]
> >
> > ...
> >
> > --- a/include/linux/sched.h
> > +++ b/include/linux/sched.h
> > @@ -1371,6 +1371,15 @@ struct task_struct {
> > struct llist_head kretprobe_instances;
> > #endif
> >
> > +#ifdef CONFIG_PAGE_OWNER
> > + /*
> > + * Used by page_owner=on to detect recursion in page tracking.
> > + * Is it fine to have non-atomic ops here if we ever access
> > + * this variable via current->page_owner_depth?
>
> Yes, it is fine. This part of the comment can be removed.

Cool! Will do.

> > + */
> > + unsigned int page_owner_depth;
> > +#endif
>
> Adding to the task_struct has a cost. But I don't expect that
> PAGE_OWNER is commonly used in production builds (correct?).

Yeah, PAGE_OWNER should not be enabled for production kernels.

Not having extra memory overhead (or layout disruption) is a nice
benefit though. I'll switch to an "Unserialized, strictly 'current'" bitfield.

> > --- a/init/init_task.c
> > +++ b/init/init_task.c
> > @@ -213,6 +213,9 @@ struct task_struct init_task
> > #ifdef CONFIG_SECCOMP
> > .seccomp = { .filter_count = ATOMIC_INIT(0) },
> > #endif
> > +#ifdef CONFIG_PAGE_OWNER
> > + .page_owner_depth = 0,
> > +#endif
> > };
> > EXPORT_SYMBOL(init_task);
>
> It will be initialized to zero by the compiler. We can omit this hunk
> entirely.
>
> > --- a/mm/page_owner.c
> > +++ b/mm/page_owner.c
> > @@ -20,6 +20,16 @@
> > */
> > #define PAGE_OWNER_STACK_DEPTH (16)
> >
> > +/*
> > + * How many reentries into page_owner we allow.
> > + *
> > + * Sometimes metadata allocation tracking requires more memory to be allocated:
> > + * - when a new stack trace is saved to the stack depot
> > + * - when the backtrace itself is calculated (ia64)
> > + * Instead of falling into infinite recursion, give it a chance to recover.
> > + */
> > +#define PAGE_OWNER_MAX_RECURSION_DEPTH (1)
>
> So this is presently a boolean. Is there any expectation that
> PAGE_OWNER_MAX_RECURSION_DEPTH will ever be greater than 1? If not, we
> could use a single bit in the task_struct. Add it to the
> "Unserialized, strictly 'current'" bitfields. We could make it a 2-bit
> field if we ever want to permit a larger PAGE_OWNER_MAX_RECURSION_DEPTH.

Let's settle on depth=1. depth>1 is not trivial for other reasons I don't
completely understand.

Follow-up patch incoming.

--

Sergei

2021-04-02 11:54:27

by Sergei Trofimovich

Subject: [PATCH v2] mm: page_owner: detect page_owner recursion via task_struct

Before the change, page_owner recursion was detected by fetching a
backtrace and inspecting it for the current instruction pointer.
That approach has a few problems:
- it is slightly slow, as it requires an extra backtrace and a linear
stack scan of the result
- it is too late to catch the case where backtrace fetching itself
requires memory allocation (ia64's unwinder does).

To simplify recursion tracking, let's use a page_owner recursion flag
in 'struct task_struct'.

The change makes page_owner=on work on ia64 by avoiding infinite
recursion in:
kmalloc()
-> __set_page_owner()
-> save_stack()
-> unwind() [ia64-specific]
-> build_script()
-> kmalloc()
-> __set_page_owner() [we short-circuit here]
-> save_stack()
-> unwind() [recursion]

CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
CC: Juri Lelli <[email protected]>
CC: Vincent Guittot <[email protected]>
CC: Dietmar Eggemann <[email protected]>
CC: Steven Rostedt <[email protected]>
CC: Ben Segall <[email protected]>
CC: Mel Gorman <[email protected]>
CC: Daniel Bristot de Oliveira <[email protected]>
CC: Andrew Morton <[email protected]>
CC: [email protected]
Signed-off-by: Sergei Trofimovich <[email protected]>
---
Changes since v1:
- use bit from task_struct instead of a new field
- track only one recursion depth level so far

include/linux/sched.h | 4 ++++
mm/page_owner.c | 32 ++++++++++----------------------
2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ef00bb22164c..00986450677c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -841,6 +841,10 @@ struct task_struct {
/* Stalled due to lack of memory */
unsigned in_memstall:1;
#endif
+#ifdef CONFIG_PAGE_OWNER
+ /* Used by page_owner=on to detect recursion in page tracking. */
+ unsigned in_page_owner:1;
+#endif

unsigned long atomic_flags; /* Flags requiring atomic access. */

diff --git a/mm/page_owner.c b/mm/page_owner.c
index 7147fd34a948..64b2e4c6afb7 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -97,42 +97,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
return (void *)page_ext + page_owner_ops.offset;
}

-static inline bool check_recursive_alloc(unsigned long *entries,
- unsigned int nr_entries,
- unsigned long ip)
-{
- unsigned int i;
-
- for (i = 0; i < nr_entries; i++) {
- if (entries[i] == ip)
- return true;
- }
- return false;
-}
-
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
depot_stack_handle_t handle;
unsigned int nr_entries;

- nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
/*
- * We need to check recursion here because our request to
- * stackdepot could trigger memory allocation to save new
- * entry. New memory allocation would reach here and call
- * stack_depot_save_entries() again if we don't catch it. There is
- * still not enough memory in stackdepot so it would try to
- * allocate memory again and loop forever.
+ * Avoid recursion.
+ *
+ * Sometimes page metadata allocation tracking requires more
+ * memory to be allocated:
+ * - when a new stack trace is saved to the stack depot
+ * - when the backtrace itself is calculated (ia64)
*/
- if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+ if (current->in_page_owner)
return dummy_handle;
+ current->in_page_owner = 1;

+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;

+ current->in_page_owner = 0;
return handle;
}

--
2.31.1

2021-04-07 20:57:00

by Vlastimil Babka

Subject: Re: [PATCH] mm: page_owner: detect page_owner recursion via task_struct

On 4/2/21 1:50 PM, Sergei Trofimovich wrote:
> On Thu, 1 Apr 2021 17:05:19 -0700
> Andrew Morton <[email protected]> wrote:
>
>> On Thu, 1 Apr 2021 23:30:10 +0100 Sergei Trofimovich <[email protected]> wrote:
>>
>> > Before the change, page_owner recursion was detected by fetching a
>> > backtrace and inspecting it for the current instruction pointer.
>> > That approach has a few problems:
>> > - it is slightly slow, as it requires an extra backtrace and a linear
>> > stack scan of the result
>> > - it is too late to catch the case where backtrace fetching itself
>> > requires memory allocation (ia64's unwinder does).
>> >
>> > To simplify recursion tracking, let's use a page_owner recursion depth
>> > counter in 'struct task_struct'.
>>
>> Seems like a better approach.
>>
>> > The change makes page_owner=on work on ia64 by avoiding infinite
>> > recursion in:
>> > kmalloc()
>> > -> __set_page_owner()
>> > -> save_stack()
>> > -> unwind() [ia64-specific]
>> > -> build_script()
>> > -> kmalloc()
>> > -> __set_page_owner() [we short-circuit here]
>> > -> save_stack()
>> > -> unwind() [recursion]
>> >
>> > ...
>> >
>> > --- a/include/linux/sched.h
>> > +++ b/include/linux/sched.h
>> > @@ -1371,6 +1371,15 @@ struct task_struct {
>> > struct llist_head kretprobe_instances;
>> > #endif
>> >
>> > +#ifdef CONFIG_PAGE_OWNER
>> > + /*
>> > + * Used by page_owner=on to detect recursion in page tracking.
>> > + * Is it fine to have non-atomic ops here if we ever access
>> > + * this variable via current->page_owner_depth?
>>
>> Yes, it is fine. This part of the comment can be removed.
>
> Cool! Will do.
>
>> > + */
>> > + unsigned int page_owner_depth;
>> > +#endif
>>
>> Adding to the task_struct has a cost. But I don't expect that
>> PAGE_OWNER is commonly used in production builds (correct?).
>
> Yeah, PAGE_OWNER should not be enabled for production kernels.

Note that it was converted to use a static key exactly so that it can always
be built into production kernels and simply enabled at boot when needed. Our
kernels have it enabled.
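
For reference, the enablement pattern is roughly this (simplified
sketch; the real code wires init_page_owner() up via the page_ext ops,
and the set_page_owner() wrapper also checks for a valid page_ext):

DEFINE_STATIC_KEY_FALSE(page_owner_inited);	/* <linux/jump_label.h> */
static bool page_owner_enabled;

static int __init early_page_owner_param(char *buf)
{
	if (buf && !strcmp(buf, "on"))
		page_owner_enabled = true;
	return 0;
}
early_param("page_owner", early_page_owner_param);

static void __init init_page_owner(void)
{
	if (page_owner_enabled)
		static_branch_enable(&page_owner_inited);
}

/* Hot-path callers pay only a patched-out jump while disabled: */
static inline void set_page_owner(struct page *page, unsigned int order,
				  gfp_t gfp_mask)
{
	if (static_branch_unlikely(&page_owner_inited))
		__set_page_owner(page, order, gfp_mask);
}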

> Not having extra memory overhead (or layout disruption) is a nice
> benefit though. I'll switch to an "Unserialized, strictly 'current'" bitfield.
>
>> > --- a/init/init_task.c
>> > +++ b/init/init_task.c
>> > @@ -213,6 +213,9 @@ struct task_struct init_task
>> > #ifdef CONFIG_SECCOMP
>> > .seccomp = { .filter_count = ATOMIC_INIT(0) },
>> > #endif
>> > +#ifdef CONFIG_PAGE_OWNER
>> > + .page_owner_depth = 0,
>> > +#endif
>> > };
>> > EXPORT_SYMBOL(init_task);
>>
>> It will be initialized to zero by the compiler. We can omit this hunk
>> entirely.
>>
>> > --- a/mm/page_owner.c
>> > +++ b/mm/page_owner.c
>> > @@ -20,6 +20,16 @@
>> > */
>> > #define PAGE_OWNER_STACK_DEPTH (16)
>> >
>> > +/*
>> > + * How many reentries into page_owner we allow.
>> > + *
>> > + * Sometimes metadata allocation tracking requires more memory to be allocated:
>> > + * - when a new stack trace is saved to the stack depot
>> > + * - when the backtrace itself is calculated (ia64)
>> > + * Instead of falling into infinite recursion, give it a chance to recover.
>> > + */
>> > +#define PAGE_OWNER_MAX_RECURSION_DEPTH (1)
>>
>> So this is presently a boolean. Is there any expectation that
>> PAGE_OWNER_MAX_RECURSION_DEPTH will ever be greater than 1? If not, we
>> could use a single bit in the task_struct. Add it to the
>> "Unserialized, strictly 'current'" bitfields. We could make it a 2-bit
>> field if we ever want to permit a larger PAGE_OWNER_MAX_RECURSION_DEPTH.
>
> Let's settle on depth=1. depth>1 is not trivial for other reasons I don't
> completely understand.

That's fine, I don't think depth>1 would bring us much benefit anyway.

> Follow-up patch incoming.
>

2021-04-07 20:57:51

by Vlastimil Babka

Subject: Re: [PATCH v2] mm: page_owner: detect page_owner recursion via task_struct

On 4/2/21 1:53 PM, Sergei Trofimovich wrote:
> Before the change, page_owner recursion was detected by fetching a
> backtrace and inspecting it for the current instruction pointer.
> That approach has a few problems:
> - it is slightly slow, as it requires an extra backtrace and a linear
> stack scan of the result
> - it is too late to catch the case where backtrace fetching itself
> requires memory allocation (ia64's unwinder does).
>
> To simplify recursion tracking, let's use a page_owner recursion flag
> in 'struct task_struct'.
>
> The change makes page_owner=on work on ia64 by avoiding infinite
> recursion in:
> kmalloc()
> -> __set_page_owner()
> -> save_stack()
> -> unwind() [ia64-specific]
> -> build_script()
> -> kmalloc()
> -> __set_page_owner() [we short-circuit here]
> -> save_stack()
> -> unwind() [recursion]
>
> CC: Ingo Molnar <[email protected]>
> CC: Peter Zijlstra <[email protected]>
> CC: Juri Lelli <[email protected]>
> CC: Vincent Guittot <[email protected]>
> CC: Dietmar Eggemann <[email protected]>
> CC: Steven Rostedt <[email protected]>
> CC: Ben Segall <[email protected]>
> CC: Mel Gorman <[email protected]>
> CC: Daniel Bristot de Oliveira <[email protected]>
> CC: Andrew Morton <[email protected]>
> CC: [email protected]
> Signed-off-by: Sergei Trofimovich <[email protected]>

Much better indeed, thanks.
Acked-by: Vlastimil Babka <[email protected]>

> ---
> Changes since v1:
> - use bit from task_struct instead of a new field
> - track only one recursion depth level so far
>
> include/linux/sched.h | 4 ++++
> mm/page_owner.c | 32 ++++++++++----------------------
> 2 files changed, 14 insertions(+), 22 deletions(-)
>
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index ef00bb22164c..00986450677c 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -841,6 +841,10 @@ struct task_struct {
> /* Stalled due to lack of memory */
> unsigned in_memstall:1;
> #endif
> +#ifdef CONFIG_PAGE_OWNER
> + /* Used by page_owner=on to detect recursion in page tracking. */
> + unsigned in_page_owner:1;
> +#endif
>
> unsigned long atomic_flags; /* Flags requiring atomic access. */
>
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 7147fd34a948..64b2e4c6afb7 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -97,42 +97,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
> return (void *)page_ext + page_owner_ops.offset;
> }
>
> -static inline bool check_recursive_alloc(unsigned long *entries,
> - unsigned int nr_entries,
> - unsigned long ip)
> -{
> - unsigned int i;
> -
> - for (i = 0; i < nr_entries; i++) {
> - if (entries[i] == ip)
> - return true;
> - }
> - return false;
> -}
> -
> static noinline depot_stack_handle_t save_stack(gfp_t flags)
> {
> unsigned long entries[PAGE_OWNER_STACK_DEPTH];
> depot_stack_handle_t handle;
> unsigned int nr_entries;
>
> - nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
> -
> /*
> - * We need to check recursion here because our request to
> - * stackdepot could trigger memory allocation to save new
> - * entry. New memory allocation would reach here and call
> - * stack_depot_save_entries() again if we don't catch it. There is
> - * still not enough memory in stackdepot so it would try to
> - * allocate memory again and loop forever.
> + * Avoid recursion.
> + *
> + * Sometimes page metadata allocation tracking requires more
> + * memory to be allocated:
> + * - when a new stack trace is saved to the stack depot
> + * - when the backtrace itself is calculated (ia64)
> */
> - if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
> + if (current->in_page_owner)
> return dummy_handle;
> + current->in_page_owner = 1;
>
> + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
> handle = stack_depot_save(entries, nr_entries, flags);
> if (!handle)
> handle = failure_handle;
>
> + current->in_page_owner = 0;
> return handle;
> }
>
>