Subject: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

The task stack could be deallocated later in delayed_put_task_struct().
For fork()/exec() kind of workloads (say a shell script executing
several commands) it is important that the stack is released in
finish_task_switch() so that in the VMAP_STACK case it can be cached
and reused by the new task.
If the free/caching is RCU-delayed, then a new stack has to be
allocated, because freeing then happens in batches and only two stacks
out of each batch fit into the cache and get recycled.

For PREEMPT_RT it would be good if the wake-up in vfree_atomic() could
be avoided in the scheduling path. Far worse are the other
free_thread_stack() implementations, which invoke
__free_pages()/kmem_cache_free() with preemption disabled.

Introduce put_task_stack_sched(), which is invoked from
finish_task_switch() and only caches the VMAP stack. If the cache is
full or !CONFIG_VMAP_STACK is used, then the stack is freed from
delayed_put_task_struct(). In the VMAP case this is another opportunity
to fill the cache.

The stack is finally released in delayed_put_task_struct(), which
means that a valid stack reference can still be held at that point. As
such, no assumption can be made about whether the task_struct::stack
pointer may be freed just because it is non-NULL.
Set the lowest bit of task_struct::stack if the stack was released via
put_task_stack_sched() and needs a final free in
delayed_put_task_struct(). If the bit is missing then a reference is
held and put_task_stack() will release it.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
include/linux/sched/task_stack.h | 8 +++++
kernel/exit.c | 1 +
kernel/fork.c | 60 ++++++++++++++++++++++++++------
kernel/sched/core.c | 7 ++--
4 files changed, 64 insertions(+), 12 deletions(-)

diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 892562ebbd3aa..ccd1336aa7f42 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -70,6 +70,7 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
}

extern void put_task_stack(struct task_struct *tsk);
+extern void put_task_stack_sched(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
@@ -77,6 +78,13 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
}

static inline void put_task_stack(struct task_struct *tsk) {}
+static inline void put_task_stack_sched(struct task_struct *tsk) {}
+#endif
+
+#ifdef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
+static inline void task_stack_cleanup(struct task_struct *tsk) {}
+#else
+extern void task_stack_cleanup(struct task_struct *tsk);
#endif

void exit_task_stack_account(struct task_struct *tsk);
diff --git a/kernel/exit.c b/kernel/exit.c
index c303cffe7fdb4..293b280d23192 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -171,6 +171,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
kprobe_flush_task(tsk);
perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
+ task_stack_cleanup(tsk);
put_task_struct(tsk);
}

diff --git a/kernel/fork.c b/kernel/fork.c
index 5f4e659a922e1..f48f666582b09 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -179,6 +179,16 @@ static inline void free_task_struct(struct task_struct *tsk)

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

+#define THREAD_STACK_DELAYED_FREE 1UL
+
+static void thread_stack_mark_delayed_free(struct task_struct *tsk)
+{
+ unsigned long val = (unsigned long)tsk->stack;
+
+ val |= THREAD_STACK_DELAYED_FREE;
+ WRITE_ONCE(tsk->stack, (void *)val);
+}
+
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
@@ -294,7 +304,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return 0;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool cache_only)
{
int i;

@@ -307,7 +317,12 @@ static void free_thread_stack(struct task_struct *tsk)
tsk->stack_vm_area = NULL;
return;
}
- vfree_atomic(tsk->stack);
+ if (cache_only) {
+ thread_stack_mark_delayed_free(tsk);
+ return;
+ }
+
+ vfree(tsk->stack);
tsk->stack = NULL;
tsk->stack_vm_area = NULL;
}
@@ -326,8 +341,12 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return -ENOMEM;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool cache_only)
{
+ if (cache_only) {
+ thread_stack_mark_delayed_free(tsk);
+ return;
+ }
__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
tsk->stack = NULL;
}
@@ -346,8 +365,12 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return stack ? 0 : -ENOMEM;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool cache_only)
{
+ if (cache_only) {
+ thread_stack_mark_delayed_free(tsk);
+ return;
+ }
kmem_cache_free(thread_stack_cache, tsk->stack);
tsk->stack = NULL;
}
@@ -361,8 +384,19 @@ void thread_stack_cache_init(void)
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
-#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */

+void task_stack_cleanup(struct task_struct *tsk)
+{
+ unsigned long val = (unsigned long)tsk->stack;
+
+ if (!(val & THREAD_STACK_DELAYED_FREE))
+ return;
+
+ WRITE_ONCE(tsk->stack, (void *)(val & ~THREAD_STACK_DELAYED_FREE));
+ free_thread_stack(tsk, false);
+}
+
+#else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
unsigned long *stack;
@@ -464,19 +498,25 @@ void exit_task_stack_account(struct task_struct *tsk)
}
}

-static void release_task_stack(struct task_struct *tsk)
+static void release_task_stack(struct task_struct *tsk, bool cache_only)
{
if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
return; /* Better to leak the stack than to free prematurely */

- free_thread_stack(tsk);
+ free_thread_stack(tsk, cache_only);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
if (refcount_dec_and_test(&tsk->stack_refcount))
- release_task_stack(tsk);
+ release_task_stack(tsk, false);
+}
+
+void put_task_stack_sched(struct task_struct *tsk)
+{
+ if (refcount_dec_and_test(&tsk->stack_refcount))
+ release_task_stack(tsk, true);
}
#endif

@@ -490,7 +530,7 @@ void free_task(struct task_struct *tsk)
* The task is finally done with both the stack and thread_info,
* so free both.
*/
- release_task_stack(tsk);
+ release_task_stack(tsk, false);
#else
/*
* If the task had a separate stack allocation, it should be gone
@@ -990,7 +1030,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)

free_stack:
exit_task_stack_account(tsk);
- free_thread_stack(tsk);
+ free_thread_stack(tsk, false);
free_tsk:
free_task_struct(tsk);
return NULL;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e4ae00e52d14..bfcb45c3e59dc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4894,8 +4894,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);

- /* Task is done with its stack. */
- put_task_stack(prev);
+ /*
+ * Cache only the VMAP stack. The final deallocation is in
+ * delayed_put_task_struct.
+ */
+ put_task_stack_sched(prev);

put_task_struct_rcu_user(prev);
}
--
2.34.1


From: Andy Lutomirski
Date: 2022-02-14 08:24:44
Subject: Re: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

On 1/25/22 07:26, Sebastian Andrzej Siewior wrote:
> The task stack could be deallocated later in delayed_put_task_struct().
> For fork()/exec() kind of workloads (say a shell script executing
> several commands) it is important that the stack is released in
> finish_task_switch() so that in the VMAP_STACK case it can be cached
> and reused by the new task.
> If the free/caching is RCU-delayed, then a new stack has to be
> allocated, because freeing then happens in batches and only two stacks
> out of each batch fit into the cache and get recycled.
>
> For PREEMPT_RT it would be good if the wake-up in vfree_atomic() could
> be avoided in the scheduling path. Far worse are the other
> free_thread_stack() implementations, which invoke
> __free_pages()/kmem_cache_free() with preemption disabled.
>
> Introduce put_task_stack_sched(), which is invoked from
> finish_task_switch() and only caches the VMAP stack. If the cache is
> full or !CONFIG_VMAP_STACK is used, then the stack is freed from
> delayed_put_task_struct(). In the VMAP case this is another opportunity
> to fill the cache.
>
> The stack is finally released in delayed_put_task_struct(), which
> means that a valid stack reference can still be held at that point. As
> such, no assumption can be made about whether the task_struct::stack
> pointer may be freed just because it is non-NULL.
> Set the lowest bit of task_struct::stack if the stack was released via
> put_task_stack_sched() and needs a final free in
> delayed_put_task_struct(). If the bit is missing then a reference is
> held and put_task_stack() will release it.

I don't understand what this bit is for or why the logic needs to be
this complicated. Can you set ->stack to NULL if and only if you freed
it early?

> +static void free_thread_stack(struct task_struct *tsk, bool cache_only)

This is messy. Please clean it up for real:

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
        for (...) try to put it in this slot;
}

And the callers can do things like:

if (try_release_thread_stack_to_cache(...))
        return;

/* need to free for real */
free it or delayed-free it.

--Andy
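
For reference, the helper shape suggested here is what the follow-up
further down ends up implementing: try to park the vmapped stack in one
of the existing per-CPU cached_stacks[] slots and report whether that
worked. A sketch along those lines (mirroring the later patch, not a
new API):

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
        unsigned int i;

        for (i = 0; i < NR_CACHED_STACKS; i++) {
                /* Skip occupied slots; claim the first empty one. */
                if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
                        continue;
                return true;
        }
        return false;
}

Callers then fall back to an immediate or delayed free only when this
returns false.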

Subject: Re: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

On 2022-02-14 13:10:06 [+0100], To Andy Lutomirski wrote:
>
> I think I could use the first few bytes of the stack as a RCU-head. Let
> me try that.

Both task::stack_vm_area and ::stack would have to survive until the
RCU callback runs. Now I remember why I went for that bit. But I do
have (hopefully) a better idea now.

> > --Andy

Sebastian
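
The "better idea" mentioned here is what the revised patch below does:
since the task is dead, the bottom of its stack is reusable, so an
rcu_head plus the stack_vm_area pointer can be stored there and carried
through the RCU grace period without any extra allocation. Roughly, as
a sketch of that approach:

struct vm_stack {
        struct rcu_head rcu;                    /* queued via call_rcu() */
        struct vm_struct *stack_vm_area;        /* needed to cache or vfree later */
};

static void thread_stack_delayed_free(struct task_struct *tsk)
{
        /* The task is dead, so the low bytes of its stack can be reused. */
        struct vm_stack *vm_stack = tsk->stack;

        vm_stack->stack_vm_area = tsk->stack_vm_area;
        /* thread_stack_free_rcu() (see the follow-up) tries the cache,
         * otherwise it vfree()s the stack. */
        call_rcu(&vm_stack->rcu, thread_stack_free_rcu);

        tsk->stack = NULL;
        tsk->stack_vm_area = NULL;
}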

Subject: Re: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

On 2022-02-14 17:54:48 [+0100], To Andy Lutomirski wrote:
> index fcf0c180617c2..defe31036930a 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4895,8 +4895,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
> if (prev->sched_class->task_dead)
> prev->sched_class->task_dead(prev);
>
> - /* Task is done with its stack. */
> - put_task_stack(prev);
> + /*
> + * Task is done with its stack. Try to cache VMAP stack and
> + * delay free it otherwise.
> + */
> + put_task_stack_sched(prev);

Now that I am writing the commit message, there is probably nothing
wrong with unconditionally delaying the free via RCU if caching failed.
Then I don't have to explain that one function is for the atomic
context and the other for non-atomic context.

> put_task_struct_rcu_user(prev);
> }
> --
Sebastian

Subject: Re: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

On 2022-02-11 15:55:01 [-0800], Andy Lutomirski wrote:
> > Set the lowest bit of task_struct::stack if the stack was released via
> > put_task_stack_sched() and needs a final free in
> > delayed_put_task_struct(). If the bit is missing then a reference is
> > held and put_task_stack() will release it.
>
> I don't understand what this bit is for or why the logic needs to be this
> complicated. Can you set ->stack to NULL if and only if you freed it early?

What do I do if put_task_stack() is invoked from finish_task_switch()
and I can't free the stack right away but still have to do something
with it?

> > +static void free_thread_stack(struct task_struct *tsk, bool cache_only)
>
> This is messy. Please clean it up for real:
>
> static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
> {
>         for (...) try to put it in this slot;
> }
>
> And the callers can do things like:
>
> if (try_release_thread_stack_to_cache(...))
>         return;
>
> /* need to free for real */
> free it or delayed-free it.

I think I could use the first few bytes of the stack as a RCU-head. Let
me try that.

> --Andy

Sebastian
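
For the non-VMAP stack allocators this idea is straightforward, because
only the stack address itself has to survive the grace period: an
rcu_head placed at the start of the stack is enough. A sketch along the
lines of what the revised patch below does for the page-based
allocator:

static void thread_stack_free_rcu(struct rcu_head *rh)
{
        /* rh lives at the very start of the stack allocation. */
        __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
        struct rcu_head *rh = tsk->stack;

        call_rcu(rh, thread_stack_free_rcu);
        tsk->stack = NULL;
}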

Subject: Re: [PATCH 7/8] kernel/fork: Only cache the VMAP stack in finish_task_switch().

On 2022-02-14 13:24:05 [+0100], To Andy Lutomirski wrote:
> task::stack_vm_area and ::stack. Now I remember why I went for that bit.
> But I do have (hopefully) a better idea now.

I still need to update the patch description, but this should work:

diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 892562ebbd3aa..12b3f472b1358 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -70,6 +70,7 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
}

extern void put_task_stack(struct task_struct *tsk);
+extern void put_task_stack_sched(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
@@ -77,6 +78,7 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
}

static inline void put_task_stack(struct task_struct *tsk) {}
+static inline void put_task_stack_sched(struct task_struct *tsk) {}
#endif

void exit_task_stack_account(struct task_struct *tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 5f4e659a922e1..d7e118c86f9e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -193,6 +193,44 @@ static inline void free_task_struct(struct task_struct *tsk)
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

+struct vm_stack {
+ struct rcu_head rcu;
+ struct vm_struct *stack_vm_area;
+};
+
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CACHED_STACKS; i++) {
+ if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
+ continue;
+ return true;
+ }
+ return false;
+}
+
+static void thread_stack_free_rcu(struct rcu_head *rh)
+{
+ struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+
+ if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
+ return;
+
+ vfree(vm_stack);
+}
+
+static void thread_stack_delayed_free(struct task_struct *tsk)
+{
+ struct vm_stack *vm_stack = tsk->stack;
+
+ vm_stack->stack_vm_area = tsk->stack_vm_area;
+ call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
+
+ tsk->stack = NULL;
+ tsk->stack_vm_area = NULL;
+}
+
static int free_vm_stack_cache(unsigned int cpu)
{
struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
@@ -294,26 +332,39 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return 0;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool delayed_free)
{
- int i;
-
- for (i = 0; i < NR_CACHED_STACKS; i++) {
- if (this_cpu_cmpxchg(cached_stacks[i], NULL,
- tsk->stack_vm_area) != NULL)
- continue;
-
+ if (try_release_thread_stack_to_cache(tsk->stack_vm_area)) {
tsk->stack = NULL;
tsk->stack_vm_area = NULL;
return;
}
- vfree_atomic(tsk->stack);
+
+ if (delayed_free) {
+ thread_stack_delayed_free(tsk);
+ return;
+ }
+
+ vfree(tsk->stack);
tsk->stack = NULL;
tsk->stack_vm_area = NULL;
}

# else /* !CONFIG_VMAP_STACK */

+static void thread_stack_free_rcu(struct rcu_head *rh)
+{
+ __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
+}
+
+static void thread_stack_delayed_free(struct task_struct *tsk)
+{
+ struct rcu_head *rh = tsk->stack;
+
+ call_rcu(rh, thread_stack_free_rcu);
+ tsk->stack = NULL;
+}
+
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
@@ -326,8 +377,12 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return -ENOMEM;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool delayed_free)
{
+ if (delayed_free) {
+ thread_stack_delayed_free(tsk);
+ return;
+ }
__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
tsk->stack = NULL;
}
@@ -337,6 +392,19 @@ static void free_thread_stack(struct task_struct *tsk)

static struct kmem_cache *thread_stack_cache;

+static void thread_stack_free_rcu(struct rcu_head *rh)
+{
+ kmem_cache_free(thread_stack_cache, rh);
+}
+
+static void thread_stack_delayed_free(struct task_struct *tsk)
+{
+ struct rcu_head *rh = tsk->stack;
+
+ call_rcu(rh, thread_stack_free_rcu);
+ tsk->stack = NULL;
+}
+
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
unsigned long *stack;
@@ -346,8 +414,12 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return stack ? 0 : -ENOMEM;
}

-static void free_thread_stack(struct task_struct *tsk)
+static void free_thread_stack(struct task_struct *tsk, bool delayed_free)
{
+ if (delayed_free) {
+ thread_stack_delayed_free(tsk);
+ return;
+ }
kmem_cache_free(thread_stack_cache, tsk->stack);
tsk->stack = NULL;
}
@@ -372,7 +444,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
return stack ? 0 : -ENOMEM;
}

-static void free_thread_stack(struct task_struct *tsk, bool cache_only)
+static void free_thread_stack(struct task_struct *tsk, bool delayed_free)
{
arch_free_thread_stack(tsk);
}
@@ -464,19 +536,25 @@ void exit_task_stack_account(struct task_struct *tsk)
}
}

-static void release_task_stack(struct task_struct *tsk)
+static void release_task_stack(struct task_struct *tsk, bool delayed_free)
{
if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
return; /* Better to leak the stack than to free prematurely */

- free_thread_stack(tsk);
+ free_thread_stack(tsk, delayed_free);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
if (refcount_dec_and_test(&tsk->stack_refcount))
- release_task_stack(tsk);
+ release_task_stack(tsk, false);
+}
+
+void put_task_stack_sched(struct task_struct *tsk)
+{
+ if (refcount_dec_and_test(&tsk->stack_refcount))
+ release_task_stack(tsk, true);
}
#endif

@@ -490,7 +568,7 @@ void free_task(struct task_struct *tsk)
* The task is finally done with both the stack and thread_info,
* so free both.
*/
- release_task_stack(tsk);
+ release_task_stack(tsk, false);
#else
/*
* If the task had a separate stack allocation, it should be gone
@@ -990,7 +1068,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)

free_stack:
exit_task_stack_account(tsk);
- free_thread_stack(tsk);
+ free_thread_stack(tsk, false);
free_tsk:
free_task_struct(tsk);
return NULL;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fcf0c180617c2..defe31036930a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4895,8 +4895,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);

- /* Task is done with its stack. */
- put_task_stack(prev);
+ /*
+ * Task is done with its stack. Try to cache VMAP stack and
+ * delay free it otherwise.
+ */
+ put_task_stack_sched(prev);

put_task_struct_rcu_user(prev);
}
--
2.34.1


> > > --Andy
>
Sebastian