Currently, the condition "__this_cpu_read(ksoftirqd) == current" is
checked to ensure that rcu_softirq_qs() is invoked only in ksoftirqd
task context for non-RT kernels. However, this condition also holds
when ksoftirqd is interrupted and softirqs are handled on return from
interrupt, as in the following scenario:
ksoftirqd/0
->finish_task_switch
->put_task_struct_rcu_user
->call_rcu(&task->rcu, delayed_put_task_struct)
->__kasan_record_aux_stack
->pfn_valid
->rcu_read_lock_sched()
<interrupt>
__irq_exit_rcu
->__do_softirq
-> if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
__this_cpu_read(ksoftirqd) == current)
->rcu_softirq_qs
->RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map))
The RCU quiescent state is reported inside the RCU read-side critical
section, so the lockdep warning is triggered. Therefore remove the
"__this_cpu_read(ksoftirqd) == current" check, move the body of
__do_softirq() into a new handle_softirqs(bool kirqd) function, invoke
it from run_ksoftirqd() with kirqd set to true, and thereby ensure that
rcu_softirq_qs() is invoked only in ksoftirqd task context for non-RT
kernels.
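
For context, rcu_softirq_qs() uses lockdep to assert that it is not
called from within an RCU read-side critical section. A paraphrased
sketch (the exact condition and message in kernel/rcu/tree.c may
differ, and the in-tree check covers additional lock maps as well):

  void rcu_softirq_qs(void)
  {
          /*
           * Reporting a quiescent state while rcu_read_lock_sched() is
           * held is illegal, which is exactly what the interrupted
           * ksoftirqd context above runs into.
           */
          RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
                           "Illegal rcu_softirq_qs() in RCU read-side critical section");

          /* ... report the quiescent state ... */
  }
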
Reported-by: [email protected]
Link: https://lore.kernel.org/lkml/8f281a10-b85a-4586-9586-5bbc12dc784f@paulmck-laptop/T/#mea8aba4abfcb97bbf499d169ce7f30c4cff1b0e3
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Zqiang <[email protected]>
---
kernel/softirq.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b315b21fb28c..e991d735be0d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -508,7 +508,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
-asmlinkage __visible void __softirq_entry __do_softirq(void)
+static void handle_softirqs(bool kirqd)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -563,8 +563,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
pending >>= softirq_bit;
}
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
- __this_cpu_read(ksoftirqd) == current)
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kirqd)
rcu_softirq_qs();
local_irq_disable();
@@ -584,6 +583,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
current_restore_flags(old_flags, PF_MEMALLOC);
}
+asmlinkage __visible void __softirq_entry __do_softirq(void)
+{
+ handle_softirqs(false);
+}
+
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
@@ -921,7 +925,7 @@ static void run_ksoftirqd(unsigned int cpu)
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
- __do_softirq();
+ handle_softirqs(true);
ksoftirqd_run_end();
cond_resched();
return;
--
2.17.1
The following commit has been merged into the irq/urgent branch of tip:
Commit-ID: 1dd1eff161bd55968d3d46bc36def62d71fb4785
Gitweb: https://git.kernel.org/tip/1dd1eff161bd55968d3d46bc36def62d71fb4785
Author: Zqiang <[email protected]>
AuthorDate: Sat, 27 Apr 2024 18:28:08 +08:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Mon, 29 Apr 2024 05:03:51 +02:00
softirq: Fix suspicious RCU usage in __do_softirq()
Currently, the condition "__this_cpu_read(ksoftirqd) == current" is used to
invoke rcu_softirq_qs() in ksoftirqd task context for non-RT kernels.
This works correctly as long as the context is actually task context but
this condition is wrong when:
- the current task is ksoftirqd
- the task is interrupted in an RCU read-side critical section
- __do_softirq() is invoked on return from interrupt
Syzkaller triggered the following scenario:
-> finish_task_switch()
-> put_task_struct_rcu_user()
-> call_rcu(&task->rcu, delayed_put_task_struct)
-> __kasan_record_aux_stack()
-> pfn_valid()
-> rcu_read_lock_sched()
<interrupt>
__irq_exit_rcu()
-> __do_softirq()
-> if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
__this_cpu_read(ksoftirqd) == current)
-> rcu_softirq_qs()
-> RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map))
The RCU quiescent state is reported in the RCU read-side critical
section, so the lockdep warning is triggered.
Fix this by splitting out the inner workings of __do_softirq() into a
helper function which takes an argument to distinguish between ksoftirqd
task context and interrupted context. Invoke the helper from the relevant
call sites with the proper context information and use that for the
conditional invocation of rcu_softirq_qs().
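
Condensed, the change in the hunks below amounts to (sketch, surrounding
code elided):

  static void handle_softirqs(bool ksirqd)
  {
          /* ... softirq processing loop ... */

          /* Report a quiescent state only from ksoftirqd task context. */
          if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
                  rcu_softirq_qs();

          /* ... */
  }

  /* All other callers (e.g. interrupt exit): no quiescent state report. */
  asmlinkage __visible void __softirq_entry __do_softirq(void)
  {
          handle_softirqs(false);
  }

  static void run_ksoftirqd(unsigned int cpu)
  {
          /* ... */
          handle_softirqs(true);  /* ksoftirqd task context */
          /* ... */
  }
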
Reported-by: [email protected]
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Link: https://lore.kernel.org/lkml/8f281a10-b85a-4586-9586-5bbc12dc784f@paulmck-laptop/T/#mea8aba4abfcb97bbf499d169ce7f30c4cff1b0e3
---
kernel/softirq.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b315b21..0258201 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -508,7 +508,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
-asmlinkage __visible void __softirq_entry __do_softirq(void)
+static void handle_softirqs(bool ksirqd)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -563,8 +563,7 @@ restart:
pending >>= softirq_bit;
}
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
- __this_cpu_read(ksoftirqd) == current)
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
rcu_softirq_qs();
local_irq_disable();
@@ -584,6 +583,11 @@ restart:
current_restore_flags(old_flags, PF_MEMALLOC);
}
+asmlinkage __visible void __softirq_entry __do_softirq(void)
+{
+ handle_softirqs(false);
+}
+
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
@@ -921,7 +925,7 @@ static void run_ksoftirqd(unsigned int cpu)
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
- __do_softirq();
+ handle_softirqs(true);
ksoftirqd_run_end();
cond_resched();
return;
On Sat, Apr 27, 2024 at 06:28:08PM +0800, Zqiang wrote:
> Currently, the condition "__this_cpu_read(ksoftirqd) == current" is
> checked to ensure that rcu_softirq_qs() is invoked only in ksoftirqd
> task context for non-RT kernels. However, this condition also holds
> when ksoftirqd is interrupted and softirqs are handled on return from
> interrupt, as in the following scenario:
>
> ksoftirqd/0
> ->finish_task_switch
> ->put_task_struct_rcu_user
> ->call_rcu(&task->rcu, delayed_put_task_struct)
> ->__kasan_record_aux_stack
> ->pfn_valid
> ->rcu_read_lock_sched()
> <interrupt>
> __irq_exit_rcu
> ->__do_softirq
> -> if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
> __this_cpu_read(ksoftirqd) == current)
> ->rcu_softirq_qs
> ->RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map))
>
> The RCU quiescent state is reported inside the RCU read-side critical
> section, so the lockdep warning is triggered. Therefore remove the
> "__this_cpu_read(ksoftirqd) == current" check, move the body of
> __do_softirq() into a new handle_softirqs(bool kirqd) function, invoke
> it from run_ksoftirqd() with kirqd set to true, and thereby ensure that
> rcu_softirq_qs() is invoked only in ksoftirqd task context for non-RT
> kernels.
>
> Reported-by: [email protected]
> Link: https://lore.kernel.org/lkml/8f281a10-b85a-4586-9586-5bbc12dc784f@paulmck-laptop/T/#mea8aba4abfcb97bbf499d169ce7f30c4cff1b0e3
> Suggested-by: Thomas Gleixner <[email protected]>
> Signed-off-by: Zqiang <[email protected]>
Reviewed-by: Paul E. McKenney <[email protected]>
> ---
> kernel/softirq.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/softirq.c b/kernel/softirq.c
> index b315b21fb28c..e991d735be0d 100644
> --- a/kernel/softirq.c
> +++ b/kernel/softirq.c
> @@ -508,7 +508,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
> static inline void lockdep_softirq_end(bool in_hardirq) { }
> #endif
>
> -asmlinkage __visible void __softirq_entry __do_softirq(void)
> +static void handle_softirqs(bool kirqd)
> {
> unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
> unsigned long old_flags = current->flags;
> @@ -563,8 +563,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
> pending >>= softirq_bit;
> }
>
> - if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
> - __this_cpu_read(ksoftirqd) == current)
> + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kirqd)
> rcu_softirq_qs();
>
> local_irq_disable();
> @@ -584,6 +583,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
> current_restore_flags(old_flags, PF_MEMALLOC);
> }
>
> +asmlinkage __visible void __softirq_entry __do_softirq(void)
> +{
> + handle_softirqs(false);
> +}
> +
> /**
> * irq_enter_rcu - Enter an interrupt context with RCU watching
> */
> @@ -921,7 +925,7 @@ static void run_ksoftirqd(unsigned int cpu)
> * We can safely run softirq on inline stack, as we are not deep
> * in the task stack here.
> */
> - __do_softirq();
> + handle_softirqs(true);
> ksoftirqd_run_end();
> cond_resched();
> return;
> --
> 2.17.1
>
On Mon, Apr 29, 2024 at 03:08:02AM -0000, tip-bot2 for Zqiang wrote:
> The following commit has been merged into the irq/urgent branch of tip:
>
> Commit-ID: 1dd1eff161bd55968d3d46bc36def62d71fb4785
> Gitweb: https://git.kernel.org/tip/1dd1eff161bd55968d3d46bc36def62d71fb4785
> Author: Zqiang <[email protected]>
> AuthorDate: Sat, 27 Apr 2024 18:28:08 +08:00
> Committer: Thomas Gleixner <[email protected]>
> CommitterDate: Mon, 29 Apr 2024 05:03:51 +02:00
>
> softirq: Fix suspicious RCU usage in __do_softirq()
>
> Currently, the condition "__this_cpu_read(ksoftirqd) == current" is used to
> invoke rcu_softirq_qs() in ksoftirqd task context for non-RT kernels.
>
> This works correctly as long as the context is actually task context but
> this condition is wrong when:
>
> - the current task is ksoftirqd
> - the task is interrupted in an RCU read-side critical section
> - __do_softirq() is invoked on return from interrupt
>
> Syzkaller triggered the following scenario:
>
> -> finish_task_switch()
> -> put_task_struct_rcu_user()
> -> call_rcu(&task->rcu, delayed_put_task_struct)
> -> __kasan_record_aux_stack()
> -> pfn_valid()
> -> rcu_read_lock_sched()
> <interrupt>
> __irq_exit_rcu()
> -> __do_softirq()
> -> if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
> __this_cpu_read(ksoftirqd) == current)
> -> rcu_softirq_qs()
> -> RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map))
>
> The RCU quiescent state is reported in the RCU read-side critical
> section, so the lockdep warning is triggered.
>
> Fix this by splitting out the inner workings of __do_softirq() into a
> helper function which takes an argument to distinguish between ksoftirqd
> task context and interrupted context. Invoke the helper from the relevant
> call sites with the proper context information and use that for the
> conditional invocation of rcu_softirq_qs().
>
> Reported-by: [email protected]
> Suggested-by: Thomas Gleixner <[email protected]>
> Signed-off-by: Zqiang <[email protected]>
> Signed-off-by: Thomas Gleixner <[email protected]>
> Link: https://lore.kernel.org/r/[email protected]
> Link: https://lore.kernel.org/lkml/8f281a10-b85a-4586-9586-5bbc12dc784f@paulmck-laptop/T/#mea8aba4abfcb97bbf499d169ce7f30c4cff1b0e3
I can add that this also fixes a UAF reported by syzbot
(partially; the other part of the UAF is an unrelated bug):
Reported-by: [email protected]
> ---
> kernel/softirq.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/softirq.c b/kernel/softirq.c
> index b315b21..0258201 100644
> --- a/kernel/softirq.c
> +++ b/kernel/softirq.c
> @@ -508,7 +508,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
> static inline void lockdep_softirq_end(bool in_hardirq) { }
> #endif
>
> -asmlinkage __visible void __softirq_entry __do_softirq(void)
> +static void handle_softirqs(bool ksirqd)
> {
> unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
> unsigned long old_flags = current->flags;
> @@ -563,8 +563,7 @@ restart:
> pending >>= softirq_bit;
> }
>
> - if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
> - __this_cpu_read(ksoftirqd) == current)
> + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
> rcu_softirq_qs();
>
> local_irq_disable();
> @@ -584,6 +583,11 @@ restart:
> current_restore_flags(old_flags, PF_MEMALLOC);
> }
>
> +asmlinkage __visible void __softirq_entry __do_softirq(void)
> +{
> + handle_softirqs(false);
> +}
> +
> /**
> * irq_enter_rcu - Enter an interrupt context with RCU watching
> */
> @@ -921,7 +925,7 @@ static void run_ksoftirqd(unsigned int cpu)
> * We can safely run softirq on inline stack, as we are not deep
> * in the task stack here.
> */
> - __do_softirq();
> + handle_softirqs(true);
> ksoftirqd_run_end();
> cond_resched();
> return;