The function "irq_find_at_or_after" is at the risk of use-after-free
due to the race condition between the functions "delayer_free_desc"
and "irq_desc_get_irq". The function "delayer_free_desc" could be
called between "mt_find" and "irq_desc_get_irq" due to the absence
of any locks to ensure atomic operations on the "irq_desc" structure.
In this patch, we introduce a pair of locks, namely "rcu_read_lock"
and "rcu_read_unlock" to prevent the occurrence of use-after-free in
"irq_find_at_or_after".
Call trace:
dump_backtrace+0xec/0x138
show_stack+0x18/0x24
dump_stack_lvl+0x50/0x6c
print_report+0x1b0/0x714
kasan_report+0xc4/0x124
__do_kernel_fault+0xc0/0x368
do_bad_area+0x30/0xdc
do_tag_check_fault+0x20/0x34
do_mem_abort+0x58/0x118
el1_abort+0x3c/0x5c
el1h_64_sync_handler+0x54/0x90
el1h_64_sync+0x68/0x6c
irq_get_next_irq+0x58/0x84
show_stat+0x638/0x824
seq_read_iter+0x158/0x4ec
proc_reg_read_iter+0x94/0x12c
vfs_read+0x1e0/0x2c8
__arm64_sys_pread64+0x84/0xcc
invoke_syscall+0x58/0x114
el0_svc_common+0x80/0xe0
do_el0_svc+0x1c/0x28
el0_svc+0x38/0x68
el0t_64_sync_handler+0x68/0xbc
el0t_64_sync+0x1a8/0x1ac
Freed by task 4471:
kasan_save_stack+0x40/0x70
save_stack_info+0x34/0x128
kasan_save_free_info+0x18/0x28
____kasan_slab_free+0x254/0x25c
__kasan_slab_free+0x10/0x20
slab_free_freelist_hook+0x174/0x1e0
__kmem_cache_free+0xa4/0x1dc
kfree+0x64/0x128
irq_kobj_release+0x28/0x3c
kobject_put+0xcc/0x1e0
delayed_free_desc+0x14/0x2c
rcu_do_batch+0x214/0x720
rcu_core+0x1b0/0x408
rcu_core_si+0x10/0x20
__do_softirq+0x154/0x470
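
The "Freed by" stack corresponds to the delayed free path in
kernel/irq/irqdesc.c, where the final kfree() is deferred to an RCU
callback. A simplified sketch of that path (details may vary between
kernel versions):

/* Descriptor release is deferred past an RCU grace period */
static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	/*
	 * Drops the last kobject reference:
	 * irq_kobj_release() -> kfree(desc), as in the stack above.
	 */
	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* ... descriptor is removed from the sparse_irqs tree here ... */
	call_rcu(&desc->rcu, delayed_free_desc);
}

Because the free is deferred via call_rcu(), holding an RCU read lock
across mt_find() and the dereference is sufficient to keep the
descriptor alive, which is what this patch does.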
Signed-off-by: dicken.ding <[email protected]>
---
kernel/irq/irqdesc.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 88ac3652fcf2..14124e1bcb03 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -160,9 +160,15 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
 static unsigned int irq_find_at_or_after(unsigned int offset)
 {
 	unsigned long index = offset;
+	unsigned int irq = nr_irqs;
+
+	rcu_read_lock();
 	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
+	if (desc)
+		irq = irq_desc_get_irq(desc);
+	rcu_read_unlock();
 
-	return desc ? irq_desc_get_irq(desc) : nr_irqs;
+	return irq;
 }
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
--
2.18.0
On Thu, May 23 2024 at 19:39, dicken.ding wrote:
> The function "irq_find_at_or_after" is at the risk of use-after-free
> due to the race condition between the functions "delayer_free_desc"
> and "irq_desc_get_irq". The function "delayer_free_desc" could be
> called between "mt_find" and "irq_desc_get_irq" due to the absence
> of any locks to ensure atomic operations on the "irq_desc" structure.
>
> In this patch, we introduce a pair of locks, namely "rcu_read_lock"
> and "rcu_read_unlock" to prevent the occurrence of use-after-free in
> "irq_find_at_or_after".
Please read Documentation/process/maintainers-tip.rst and the general
documentation on how changelogs should be written.
Something like this:
irq_find_at_or_after() dereferences the interrupt descriptor which is
returned by mt_find() while neither holding sparse_irq_lock nor RCU
read lock, which means the descriptor can be freed between mt_find()
and the dereference.
Guard the access with an RCU read lock section.
Hmm?
> --- a/kernel/irq/irqdesc.c
> +++ b/kernel/irq/irqdesc.c
> @@ -160,9 +160,15 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
>  static unsigned int irq_find_at_or_after(unsigned int offset)
>  {
>  	unsigned long index = offset;
> +	unsigned int irq = nr_irqs;
> +
> +	rcu_read_lock();
>  	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
> +	if (desc)
> +		irq = irq_desc_get_irq(desc);
> +	rcu_read_unlock();
>
> -	return desc ? irq_desc_get_irq(desc) : nr_irqs;
> +	return irq;
I wrote guard above because that's what should be used for this:
	unsigned long index = offset;
	struct irq_desc *desc;

	guard(rcu)();
	desc = mt_find(&sparse_irqs, &index, nr_irqs);
	return desc ? irq_desc_get_irq(desc) : nr_irqs;
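
For readers who have not seen the helper: guard(rcu)() comes from the
scope-based cleanup infrastructure in <linux/cleanup.h>. It takes
rcu_read_lock() at the point of the declaration and emits
rcu_read_unlock() automatically on every exit from the enclosing
scope, so no explicit unlock or early-return handling is needed. An
annotated sketch of the resulting function (the comments mark the
implied lock/unlock points, not the literal macro expansion):

static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc;

	guard(rcu)();	/* rcu_read_lock() happens here ... */
	desc = mt_find(&sparse_irqs, &index, nr_irqs);

	/* ... and rcu_read_unlock() runs automatically at this return */
	return desc ? irq_desc_get_irq(desc) : nr_irqs;
}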
Thanks,
tglx