Hello,
syzbot found the following issue on:
HEAD commit: ba16c1cf11c9 Merge tag 'edac_urgent_for_v6.9' of git://git..
git tree: upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=10bd04e0980000
kernel config: https://syzkaller.appspot.com/x/.config?x=6d14c12b661fb43
dashboard link: https://syzkaller.appspot.com/bug?extid=df038d463cca332e8414
compiler: Debian clang version 15.0.6, GNU ld (GNU Binutils for Debian) 2.40
Unfortunately, I don't have any reproducer for this issue yet.
Downloadable assets:
disk image: https://storage.googleapis.com/syzbot-assets/064cbe6db807/disk-ba16c1cf.raw.xz
vmlinux: https://storage.googleapis.com/syzbot-assets/64148bd6b9f3/vmlinux-ba16c1cf.xz
kernel image: https://storage.googleapis.com/syzbot-assets/26dda4f66cf6/bzImage-ba16c1cf.xz
IMPORTANT: if you fix the issue, please add the following tag to the commit:
Reported-by: [email protected]
==================================================================
BUG: KASAN: slab-use-after-free in instrument_atomic_read_write include/linux/instrumented.h:96 [inline]
BUG: KASAN: slab-use-after-free in atomic_fetch_sub_release include/linux/atomic/atomic-instrumented.h:400 [inline]
BUG: KASAN: slab-use-after-free in __refcount_sub_and_test include/linux/refcount.h:264 [inline]
BUG: KASAN: slab-use-after-free in __refcount_dec_and_test include/linux/refcount.h:307 [inline]
BUG: KASAN: slab-use-after-free in refcount_dec_and_test include/linux/refcount.h:325 [inline]
BUG: KASAN: slab-use-after-free in p9_fid_put include/net/9p/client.h:275 [inline]
BUG: KASAN: slab-use-after-free in v9fs_free_request+0x5f/0xe0 fs/9p/vfs_addr.c:128
Write of size 4 at addr ffff8880624f428c by task kworker/u8:12/18964
CPU: 1 PID: 18964 Comm: kworker/u8:12 Not tainted 6.9.0-rc7-syzkaller-00188-gba16c1cf11c9 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024
Workqueue: events_unbound v9fs_upload_to_server_worker
Call Trace:
<TASK>
__dump_stack lib/dump_stack.c:88 [inline]
dump_stack_lvl+0x241/0x360 lib/dump_stack.c:114
print_address_description mm/kasan/report.c:377 [inline]
print_report+0x169/0x550 mm/kasan/report.c:488
kasan_report+0x143/0x180 mm/kasan/report.c:601
kasan_check_range+0x282/0x290 mm/kasan/generic.c:189
instrument_atomic_read_write include/linux/instrumented.h:96 [inline]
atomic_fetch_sub_release include/linux/atomic/atomic-instrumented.h:400 [inline]
__refcount_sub_and_test include/linux/refcount.h:264 [inline]
__refcount_dec_and_test include/linux/refcount.h:307 [inline]
refcount_dec_and_test include/linux/refcount.h:325 [inline]
p9_fid_put include/net/9p/client.h:275 [inline]
v9fs_free_request+0x5f/0xe0 fs/9p/vfs_addr.c:128
netfs_free_request+0x246/0x600 fs/netfs/objects.c:97
v9fs_upload_to_server fs/9p/vfs_addr.c:36 [inline]
v9fs_upload_to_server_worker+0x200/0x3e0 fs/9p/vfs_addr.c:44
process_one_work kernel/workqueue.c:3267 [inline]
process_scheduled_works+0xa12/0x17c0 kernel/workqueue.c:3348
worker_thread+0x86d/0xd70 kernel/workqueue.c:3429
kthread+0x2f2/0x390 kernel/kthread.c:388
ret_from_fork+0x4d/0x80 arch/x86/kernel/process.c:147
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
</TASK>
Allocated by task 5219:
kasan_save_stack mm/kasan/common.c:47 [inline]
kasan_save_track+0x3f/0x80 mm/kasan/common.c:68
poison_kmalloc_redzone mm/kasan/common.c:370 [inline]
__kasan_kmalloc+0x98/0xb0 mm/kasan/common.c:387
kasan_kmalloc include/linux/kasan.h:211 [inline]
kmalloc_trace+0x1db/0x370 mm/slub.c:4003
kmalloc include/linux/slab.h:628 [inline]
kzalloc include/linux/slab.h:749 [inline]
p9_fid_create+0x4f/0x230 net/9p/client.c:853
p9_client_walk+0x103/0x690 net/9p/client.c:1154
clone_fid fs/9p/fid.h:23 [inline]
v9fs_fid_clone fs/9p/fid.h:33 [inline]
v9fs_file_open+0x285/0xa60 fs/9p/vfs_file.c:56
do_dentry_open+0x909/0x15a0 fs/open.c:955
do_open fs/namei.c:3642 [inline]
path_openat+0x2860/0x3240 fs/namei.c:3799
do_filp_open+0x235/0x490 fs/namei.c:3826
do_sys_openat2+0x13e/0x1d0 fs/open.c:1406
do_sys_open fs/open.c:1421 [inline]
__do_sys_creat fs/open.c:1497 [inline]
__se_sys_creat fs/open.c:1491 [inline]
__x64_sys_creat+0x123/0x170 fs/open.c:1491
do_syscall_x64 arch/x86/entry/common.c:52 [inline]
do_syscall_64+0xf5/0x240 arch/x86/entry/common.c:83
entry_SYSCALL_64_after_hwframe+0x77/0x7f
Freed by task 32641:
kasan_save_stack mm/kasan/common.c:47 [inline]
kasan_save_track+0x3f/0x80 mm/kasan/common.c:68
kasan_save_free_info+0x40/0x50 mm/kasan/generic.c:579
poison_slab_object+0xa6/0xe0 mm/kasan/common.c:240
__kasan_slab_free+0x37/0x60 mm/kasan/common.c:256
kasan_slab_free include/linux/kasan.h:184 [inline]
slab_free_hook mm/slub.c:2111 [inline]
slab_free mm/slub.c:4286 [inline]
kfree+0x153/0x3b0 mm/slub.c:4396
p9_fid_destroy net/9p/client.c:889 [inline]
p9_client_destroy+0x1fb/0x660 net/9p/client.c:1070
v9fs_session_close+0x51/0x210 fs/9p/v9fs.c:506
v9fs_kill_super+0x5c/0x90 fs/9p/vfs_super.c:196
deactivate_locked_super+0xc6/0x130 fs/super.c:472
cleanup_mnt+0x426/0x4c0 fs/namespace.c:1267
task_work_run+0x251/0x310 kernel/task_work.c:180
resume_user_mode_work include/linux/resume_user_mode.h:50 [inline]
exit_to_user_mode_loop kernel/entry/common.c:114 [inline]
exit_to_user_mode_prepare include/linux/entry-common.h:328 [inline]
__syscall_exit_to_user_mode_work kernel/entry/common.c:207 [inline]
syscall_exit_to_user_mode+0x168/0x370 kernel/entry/common.c:218
do_syscall_64+0x102/0x240 arch/x86/entry/common.c:89
entry_SYSCALL_64_after_hwframe+0x77/0x7f
The buggy address belongs to the object at ffff8880624f4280
which belongs to the cache kmalloc-96 of size 96
The buggy address is located 12 bytes inside of
freed 96-byte region [ffff8880624f4280, ffff8880624f42e0)
The buggy address belongs to the physical page:
page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x624f4
anon flags: 0xfff00000000800(slab|node=0|zone=1|lastcpupid=0x7ff)
page_type: 0xffffffff()
raw: 00fff00000000800 ffff888015041780 ffffea00007efa00 dead000000000005
raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
page_owner tracks the page as allocated
page last allocated via order 0, migratetype Unmovable, gfp_mask 0x112c40(GFP_NOFS|__GFP_NOWARN|__GFP_NORETRY|__GFP_HARDWALL), pid 32059, tgid 32059 (syz-executor.0), ts 2629641976105, free_ts 2629624772681
set_page_owner include/linux/page_owner.h:32 [inline]
post_alloc_hook+0x1ea/0x210 mm/page_alloc.c:1534
prep_new_page mm/page_alloc.c:1541 [inline]
get_page_from_freelist+0x3410/0x35b0 mm/page_alloc.c:3317
__alloc_pages+0x256/0x6c0 mm/page_alloc.c:4575
__alloc_pages_node include/linux/gfp.h:238 [inline]
alloc_pages_node include/linux/gfp.h:261 [inline]
alloc_slab_page+0x5f/0x160 mm/slub.c:2180
allocate_slab mm/slub.c:2343 [inline]
new_slab+0x84/0x2f0 mm/slub.c:2396
___slab_alloc+0xc73/0x1260 mm/slub.c:3530
__slab_alloc mm/slub.c:3615 [inline]
__slab_alloc_node mm/slub.c:3668 [inline]
slab_alloc_node mm/slub.c:3841 [inline]
__do_kmalloc_node mm/slub.c:3971 [inline]
__kmalloc+0x2e5/0x4a0 mm/slub.c:3985
kmalloc include/linux/slab.h:632 [inline]
kzalloc include/linux/slab.h:749 [inline]
tomoyo_encode2 security/tomoyo/realpath.c:45 [inline]
tomoyo_encode+0x26f/0x540 security/tomoyo/realpath.c:80
tomoyo_realpath_from_path+0x59e/0x5e0 security/tomoyo/realpath.c:283
tomoyo_get_realpath security/tomoyo/file.c:151 [inline]
tomoyo_path_perm+0x2b7/0x740 security/tomoyo/file.c:822
tomoyo_path_unlink+0xd0/0x110 security/tomoyo/tomoyo.c:162
security_path_unlink+0xe3/0x140 security/security.c:1857
do_unlinkat+0x3e6/0x830 fs/namei.c:4396
__do_sys_unlink fs/namei.c:4447 [inline]
__se_sys_unlink fs/namei.c:4445 [inline]
__x64_sys_unlink+0x49/0x60 fs/namei.c:4445
do_syscall_x64 arch/x86/entry/common.c:52 [inline]
do_syscall_64+0xf5/0x240 arch/x86/entry/common.c:83
entry_SYSCALL_64_after_hwframe+0x77/0x7f
page last free pid 32059 tgid 32059 stack trace:
reset_page_owner include/linux/page_owner.h:25 [inline]
free_pages_prepare mm/page_alloc.c:1141 [inline]
free_unref_page_prepare+0x986/0xab0 mm/page_alloc.c:2347
free_unref_page+0x37/0x3f0 mm/page_alloc.c:2487
__slab_free+0x31b/0x3d0 mm/slub.c:4198
qlink_free mm/kasan/quarantine.c:163 [inline]
qlist_free_all+0x5e/0xc0 mm/kasan/quarantine.c:179
kasan_quarantine_reduce+0x14f/0x170 mm/kasan/quarantine.c:286
__kasan_slab_alloc+0x23/0x80 mm/kasan/common.c:322
kasan_slab_alloc include/linux/kasan.h:201 [inline]
slab_post_alloc_hook mm/slub.c:3804 [inline]
slab_alloc_node mm/slub.c:3851 [inline]
__do_kmalloc_node mm/slub.c:3971 [inline]
__kmalloc+0x1e2/0x4a0 mm/slub.c:3985
kmalloc include/linux/slab.h:632 [inline]
tomoyo_realpath_from_path+0xcf/0x5e0 security/tomoyo/realpath.c:251
tomoyo_get_realpath security/tomoyo/file.c:151 [inline]
tomoyo_path_number_perm+0x23a/0x880 security/tomoyo/file.c:723
security_file_ioctl+0x77/0xb0 security/security.c:2764
__do_sys_ioctl fs/ioctl.c:898 [inline]
__se_sys_ioctl+0x47/0x170 fs/ioctl.c:890
do_syscall_x64 arch/x86/entry/common.c:52 [inline]
do_syscall_64+0xf5/0x240 arch/x86/entry/common.c:83
entry_SYSCALL_64_after_hwframe+0x77/0x7f
Memory state around the buggy address:
ffff8880624f4180: fa fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
ffff8880624f4200: 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc
>ffff8880624f4280: fa fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
^
ffff8880624f4300: 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc
ffff8880624f4380: fa fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
==================================================================
---
This report is generated by a bot. It may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at [email protected].
syzbot will keep track of this issue. See:
https://goo.gl/tpsmEJ#status for how to communicate with syzbot.
If the report is already addressed, let syzbot know by replying with:
#syz fix: exact-commit-title
If you want to overwrite report's subsystems, reply with:
#syz set subsystems: new-subsystem
(See the list of subsystem names on the web dashboard)
If the report is a duplicate of another one, reply with:
#syz dup: exact-subject-of-another-report
If you want to undo deduplication, reply with:
#syz undup
Hello, I found a reproducer for this bug.
If you fix this issue, please add the following tag to the commit:
Reported-by: xingwei lee <[email protected]>
Reported-by: yue sun <[email protected]>
I use the same kernel as syzbot instance
git tree: upstream ba16c1cf11c9f264b5455cb7d57267b39925409a
kernel config: https://syzkaller.appspot.com/x/.config?x=6d14c12b661fb43
dashboard link: https://syzkaller.appspot.com/bug?extid=df038d463cca332e8414
compiler: Debian clang version 15.0.6, GNU ld (GNU Binutils for Debian) 2.40
Since this may be a concurrency bug, we need to run the PoC for a
while, e.g. ./syz-execprog -repeat 0 ../prog for 5 minutes; I tested
that only the syzlang repro works.
[ 106.007857][ T39]
==================================================================
[ 106.008516][ T39] BUG: KASAN: slab-use-after-free in
v9fs_free_request+0x69/0xf0
[ 106.009146][ T39] Write of size 4 at addr ffff88802842620c by task
kworker/u17:1/39
[ 106.009773][ T39]
[ 106.009976][ T39] CPU: 2 PID: 39 Comm: kworker/u17:1 Not tainted
6.9.0-rc7-00136-gf4345f05c0df-dirty #6
[ 106.010722][ T39] Hardware name: QEMU Standard PC (i440FX + PIIX,
1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
[ 106.011521][ T39] Workqueue: events_unbound v9fs_upload_to_server_worker
[ 106.012083][ T39] Call Trace:
[ 106.012351][ T39] <TASK>
[ 106.012585][ T39] dump_stack_lvl+0x250/0x380
[ 106.012976][ T39] ? __pfx_dump_stack_lvl+0x10/0x10
[ 106.013396][ T39] ? __pfx__printk+0x10/0x10
[ 106.013783][ T39] ? _printk+0xda/0x120
[ 106.014118][ T39] ? __virt_addr_valid+0x19b/0x580
[ 106.014532][ T39] ? __virt_addr_valid+0x19b/0x580
[ 106.014943][ T39] print_report+0x169/0x550
[ 106.015306][ T39] ? __virt_addr_valid+0x19b/0x580
[ 106.015712][ T39] ? __virt_addr_valid+0x19b/0x580
[ 106.016119][ T39] ? __virt_addr_valid+0x4a8/0x580
[ 106.016526][ T39] ? __phys_addr+0xc3/0x180
[ 106.016892][ T39] ? v9fs_free_request+0x69/0xf0
[ 106.017287][ T39] kasan_report+0x143/0x180
[ 106.017650][ T39] ? v9fs_free_request+0x69/0xf0
[ 106.018053][ T39] kasan_check_range+0x282/0x290
[ 106.018444][ T39] v9fs_free_request+0x69/0xf0
[ 106.018828][ T39] ? __pfx_v9fs_free_request+0x10/0x10
[ 106.019260][ T39] netfs_free_request+0x259/0x630
[ 106.019660][ T39] ? netfs_free_subrequest+0x26f/0x420
[ 106.020092][ T39] v9fs_upload_to_server_worker+0x211/0x400
[ 106.020564][ T39] ? __pfx_v9fs_upload_to_server_worker+0x10/0x10
[ 106.021055][ T39] ? process_scheduled_works+0x93a/0x1840
[ 106.021510][ T39] process_scheduled_works+0xa39/0x1840
[ 106.021966][ T39] ? __pfx_process_scheduled_works+0x10/0x10
[ 106.022441][ T39] ? assign_work+0x3b7/0x430
[ 106.022811][ T39] worker_thread+0x89c/0xdc0
[ 106.023187][ T39] ? __kthread_parkme+0x172/0x1d0
[ 106.023589][ T39] kthread+0x310/0x3b0
[ 106.023918][ T39] ? __pfx_worker_thread+0x10/0x10
[ 106.024330][ T39] ? __pfx_kthread+0x10/0x10
[ 106.024702][ T39] ret_from_fork+0x52/0x80
[ 106.025062][ T39] ? __pfx_kthread+0x10/0x10
[ 106.025432][ T39] ret_from_fork_asm+0x1a/0x30
[ 106.025826][ T39] </TASK>
[ 106.026072][ T39]
[ 106.026259][ T39] Allocated by task 34417:
[ 106.026608][ T39] kasan_save_track+0x3f/0x80
[ 106.026981][ T39] __kasan_kmalloc+0x98/0xb0
[ 106.027347][ T39] kmalloc_trace+0x1db/0x370
[ 106.027721][ T39] p9_fid_create+0x54/0x230
[ 106.028083][ T39] p9_client_walk+0x118/0x6c0
[ 106.028456][ T39] v9fs_file_open+0x2b9/0xae0
[ 106.028827][ T39] do_dentry_open+0x93d/0x1610
[ 106.029213][ T39] path_openat+0x29ba/0x33f0
[ 106.029578][ T39] do_filp_open+0x23c/0x4a0
[ 106.029946][ T39] do_sys_openat2+0x122/0x1c0
[ 106.030323][ T39] __x64_sys_creat+0x128/0x170
[ 106.030706][ T39] do_syscall_64+0xf5/0x240
[ 106.031064][ T39] entry_SYSCALL_64_after_hwframe+0x77/0x7f
[ 106.031529][ T39]
[ 106.031722][ T39] Freed by task 8234:
[ 106.032036][ T39] kasan_save_track+0x3f/0x80
[ 106.032412][ T39] kasan_save_free_info+0x40/0x50
[ 106.032817][ T39] poison_slab_object+0xa6/0xe0
[ 106.033205][ T39] __kasan_slab_free+0x37/0x60
[ 106.033582][ T39] kfree+0x153/0x3b0
[ 106.033905][ T39] p9_client_destroy+0x205/0x6b0
[ 106.034296][ T39] v9fs_session_close+0x5b/0x220
[ 106.034690][ T39] v9fs_kill_super+0x61/0x90
[ 106.035056][ T39] deactivate_locked_super+0xcb/0x140
[ 106.035477][ T39] cleanup_mnt+0x444/0x4e0
[ 106.035836][ T39] task_work_run+0x25c/0x320
[ 106.036206][ T39] syscall_exit_to_user_mode+0x168/0x370
[ 106.036650][ T39] do_syscall_64+0x102/0x240
[ 106.037023][ T39] entry_SYSCALL_64_after_hwframe+0x77/0x7f
[ 106.037485][ T39]
[ 106.037673][ T39] The buggy address belongs to the object at ffff888028426200
[ 106.037673][ T39] which belongs to the cache kmalloc-96 of size 96
[ 106.038739][ T39] The buggy address is located 12 bytes inside of
[ 106.038739][ T39] freed 96-byte region [ffff888028426200, ffff888028426260)
[ 106.039789][ T39]
[ 106.039979][ T39] The buggy address belongs to the physical page:
[ 106.040480][ T39] page: refcount:1 mapcount:0
mapping:0000000000000000 index:0x0 pfn:0x28426
[ 106.041162][ T39] flags: 0xfff00000000800(slab|node=0|zone=1|lastcpupid=0x7ff)
[ 106.041770][ T39] page_type: 0xffffffff()
[ 106.042117][ T39] raw: 00fff00000000800 ffff888015042780
ffffea00006db8c0 dead000000000002
[ 106.042783][ T39] raw: 0000000000000000 0000000000200020
00000001ffffffff 0000000000000000
[ 106.043442][ T39] page dumped because: kasan: bad access detected
[ 106.043944][ T39] page_owner tracks the page as allocated
[ 106.044388][ T39] page last allocated via order 0, migratetype
Unmovable, gfp_mask 0x12820(GFP_ATOMIC|__GFP_NOWARN|__GFP_NORETRY),
pid 8265, tgid 626513798 (syz-executor), ts 8265, free_ts 69218878619
[ 106.045775][ T39] post_alloc_hook+0x1ea/0x210
[ 106.046156][ T39] get_page_from_freelist+0x3410/0x35b0
[ 106.046598][ T39] __alloc_pages+0x256/0x6c0
[ 106.046963][ T39] alloc_slab_page+0x5f/0x160
[ 106.047337][ T39] new_slab+0x84/0x2f0
[ 106.047661][ T39] ___slab_alloc+0xc73/0x1260
[ 106.048034][ T39] kmalloc_trace+0x269/0x370
[ 106.048404][ T39] nsim_fib_event_nb+0x191/0x1130
[ 106.048812][ T39] notifier_call_chain+0x1ae/0x400
[ 106.049213][ T39] atomic_notifier_call_chain+0xea/0x1a0
[ 106.049651][ T39] call_fib_notifiers+0x3f/0x70
[ 106.050049][ T39] call_fib_entry_notifiers+0x218/0x380
[ 106.050492][ T39] fib_table_insert+0xf43/0x1fe0
[ 106.050888][ T39] fib_magic+0x3df/0x620
[ 106.051225][ T39] fib_add_ifaddr+0x156/0x610
[ 106.051597][ T39] fib_inetaddr_event+0x16f/0x200
[ 106.051996][ T39] page last free pid 34 tgid 34 stack trace:
[ 106.052460][ T39] free_unref_page_prepare+0x97b/0xaa0
[ 106.052893][ T39] free_unref_page+0x37/0x3f0
[ 106.053264][ T39] __put_partials+0xeb/0x130
[ 106.053628][ T39] put_cpu_partial+0x17c/0x250
[ 106.054014][ T39] __slab_free+0x2ea/0x3d0
[ 106.054371][ T39] qlist_free_all+0x5e/0xc0
[ 106.054733][ T39] kasan_quarantine_reduce+0x14f/0x170
[ 106.055163][ T39] __kasan_slab_alloc+0x23/0x80
[ 106.055546][ T39] kmalloc_trace+0x16f/0x370
[ 106.055913][ T39] nsim_fib_event_work+0xe30/0x41d0
[ 106.056325][ T39] process_scheduled_works+0xa39/0x1840
[ 106.056759][ T39] worker_thread+0x89c/0xdc0
[ 106.057123][ T39] kthread+0x310/0x3b0
[ 106.057451][ T39] ret_from_fork+0x52/0x80
[ 106.057811][ T39] ret_from_fork_asm+0x1a/0x30
[ 106.058194][ T39]
[ 106.058383][ T39] Memory state around the buggy address:
[ 106.058816][ T39] ffff888028426100: fa fb fb fb fb fb fb fb fb fb fb
fb fc fc fc fc
[ 106.059435][ T39] ffff888028426180: 00 00 00 00 00 00 00 00 00 fc fc
fc fc fc fc fc
[ 106.060052][ T39] >ffff888028426200: fa fb fb fb fb fb fb fb fb fb
fb fb fc fc fc fc
[ 106.060667][ T39] ^
[ 106.061009][ T39] ffff888028426280: 00 00 00 00 00 00 00 00 00 00 fc
fc fc fc fc fc
[ 106.061630][ T39] ffff888028426300: fa fb fb fb fb fb fb fb fb fb fb
fb fc fc fc fc
[ 106.062256][ T39]
==================================================================
[ 106.071000][ T39] Kernel panic - not syncing: KASAN: panic_on_warn set ...
[ 106.071654][ T39] CPU: 0 PID: 39 Comm: kworker/u17:1 Not tainted
6.9.0-rc7-00136-gf4345f05c0df-dirty #6
[ 106.072473][ T39] Hardware name: QEMU Standard PC (i440FX + PIIX,
1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
[ 106.073339][ T39] Workqueue: events_unbound v9fs_upload_to_server_worker
[ 106.073954][ T39] Call Trace:
[ 106.074242][ T39] <TASK>
[ 106.074500][ T39] dump_stack_lvl+0x250/0x380
[ 106.074910][ T39] ? __pfx_dump_stack_lvl+0x10/0x10
[ 106.075361][ T39] ? __pfx__printk+0x10/0x10
[ 106.075765][ T39] ? preempt_schedule+0xe1/0xf0
[ 106.076188][ T39] ? vscnprintf+0x64/0x90
[ 106.076563][ T39] panic+0x35a/0x890
[ 106.076905][ T39] ? check_panic_on_warn+0x2b/0xb0
[ 106.077351][ T39] ? __pfx_panic+0x10/0x10
[ 106.077738][ T39] ? _raw_spin_unlock_irqrestore+0x130/0x140
[ 106.078260][ T39] ? __pfx__raw_spin_unlock_irqrestore+0x10/0x10
[ 106.078803][ T39] ? print_report+0x502/0x550
[ 106.079216][ T39] check_panic_on_warn+0x8f/0xb0
[ 106.079643][ T39] ? v9fs_free_request+0x69/0xf0
[ 106.080066][ T39] end_report+0x77/0x160
[ 106.080434][ T39] kasan_report+0x154/0x180
[ 106.080821][ T39] ? v9fs_free_request+0x69/0xf0
[ 106.081249][ T39] kasan_check_range+0x282/0x290
[ 106.081674][ T39] v9fs_free_request+0x69/0xf0
[ 106.082092][ T39] ? __pfx_v9fs_free_request+0x10/0x10
[ 106.082562][ T39] netfs_free_request+0x259/0x630
[ 106.082991][ T39] ? netfs_free_subrequest+0x26f/0x420
[ 106.083475][ T39] v9fs_upload_to_server_worker+0x211/0x400
[ 106.083992][ T39] ? __pfx_v9fs_upload_to_server_worker+0x10/0x10
[ 106.084551][ T39] ? process_scheduled_works+0x93a/0x1840
[ 106.085056][ T39] process_scheduled_works+0xa39/0x1840
[ 106.085554][ T39] ? __pfx_process_scheduled_works+0x10/0x10
[ 106.086086][ T39] ? assign_work+0x3b7/0x430
[ 106.086498][ T39] worker_thread+0x89c/0xdc0
[ 106.086908][ T39] ? __kthread_parkme+0x172/0x1d0
[ 106.087354][ T39] kthread+0x310/0x3b0
[ 106.087717][ T39] ? __pfx_worker_thread+0x10/0x10
[ 106.088171][ T39] ? __pfx_kthread+0x10/0x10
[ 106.088567][ T39] ret_from_fork+0x52/0x80
[ 106.088952][ T39] ? __pfx_kthread+0x10/0x10
[ 106.089352][ T39] ret_from_fork_asm+0x1a/0x30
[ 106.089777][ T39] </TASK>
[ 106.090207][ T39] Kernel Offset: disabled
[ 106.090585][ T39] Rebooting in 86400 seconds..
=== repro.txt ===
creat(&(0x7f0000000240)='./file0\x00', 0x0)
pipe2$9p(&(0x7f0000000080)={<r0=>0xffffffffffffffff,
<r1=>0xffffffffffffffff}, 0x0)
write$P9_RVERSION(r1,
&(0x7f0000000480)=ANY=[@ANYBLOB="1500000065ffff018000000800395032303030"],
0x15)
r2 = dup(r1)
write$FUSE_BMAP(r2, &(0x7f0000000100)={0x18}, 0x18)
write$FUSE_NOTIFY_RETRIEVE(r2, &(0x7f00000000c0)={0x14c}, 0x137)
mount$9p_fd(0x0, &(0x7f0000000000)='./file0\x00', &(0x7f0000000040),
0x0, &(0x7f0000000280)={'trans=fd,', {'rfdno', 0x3d, r0}, 0x2c,
{'wfdno', 0x3d, r2}, 0x2c, {[{@cache_mmap}], [], 0x6b}})
chmod(&(0x7f0000000140)='./file0\x00', 0x0)
r3 = creat(&(0x7f00000004c0)='./file0\x00', 0x600000000000000)
write$FUSE_DIRENTPLUS(r3, &(0x7f0000000200)=ANY=[], 0x1001)
and see also in
https://gist.github.com/xrivendell7/f9a108d59abc4c4fe1883e6d347f7b17.
I hope it helps.
Best regards
xingwei lee
+To David as I need help with netfs
syzbot wrote on Sun, May 12, 2024 at 12:42:33PM -0700:
> UAF in
> Workqueue: events_unbound v9fs_upload_to_server_worker
> refcount_dec_and_test include/linux/refcount.h:325 [inline]
> p9_fid_put include/net/9p/client.h:275 [inline]
> v9fs_free_request+0x5f/0xe0 fs/9p/vfs_addr.c:128
> netfs_free_request+0x246/0x600 fs/netfs/objects.c:97
> v9fs_upload_to_server fs/9p/vfs_addr.c:36 [inline]
> v9fs_upload_to_server_worker+0x200/0x3e0 fs/9p/vfs_addr.c:44
> process_one_work kernel/workqueue.c:3267 [inline]
> Freed by task 32641:
> p9_fid_destroy net/9p/client.c:889 [inline]
> p9_client_destroy+0x1fb/0x660 net/9p/client.c:1070
> v9fs_session_close+0x51/0x210 fs/9p/v9fs.c:506
> v9fs_kill_super+0x5c/0x90 fs/9p/vfs_super.c:196
> deactivate_locked_super+0xc6/0x130 fs/super.c:472
> cleanup_mnt+0x426/0x4c0 fs/namespace.c:1267
That's a tough one: netfs took a ref in v9fs_init_request (netfs op's
init_request) and expects to be able to use it until v9fs_free_request
(net op's free_request()), but the fs was dismounted first and we kill
the kmem cache at this point so we aggressively drop any dangling ref
there as there's no way of waiting.
(this is corroborated by "9pnet: Found fid 1 not clunked" in dmesg in
the syzkaller logs)
The other two recent kasan errors are similar:
https://lkml.kernel.org/r/[email protected]
is pretty much the same (it's just that the decrement here hit 0 as
umount was in the middle of doing it?), and
https://lkml.kernel.org/r/[email protected]
is yet another step faster (netfs freed the last ref while the cache
was being emptied and destroyed the fid first; which is possible because
we're not taking the client lock at this point as we weren't expecting
any other access after umount)
David, got an idea on how we could wait for these async writebacks?
Notes:
- David removed v9fs_upload_to_server in 2df86547b23d ("netfs: Cut
over to using new writeback code") (and c245868524cc ("netfs: Remove the
old writeback code")) in master, but the problem is still present
conceptually.
- layering wise, 9p (fs) depends on 9pnet, so 9pnet cannot call into the
fs code; the wait has to be in v9fs_session_close() before calling
p9_client_destroy or earlier
Thanks,
--
Dominique Martinet | Asmadeus
[email protected] wrote:
> +To David as I need help with netfs
I'll look at this in a bit, though it may be tomorrow. Just fixing something
in cifs at the moment.
David
[email protected] wrote:
> That's a tough one: netfs took a ref in v9fs_init_request (netfs op's
> init_request) and expects to be able to use it until v9fs_free_request
> (net op's free_request()), but the fs was dismounted first and we kill
> the kmem cache at this point so we aggressively drop any dangling ref
> there as there's no way of waiting.
Which kmem cache are we talking about? I can see two in net/9p/ and one in
fs/9p/. And took a ref on what?
David
#syz test: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
netfs, 9p: Fix race between umount and async request completion
There's a problem in 9p's interaction with netfslib whereby a crash occurs
because the 9p_fid structs get forcibly destroyed during client teardown
(without paying attention to their refcounts) before netfslib has finished
with them. However, it's not a simple case of deferring the clunking that
p9_fid_put() does as that requires the client.
The problem is that netfslib has to unlock pages and clear the IN_PROGRESS
flag before destroying the objects involved - including the fid - and, in
any case, nothing checks to see if writeback completed barring looking at
the page flags.
Fix this by keeping a count of outstanding I/O requests (of any type) and
waiting for it to quiesce during inode eviction.
Signed-off-by: David Howells <[email protected]>
cc: Eric Van Hensbergen <[email protected]>
cc: Latchesar Ionkov <[email protected]>
cc: Dominique Martinet <[email protected]>
cc: Christian Schoenebeck <[email protected]>
cc: Jeff Layton <[email protected]>
cc: Steve French <[email protected]>
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
---
fs/9p/vfs_inode.c | 5 ++++-
fs/afs/inode.c | 1 +
fs/netfs/objects.c | 5 +++++
fs/smb/client/cifsfs.c | 1 +
include/linux/netfs.h | 18 ++++++++++++++++++
5 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8c9a896d691e..57cfa9f65046 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -354,6 +354,7 @@ void v9fs_evict_inode(struct inode *inode)
version = cpu_to_le32(v9inode->qid.version);
netfs_clear_inode_writeback(inode, &version);
+ netfs_wait_for_outstanding_io(inode);
clear_inode(inode);
filemap_fdatawrite(&inode->i_data);
@@ -361,8 +362,10 @@ void v9fs_evict_inode(struct inode *inode)
if (v9fs_inode_cookie(v9inode))
fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
#endif
- } else
+ } else {
+ netfs_wait_for_outstanding_io(inode);
clear_inode(inode);
+ }
}
struct inode *
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 94fc049aff58..c831e711a4ac 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -652,6 +652,7 @@ void afs_evict_inode(struct inode *inode)
afs_set_cache_aux(vnode, &aux);
netfs_clear_inode_writeback(inode, &aux);
+ netfs_wait_for_outstanding_io(inode);
clear_inode(inode);
while (!list_empty(&vnode->wb_keys)) {
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index c90d482b1650..f4a642727479 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -72,6 +72,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
}
+ atomic_inc(&ctx->io_count);
trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq);
@@ -124,6 +125,7 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
@@ -142,6 +144,9 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
+
+ if (atomic_dec_and_test(&ictx->io_count))
+ wake_up_var(&ictx->io_count);
call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index ec5b639f421a..21c9e173ea9a 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -435,6 +435,7 @@ cifs_evict_inode(struct inode *inode)
if (inode->i_state & I_PINNING_NETFS_WB)
cifs_fscache_unuse_inode_cookie(inode, true);
cifs_fscache_release_inode_cookie(inode);
+ netfs_wait_for_outstanding_io(inode);
clear_inode(inode);
}
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index d2d291a9cdad..3ca3906bb8da 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -68,6 +68,7 @@ struct netfs_inode {
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
+ atomic_t io_count; /* Number of outstanding reqs */
unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
@@ -474,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
ctx->remote_i_size = i_size_read(&ctx->inode);
ctx->zero_point = LLONG_MAX;
ctx->flags = 0;
+ atomic_set(&ctx->io_count, 0);
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
@@ -517,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
#endif
}
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @inode: The inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete. This is intended
+ * to be called from inode eviction routines. This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
+
#endif /* _LINUX_NETFS_H */
> #syz test: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
This crash does not have a reproducer. I cannot test it.
>
> netfs, 9p: Fix race between umount and async request completion
>
> There's a problem in 9p's interaction with netfslib whereby a crash occurs
> because the 9p_fid structs get forcibly destroyed during client teardown
> (without paying attention to their refcounts) before netfslib has finished
> with them. However, it's not a simple case of deferring the clunking that
> p9_fid_put() does as that requires the client.
>
> The problem is that netfslib has to unlock pages and clear the IN_PROGRESS
> flag before destroying the objects involved - including the pid - and, in
> any case, nothing checks to see if writeback completed barring looking at
> the page flags.
>
> Fix this by keeping a count of outstanding I/O requests (of any type) and
> waiting for it to quiesce during inode eviction.
>
> Signed-off-by: David Howells <[email protected]>
> cc: Eric Van Hensbergen <[email protected]>
> cc: Latchesar Ionkov <[email protected]>
> cc: Dominique Martinet <[email protected]>
> cc: Christian Schoenebeck <[email protected]>
> cc: Jeff Layton <[email protected]>
> cc: Steve French <[email protected]>
> cc: [email protected]
> cc: [email protected]
> cc: [email protected]
> cc: [email protected]
> cc: [email protected]
> ---
> fs/9p/vfs_inode.c | 5 ++++-
> fs/afs/inode.c | 1 +
> fs/netfs/objects.c | 5 +++++
> fs/smb/client/cifsfs.c | 1 +
> include/linux/netfs.h | 18 ++++++++++++++++++
> 5 files changed, 29 insertions(+), 1 deletion(-)
>
> diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
> index 8c9a896d691e..57cfa9f65046 100644
> --- a/fs/9p/vfs_inode.c
> +++ b/fs/9p/vfs_inode.c
> @@ -354,6 +354,7 @@ void v9fs_evict_inode(struct inode *inode)
> version = cpu_to_le32(v9inode->qid.version);
> netfs_clear_inode_writeback(inode, &version);
>
> + netfs_wait_for_outstanding_io(inode);
> clear_inode(inode);
> filemap_fdatawrite(&inode->i_data);
>
> @@ -361,8 +362,10 @@ void v9fs_evict_inode(struct inode *inode)
> if (v9fs_inode_cookie(v9inode))
> fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
> #endif
> - } else
> + } else {
> + netfs_wait_for_outstanding_io(inode);
> clear_inode(inode);
> + }
> }
>
> struct inode *
> diff --git a/fs/afs/inode.c b/fs/afs/inode.c
> index 94fc049aff58..c831e711a4ac 100644
> --- a/fs/afs/inode.c
> +++ b/fs/afs/inode.c
> @@ -652,6 +652,7 @@ void afs_evict_inode(struct inode *inode)
>
> afs_set_cache_aux(vnode, &aux);
> netfs_clear_inode_writeback(inode, &aux);
> + netfs_wait_for_outstanding_io(inode);
> clear_inode(inode);
>
> while (!list_empty(&vnode->wb_keys)) {
> diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
> index c90d482b1650..f4a642727479 100644
> --- a/fs/netfs/objects.c
> +++ b/fs/netfs/objects.c
> @@ -72,6 +72,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
> }
> }
>
> + atomic_inc(&ctx->io_count);
> trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
> netfs_proc_add_rreq(rreq);
> netfs_stat(&netfs_n_rh_rreq);
> @@ -124,6 +125,7 @@ static void netfs_free_request(struct work_struct *work)
> {
> struct netfs_io_request *rreq =
> container_of(work, struct netfs_io_request, work);
> + struct netfs_inode *ictx = netfs_inode(rreq->inode);
> unsigned int i;
>
> trace_netfs_rreq(rreq, netfs_rreq_trace_free);
> @@ -142,6 +144,9 @@ static void netfs_free_request(struct work_struct *work)
> }
> kvfree(rreq->direct_bv);
> }
> +
> + if (atomic_dec_and_test(&ictx->io_count))
> + wake_up_var(&ictx->io_count);
> call_rcu(&rreq->rcu, netfs_free_request_rcu);
> }
>
> diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
> index ec5b639f421a..21c9e173ea9a 100644
> --- a/fs/smb/client/cifsfs.c
> +++ b/fs/smb/client/cifsfs.c
> @@ -435,6 +435,7 @@ cifs_evict_inode(struct inode *inode)
> if (inode->i_state & I_PINNING_NETFS_WB)
> cifs_fscache_unuse_inode_cookie(inode, true);
> cifs_fscache_release_inode_cookie(inode);
> + netfs_wait_for_outstanding_io(inode);
> clear_inode(inode);
> }
>
> diff --git a/include/linux/netfs.h b/include/linux/netfs.h
> index d2d291a9cdad..3ca3906bb8da 100644
> --- a/include/linux/netfs.h
> +++ b/include/linux/netfs.h
> @@ -68,6 +68,7 @@ struct netfs_inode {
> loff_t remote_i_size; /* Size of the remote file */
> loff_t zero_point; /* Size after which we assume there's no data
> * on the server */
> + atomic_t io_count; /* Number of outstanding reqs */
> unsigned long flags;
> #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
> #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
> @@ -474,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
> ctx->remote_i_size = i_size_read(&ctx->inode);
> ctx->zero_point = LLONG_MAX;
> ctx->flags = 0;
> + atomic_set(&ctx->io_count, 0);
> #if IS_ENABLED(CONFIG_FSCACHE)
> ctx->cache = NULL;
> #endif
> @@ -517,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
> #endif
> }
>
> +/**
> + * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
> + * @ctx: The netfs inode to wait on
> + *
> + * Wait for outstanding I/O requests of any type to complete. This is intended
> + * to be called from inode eviction routines. This makes sure that any
> + * resources held by those requests are cleaned up before we let the inode get
> + * cleaned up.
> + */
> +static inline void netfs_wait_for_outstanding_io(struct inode *inode)
> +{
> + struct netfs_inode *ictx = netfs_inode(inode);
> +
> + wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
> +}
> +
> #endif /* _LINUX_NETFS_H */
>