With all "silently resizing" callers of ksize() refactored, remove the
logic in ksize() that would allow it to be used to effectively change
the size of an allocation (bypassing __alloc_size hints, etc). Users
wanting this feature need to either use kmalloc_size_roundup() before an
allocation, or use krealloc() directly.
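For example, a caller that previously used ksize() to silently grow into
the bucket slack would switch from something like this (an illustrative
sketch, not taken from any particular caller):

	ptr = kmalloc(size, GFP_KERNEL);
	size = ksize(ptr);	/* opportunistically claim the whole bucket */

to rounding up before the allocation:

	size = kmalloc_size_roundup(size);
	ptr = kmalloc(size, GFP_KERNEL);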
For kfree_sensitive(), move the unpoisoning logic inline. Replace some of
the partially open-coded ksize() in __do_krealloc() with ksize() now that
ksize() no longer performs unpoisoning.
Adjust the KUnit tests to match the new ksize() behavior.
Cc: Dmitry Vyukov <[email protected]>
Cc: Jakub Kicinski <[email protected]>
Cc: Paolo Abeni <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Hyeonggon Yoo <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Vincenzo Frascino <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
This requires at least this be landed first:
https://lore.kernel.org/lkml/[email protected]/
I suspect given that is the most central ksize() user, this ksize()
fix might be best to land through the netdev tree...
---
mm/kasan/kasan_test.c | 8 +++++---
mm/slab_common.c | 33 ++++++++++++++-------------------
2 files changed, 19 insertions(+), 22 deletions(-)
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 0d59098f0876..cb5c54adb503 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -783,7 +783,7 @@ static void kasan_global_oob_left(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-/* Check that ksize() makes the whole object accessible. */
+/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
@@ -791,15 +791,17 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
real_size = ksize(ptr);
+ KUNIT_EXPECT_GT(test, real_size, size);
OPTIMIZER_HIDE_VAR(ptr);
/* This access shouldn't trigger a KASAN report. */
- ptr[size] = 'x';
+ ptr[size - 1] = 'x';
/* This one must. */
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
kfree(ptr);
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 33b1886b06eb..eabd66fcabd0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1333,11 +1333,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
void *ret;
size_t ks;
- /* Don't use instrumented ksize to allow precise KASAN poisoning. */
+ /* Check for double-free before calling ksize. */
if (likely(!ZERO_OR_NULL_PTR(p))) {
if (!kasan_check_byte(p))
return NULL;
- ks = kfence_ksize(p) ?: __ksize(p);
+ ks = ksize(p);
} else
ks = 0;
@@ -1405,8 +1405,10 @@ void kfree_sensitive(const void *p)
void *mem = (void *)p;
ks = ksize(mem);
- if (ks)
+ if (ks) {
+ kasan_unpoison_range(mem, ks);
memzero_explicit(mem, ks);
+ }
kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
@@ -1415,10 +1417,11 @@ EXPORT_SYMBOL(kfree_sensitive);
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
*
- * kmalloc may internally round up allocations and return more memory
+ * kmalloc() may internally round up allocations and return more memory
* than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
+ * allocated memory. The caller may NOT use this additional memory, unless
+ * it calls krealloc(). To avoid an alloc/realloc cycle, callers can use
+ * kmalloc_size_roundup() to find the size of the associated kmalloc bucket.
* The caller must guarantee that objp points to a valid object previously
* allocated with either kmalloc() or kmem_cache_alloc(). The object
* must not be freed during the duration of the call.
@@ -1427,13 +1430,11 @@ EXPORT_SYMBOL(kfree_sensitive);
*/
size_t ksize(const void *objp)
{
- size_t size;
-
/*
- * We need to first check that the pointer to the object is valid, and
- * only then unpoison the memory. The report printed from ksize() is
- * more useful, then when it's printed later when the behaviour could
- * be undefined due to a potential use-after-free or double-free.
+ * We need to first check that the pointer to the object is valid.
+	 * The KASAN report printed from ksize() is more useful than one
+	 * printed later, when the behaviour could be undefined due to a
+	 * potential use-after-free or double-free.
*
* We use kasan_check_byte(), which is supported for the hardware
* tag-based KASAN mode, unlike kasan_check_read/write().
@@ -1447,13 +1448,7 @@ size_t ksize(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
return 0;
- size = kfence_ksize(objp) ?: __ksize(objp);
- /*
- * We assume that ksize callers could use whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_range(objp, size);
- return size;
+ return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);
--
2.34.1
Greetings,
FYI, we noticed BUG:KASAN:slab-out-of-bounds_in_copy_array due to commit (built with gcc-11):
commit: d916d97f33952312ca6b8240b40ed79cc9cb04b2 ("[PATCH] mm: Make ksize() a reporting-only function")
url: https://github.com/intel-lab-lkp/linux/commits/Kees-Cook/mm-Make-ksize-a-reporting-only-function/20221023-020908
base: https://git.kernel.org/cgit/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/linux-mm/[email protected]
patch subject: [PATCH] mm: Make ksize() a reporting-only function
in testcase: boot
on test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 16G
caused below changes (please refer to attached dmesg/kmsg for entire log/backtrace):
If you fix the issue, kindly add the following tag
| Reported-by: kernel test robot <[email protected]>
| Link: https://lore.kernel.org/r/[email protected]
[ 32.059131][ T1] BUG: KASAN: slab-out-of-bounds in copy_array (kbuild/src/x86_64-3/arch/x86/kernel/cpu/hypervisor.c:91)
[ 32.059705][ T1] Write of size 32 at addr ffff88814b9d75c0 by task systemd/1
[ 32.060330][ T1]
[ 32.060576][ T1] CPU: 1 PID: 1 Comm: systemd Not tainted 6.1.0-rc1-00216-gd916d97f3395 #2
[ 32.061273][ T1] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.0-debian-1.16.0-4 04/01/2014
[ 32.062082][ T1] Call Trace:
[ 32.062393][ T1] <TASK>
[ 32.062680][ T1] dump_stack_lvl (kbuild/src/x86_64-3/lib/dump_stack.c:107 (discriminator 4))
[ 32.063071][ T1] print_address_description+0x87/0x2a5
[ 32.063609][ T1] print_report (kbuild/src/x86_64-3/mm/kasan/report.c:365)
[ 32.064003][ T1] ? kasan_addr_to_slab (kbuild/src/x86_64-3/mm/kasan/common.c:35)
[ 32.064424][ T1] ? copy_array (kbuild/src/x86_64-3/arch/x86/kernel/cpu/hypervisor.c:91)
[ 32.064801][ T1] kasan_report (kbuild/src/x86_64-3/mm/kasan/report.c:131 kbuild/src/x86_64-3/mm/kasan/report.c:466)
[ 32.065188][ T1] ? copy_array (kbuild/src/x86_64-3/arch/x86/kernel/cpu/hypervisor.c:91)
[ 32.065570][ T1] kasan_check_range (kbuild/src/x86_64-3/mm/kasan/generic.c:190)
[ 32.065986][ T1] memcpy (kbuild/src/x86_64-3/mm/kasan/shadow.c:65 (discriminator 1))
[ 32.066333][ T1] copy_array (kbuild/src/x86_64-3/arch/x86/kernel/cpu/hypervisor.c:91)
[ 32.066699][ T1] copy_verifier_state (kbuild/src/x86_64-3/kernel/bpf/verifier.c:1189)
[ 32.067124][ T1] pop_stack (kbuild/src/x86_64-3/kernel/bpf/verifier.c:1252)
[ 32.067502][ T1] do_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:12441)
[ 32.067878][ T1] ? lock_downgrade (kbuild/src/x86_64-3/kernel/locking/lockdep.c:5320)
[ 32.068294][ T1] ? check_helper_call (kbuild/src/x86_64-3/kernel/bpf/verifier.c:12144)
[ 32.068737][ T1] ? kasan_quarantine_put (kbuild/src/x86_64-3/arch/x86/include/asm/irqflags.h:45 (discriminator 1) kbuild/src/x86_64-3/arch/x86/include/asm/irqflags.h:80 (discriminator 1) kbuild/src/x86_64-3/arch/x86/include/asm/irqflags.h:138 (discriminator 1) kbuild/src/x86_64-3/mm/kasan/quarantine.c:242 (discriminator 1))
[ 32.069181][ T1] ? trace_hardirqs_on (kbuild/src/x86_64-3/kernel/trace/trace_preemptirq.c:50 (discriminator 22))
[ 32.069605][ T1] ? memset (kbuild/src/x86_64-3/mm/kasan/shadow.c:44)
[ 32.069960][ T1] ? memset (kbuild/src/x86_64-3/mm/kasan/shadow.c:44)
[ 32.070317][ T1] ? memset (kbuild/src/x86_64-3/mm/kasan/shadow.c:44)
[ 32.070683][ T1] do_check_common (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14642)
[ 32.071088][ T1] ? check_cfg (kbuild/src/x86_64-3/kernel/bpf/verifier.c:10938)
[ 32.071483][ T1] bpf_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14705 kbuild/src/x86_64-3/kernel/bpf/verifier.c:15275)
[ 32.071869][ T1] ? find_held_lock (kbuild/src/x86_64-3/kernel/locking/lockdep.c:5158)
[ 32.072279][ T1] ? bpf_prog_load (kbuild/src/x86_64-3/kernel/bpf/syscall.c:2598)
[ 32.072696][ T1] ? bpf_get_btf_vmlinux (kbuild/src/x86_64-3/kernel/bpf/verifier.c:15159)
[ 32.073124][ T1] ? lockdep_hardirqs_on_prepare (kbuild/src/x86_64-3/kernel/locking/lockdep.c:4528)
[ 32.073647][ T1] ? memset (kbuild/src/x86_64-3/mm/kasan/shadow.c:44)
[ 32.074006][ T1] bpf_prog_load (kbuild/src/x86_64-3/kernel/bpf/syscall.c:2605)
[ 32.074412][ T1] ? bpf_prog_bind_map (kbuild/src/x86_64-3/kernel/bpf/syscall.c:2464)
[ 32.074839][ T1] ? lock_acquire (kbuild/src/x86_64-3/kernel/locking/lockdep.c:466 kbuild/src/x86_64-3/kernel/locking/lockdep.c:5670 kbuild/src/x86_64-3/kernel/locking/lockdep.c:5633)
[ 32.075244][ T1] ? __might_fault (kbuild/src/x86_64-3/mm/memory.c:5645 kbuild/src/x86_64-3/mm/memory.c:5638)
[ 32.075654][ T1] ? lock_downgrade (kbuild/src/x86_64-3/kernel/locking/lockdep.c:5320)
[ 32.076067][ T1] ? lock_is_held_type (kbuild/src/x86_64-3/kernel/locking/lockdep.c:5409 kbuild/src/x86_64-3/kernel/locking/lockdep.c:5711)
[ 32.076489][ T1] ? __might_fault (kbuild/src/x86_64-3/mm/memory.c:5645 kbuild/src/x86_64-3/mm/memory.c:5638)
[ 32.076887][ T1] ? lock_release (kbuild/src/x86_64-3/kernel/locking/lockdep.c:466 kbuild/src/x86_64-3/kernel/locking/lockdep.c:5690)
[ 32.077284][ T1] __sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:4965)
[ 32.077662][ T1] ? link_create (kbuild/src/x86_64-3/kernel/bpf/syscall.c:4912)
[ 32.078055][ T1] ? trace_hardirqs_on (kbuild/src/x86_64-3/kernel/trace/trace_preemptirq.c:50 (discriminator 22))
[ 32.079667][ T1] ? task_work_run (kbuild/src/x86_64-3/kernel/task_work.c:182 (discriminator 1))
[ 32.080090][ T1] __ia32_sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:5067)
[ 32.080483][ T1] __do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:112 kbuild/src/x86_64-3/arch/x86/entry/common.c:178)
[ 32.080911][ T1] ? lockdep_hardirqs_on_prepare (kbuild/src/x86_64-3/kernel/locking/lockdep.c:4528)
[ 32.081432][ T1] ? __do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:183)
[ 32.081869][ T1] ? __do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:183)
[ 32.082310][ T1] ? lockdep_hardirqs_on_prepare (kbuild/src/x86_64-3/kernel/locking/lockdep.c:4528)
[ 32.082836][ T1] ? __do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:183)
[ 32.083292][ T1] do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:203)
[ 32.083709][ T1] entry_SYSENTER_compat_after_hwframe (kbuild/src/x86_64-3/arch/x86/entry/entry_64_compat.S:122)
[ 32.084213][ T1] RIP: 0023:0xf7fb6549
[ 32.084574][ T1] Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
All code
========
0: 03 74 c0 01 add 0x1(%rax,%rax,8),%esi
4: 10 05 03 74 b8 01 adc %al,0x1b87403(%rip) # 0x1b8740d
a: 10 06 adc %al,(%rsi)
c: 03 74 b4 01 add 0x1(%rsp,%rsi,4),%esi
10: 10 07 adc %al,(%rdi)
12: 03 74 b0 01 add 0x1(%rax,%rsi,4),%esi
16: 10 08 adc %cl,(%rax)
18: 03 74 d8 01 add 0x1(%rax,%rbx,8),%esi
1c: 00 00 add %al,(%rax)
1e: 00 00 add %al,(%rax)
20: 00 51 52 add %dl,0x52(%rcx)
23: 55 push %rbp
24: 89 e5 mov %esp,%ebp
26: 0f 34 sysenter
28: cd 80 int $0x80
2a:* 5d pop %rbp <-- trapping instruction
2b: 5a pop %rdx
2c: 59 pop %rcx
2d: c3 retq
2e: 90 nop
2f: 90 nop
30: 90 nop
31: 90 nop
32: 8d b4 26 00 00 00 00 lea 0x0(%rsi,%riz,1),%esi
39: 8d b4 26 00 00 00 00 lea 0x0(%rsi,%riz,1),%esi
Code starting with the faulting instruction
===========================================
0: 5d pop %rbp
1: 5a pop %rdx
2: 59 pop %rcx
3: c3 retq
4: 90 nop
5: 90 nop
6: 90 nop
7: 90 nop
8: 8d b4 26 00 00 00 00 lea 0x0(%rsi,%riz,1),%esi
f: 8d b4 26 00 00 00 00 lea 0x0(%rsi,%riz,1),%esi
[ 32.086028][ T1] RSP: 002b:00000000ffdde7dc EFLAGS: 00200206 ORIG_RAX: 0000000000000165
[ 32.086706][ T1] RAX: ffffffffffffffda RBX: 0000000000000005 RCX: 00000000ffdde818
[ 32.087375][ T1] RDX: 0000000000000070 RSI: 000000005805ebe0 RDI: 0000000000000000
[ 32.088030][ T1] RBP: 000000000000000f R08: 0000000000000000 R09: 0000000000000000
[ 32.088688][ T1] R10: 0000000000000000 R11: 0000000000200206 R12: 0000000000000000
[ 32.089341][ T1] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
[ 32.090006][ T1] </TASK>
[ 32.090303][ T1]
[ 32.090550][ T1] Allocated by task 1:
[ 32.090908][ T1] kasan_save_stack (kbuild/src/x86_64-3/mm/kasan/common.c:46)
[ 32.091316][ T1] kasan_set_track (kbuild/src/x86_64-3/mm/kasan/common.c:52)
[ 32.091717][ T1] __kasan_krealloc (kbuild/src/x86_64-3/mm/kasan/common.c:440)
[ 32.092131][ T1] krealloc (kbuild/src/x86_64-3/include/linux/kasan.h:231 kbuild/src/x86_64-3/mm/slab_common.c:1346 kbuild/src/x86_64-3/mm/slab_common.c:1383)
[ 32.092490][ T1] is_state_visited (kbuild/src/x86_64-3/kernel/bpf/verifier.c:2505 kbuild/src/x86_64-3/kernel/bpf/verifier.c:11982)
[ 32.092912][ T1] do_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:12176)
[ 32.093285][ T1] do_check_common (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14642)
[ 32.093692][ T1] bpf_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14705 kbuild/src/x86_64-3/kernel/bpf/verifier.c:15275)
[ 32.094074][ T1] bpf_prog_load (kbuild/src/x86_64-3/kernel/bpf/syscall.c:2605)
[ 32.094473][ T1] __sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:4965)
[ 32.094851][ T1] __ia32_sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:5067)
[ 32.095246][ T1] __do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:112 kbuild/src/x86_64-3/arch/x86/entry/common.c:178)
[ 32.095674][ T1] do_fast_syscall_32 (kbuild/src/x86_64-3/arch/x86/entry/common.c:203)
[ 32.096084][ T1] entry_SYSENTER_compat_after_hwframe (kbuild/src/x86_64-3/arch/x86/entry/entry_64_compat.S:122)
[ 32.096593][ T1]
[ 32.096840][ T1] The buggy address belongs to the object at ffff88814b9d75c0
[ 32.096840][ T1] which belongs to the cache kmalloc-32 of size 32
[ 32.097916][ T1] The buggy address is located 0 bytes inside of
[ 32.097916][ T1] 32-byte region [ffff88814b9d75c0, ffff88814b9d75e0)
[ 32.098895][ T1]
[ 32.099143][ T1] The buggy address belongs to the physical page:
[ 32.099656][ T1] page:00000000a3d33dcb refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14b9d7
[ 32.100459][ T1] flags: 0x17ffffc0000200(slab|node=0|zone=2|lastcpupid=0x1fffff)
[ 32.101113][ T1] raw: 0017ffffc0000200 0000000000000000 dead000000000122 ffff888100041500
[ 32.101813][ T1] raw: 0000000000000000 0000000080400040 00000001ffffffff 0000000000000000
[ 32.102518][ T1] page dumped because: kasan: bad access detected
[ 32.103022][ T1] page_owner tracks the page as allocated
[ 32.103496][ T1] page last allocated via order 0, migratetype Unmovable, gfp_mask 0x112cc0(GFP_USER|__GFP_NOWARN|__GFP_NORETRY), pid 1, tgid 1 (systemd), ts 32049485962, free_ts 0
[ 32.104752][ T1] get_page_from_freelist (kbuild/src/x86_64-3/mm/page_alloc.c:2549 kbuild/src/x86_64-3/mm/page_alloc.c:4287)
[ 32.105197][ T1] __alloc_pages (kbuild/src/x86_64-3/mm/page_alloc.c:5547)
[ 32.105592][ T1] allocate_slab (kbuild/src/x86_64-3/mm/slub.c:1794 kbuild/src/x86_64-3/mm/slub.c:1939)
[ 32.105988][ T1] ___slab_alloc (kbuild/src/x86_64-3/mm/slub.c:3180)
[ 32.106385][ T1] __kmem_cache_alloc_node (kbuild/src/x86_64-3/mm/slub.c:3279 kbuild/src/x86_64-3/mm/slub.c:3364 kbuild/src/x86_64-3/mm/slub.c:3437)
[ 32.106832][ T1] __kmalloc_node_track_caller (kbuild/src/x86_64-3/include/linux/kasan.h:211 kbuild/src/x86_64-3/mm/slab_common.c:955 kbuild/src/x86_64-3/mm/slab_common.c:975)
[ 32.107304][ T1] copy_array (kbuild/src/x86_64-3/arch/x86/kernel/cpu/hypervisor.c:93)
[ 32.107672][ T1] copy_verifier_state (kbuild/src/x86_64-3/kernel/bpf/verifier.c:1189)
[ 32.108094][ T1] push_stack (kbuild/src/x86_64-3/kernel/bpf/verifier.c:1288)
[ 32.108466][ T1] check_cond_jmp_op (kbuild/src/x86_64-3/kernel/bpf/verifier.c:10223)
[ 32.108884][ T1] do_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:12451)
[ 32.109259][ T1] do_check_common (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14642)
[ 32.109663][ T1] bpf_check (kbuild/src/x86_64-3/kernel/bpf/verifier.c:14705 kbuild/src/x86_64-3/kernel/bpf/verifier.c:15275)
[ 32.110044][ T1] bpf_prog_load (kbuild/src/x86_64-3/kernel/bpf/syscall.c:2605)
[ 32.110442][ T1] __sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:4965)
[ 32.110815][ T1] __ia32_sys_bpf (kbuild/src/x86_64-3/kernel/bpf/syscall.c:5067)
To reproduce:
# build kernel
cd linux
cp config-6.1.0-rc1-00216-gd916d97f3395 .config
make HOSTCC=gcc-11 CC=gcc-11 ARCH=x86_64 olddefconfig prepare modules_prepare bzImage modules
make HOSTCC=gcc-11 CC=gcc-11 ARCH=x86_64 INSTALL_MOD_PATH=<mod-install-dir> modules_install
cd <mod-install-dir>
find lib/ | cpio -o -H newc --quiet | gzip > modules.cgz
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp qemu -k <bzImage> -m modules.cgz job-script # job-script is attached in this email
# if you come across any failure that blocks the test,
# please remove ~/.lkp and /lkp dir to run from a clean state.
--
0-DAY CI Kernel Test Service
https://01.org/lkp
On 10/22/22 20:08, Kees Cook wrote:
> With all "silently resizing" callers of ksize() refactored, remove the
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
> For kfree_sensitive(), move the unpoisoning logic inline. Replace some of
> the partially open-coded ksize() in __do_krealloc() with ksize() now that
> ksize() no longer performs unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Jakub Kicinski <[email protected]>
> Cc: Paolo Abeni <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Vlastimil Babka <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Hyeonggon Yoo <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrey Konovalov <[email protected]>
> Cc: Vincenzo Frascino <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: Kees Cook <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
> ---
> This requires at least this be landed first:
> https://lore.kernel.org/lkml/[email protected]/
Don't we need all parts to have landed first, even if the skbuff one is the
most prominent?
> I suspect given that is the most central ksize() user, this ksize()
> fix might be best to land through the netdev tree...
> ---
> mm/kasan/kasan_test.c | 8 +++++---
> mm/slab_common.c | 33 ++++++++++++++-------------------
> 2 files changed, 19 insertions(+), 22 deletions(-)
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 0d59098f0876..cb5c54adb503 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -783,7 +783,7 @@ static void kasan_global_oob_left(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> }
>
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -791,15 +791,17 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> real_size = ksize(ptr);
> + KUNIT_EXPECT_GT(test, real_size, size);
>
> OPTIMIZER_HIDE_VAR(ptr);
>
> /* This access shouldn't trigger a KASAN report. */
> - ptr[size] = 'x';
> + ptr[size - 1] = 'x';
>
> /* This one must. */
> - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
>
> kfree(ptr);
> }
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 33b1886b06eb..eabd66fcabd0 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1333,11 +1333,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
> void *ret;
> size_t ks;
>
> - /* Don't use instrumented ksize to allow precise KASAN poisoning. */
> + /* Check for double-free before calling ksize. */
> if (likely(!ZERO_OR_NULL_PTR(p))) {
> if (!kasan_check_byte(p))
> return NULL;
> - ks = kfence_ksize(p) ?: __ksize(p);
> + ks = ksize(p);
> } else
> ks = 0;
>
> @@ -1405,8 +1405,10 @@ void kfree_sensitive(const void *p)
> void *mem = (void *)p;
>
> ks = ksize(mem);
> - if (ks)
> + if (ks) {
> + kasan_unpoison_range(mem, ks);
> memzero_explicit(mem, ks);
> + }
> kfree(mem);
> }
> EXPORT_SYMBOL(kfree_sensitive);
> @@ -1415,10 +1417,11 @@ EXPORT_SYMBOL(kfree_sensitive);
> * ksize - get the actual amount of memory allocated for a given object
> * @objp: Pointer to the object
> *
> - * kmalloc may internally round up allocations and return more memory
> + * kmalloc() may internally round up allocations and return more memory
> * than requested. ksize() can be used to determine the actual amount of
> - * memory allocated. The caller may use this additional memory, even though
> - * a smaller amount of memory was initially specified with the kmalloc call.
> + * allocated memory. The caller may NOT use this additional memory, unless
> + * it calls krealloc(). To avoid an alloc/realloc cycle, callers can use
> + * kmalloc_size_roundup() to find the size of the associated kmalloc bucket.
> * The caller must guarantee that objp points to a valid object previously
> * allocated with either kmalloc() or kmem_cache_alloc(). The object
> * must not be freed during the duration of the call.
> @@ -1427,13 +1430,11 @@ EXPORT_SYMBOL(kfree_sensitive);
> */
> size_t ksize(const void *objp)
> {
> - size_t size;
> -
> /*
> - * We need to first check that the pointer to the object is valid, and
> - * only then unpoison the memory. The report printed from ksize() is
> - * more useful, then when it's printed later when the behaviour could
> - * be undefined due to a potential use-after-free or double-free.
> + * We need to first check that the pointer to the object is valid.
> +	 * The KASAN report printed from ksize() is more useful than one
> +	 * printed later, when the behaviour could be undefined due to a
> +	 * potential use-after-free or double-free.
> *
> * We use kasan_check_byte(), which is supported for the hardware
> * tag-based KASAN mode, unlike kasan_check_read/write().
> @@ -1447,13 +1448,7 @@ size_t ksize(const void *objp)
> if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
> return 0;
>
> - size = kfence_ksize(objp) ?: __ksize(objp);
> - /*
> - * We assume that ksize callers could use whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_range(objp, size);
> - return size;
> + return kfence_ksize(objp) ?: __ksize(objp);
> }
> EXPORT_SYMBOL(ksize);
>
On Tue, Oct 25, 2022 at 01:53:54PM +0200, Vlastimil Babka wrote:
> On 10/22/22 20:08, Kees Cook wrote:
> > With all "silently resizing" callers of ksize() refactored, remove the
> > logic in ksize() that would allow it to be used to effectively change
> > the size of an allocation (bypassing __alloc_size hints, etc). Users
> > wanting this feature need to either use kmalloc_size_roundup() before an
> > allocation, or use krealloc() directly.
> >
> > For kfree_sensitive(), move the unpoisoning logic inline. Replace some of
> > the partially open-coded ksize() in __do_krealloc() with ksize() now that
> > ksize() no longer performs unpoisoning.
> >
> > [...]
> > Signed-off-by: Kees Cook <[email protected]>
>
> Acked-by: Vlastimil Babka <[email protected]>
Thanks!
> > ---
> > This requires at least this be landed first:
> > https://lore.kernel.org/lkml/[email protected]/
>
> Don't we need all parts to have landed first, even if the skbuff one is the
> most prominent?
Yes, though, I suspect there will be some cases we couldn't easily find.
Here are the prerequisites I'm aware of:
in -next:
36875a063b5e ("net: ipa: Proactively round up to kmalloc bucket size")
ab3f7828c979 ("openvswitch: Use kmalloc_size_roundup() to match ksize() usage")
d6dd508080a3 ("bnx2: Use kmalloc_size_roundup() to match ksize() usage")
reviewed, waiting to land (should I take these myself?)
btrfs: send: Proactively round up to kmalloc bucket size
https://lore.kernel.org/lkml/[email protected]/
dma-buf: Proactively round up to kmalloc bucket size
https://lore.kernel.org/lkml/[email protected]/
partially reviewed:
igb: Proactively round up to kmalloc bucket size
https://lore.kernel.org/lkml/[email protected]/
unreviewed:
coredump: Proactively round up to kmalloc bucket size
https://lore.kernel.org/lkml/[email protected]/
devres: Use kmalloc_size_roundup() to match ksize() usage
https://lore.kernel.org/lkml/[email protected]/
needs updating:
mempool: Use kmalloc_size_roundup() to match ksize() usage
https://lore.kernel.org/lkml/[email protected]/
bpf: Use kmalloc_size_roundup() to match ksize() usage
https://lore.kernel.org/lkml/[email protected]/
--
Kees Cook
On Sat, Oct 22, 2022 at 8:08 PM Kees Cook <[email protected]> wrote:
>
> With all "silently resizing" callers of ksize() refactored, remove the
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
> For kfree_sensitive(), move the unpoisoning logic inline. Replace some of
> the partially open-coded ksize() in __do_krealloc() with ksize() now that
> ksize() no longer performs unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
Hi Kees,
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -791,15 +791,17 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> real_size = ksize(ptr);
> + KUNIT_EXPECT_GT(test, real_size, size);
>
> OPTIMIZER_HIDE_VAR(ptr);
>
> /* This access shouldn't trigger a KASAN report. */
> - ptr[size] = 'x';
> + ptr[size - 1] = 'x';
>
> /* This one must. */
> - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
How about also accessing ptr[size] here? It would allow for a more
precise checking of the in-object redzone.
>
> kfree(ptr);
> }
Thanks!
On Thu, Oct 27, 2022 at 09:05:45PM +0200, Andrey Konovalov wrote:
> On Sat, Oct 22, 2022 at 8:08 PM Kees Cook <[email protected]> wrote:
> [...]
> > -/* Check that ksize() makes the whole object accessible. */
> > +/* Check that ksize() does NOT unpoison whole object. */
> > static void ksize_unpoisons_memory(struct kunit *test)
> > {
> > char *ptr;
> > @@ -791,15 +791,17 @@ static void ksize_unpoisons_memory(struct kunit *test)
> >
> > ptr = kmalloc(size, GFP_KERNEL);
> > KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> > +
> > real_size = ksize(ptr);
> > + KUNIT_EXPECT_GT(test, real_size, size);
> >
> > OPTIMIZER_HIDE_VAR(ptr);
> >
> > /* This access shouldn't trigger a KASAN report. */
> > - ptr[size] = 'x';
> > + ptr[size - 1] = 'x';
> >
> > /* This one must. */
> > - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> > + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
>
> How about also accessing ptr[size] here? It would allow for a more
> precise checking of the in-object redzone.
Sure! Probably both ptr[size] and ptr[real_size - 1], yes?
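Something like this, perhaps (untested sketch; note that ptr[size] would
only reliably trap under generic KASAN, since the tag-based modes poison
at 16-byte granularity):

	/* This access shouldn't trigger a KASAN report. */
	ptr[size - 1] = 'x';

	/* These must. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);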
--
Kees Cook
On Thu, Oct 27, 2022 at 9:13 PM Kees Cook <[email protected]> wrote:
>
> On Thu, Oct 27, 2022 at 09:05:45PM +0200, Andrey Konovalov wrote:
> > On Sat, Oct 22, 2022 at 8:08 PM Kees Cook <[email protected]> wrote:
> > [...]
> > > -/* Check that ksize() makes the whole object accessible. */
> > > +/* Check that ksize() does NOT unpoison whole object. */
> > > static void ksize_unpoisons_memory(struct kunit *test)
> > > {
> > > char *ptr;
> > > @@ -791,15 +791,17 @@ static void ksize_unpoisons_memory(struct kunit *test)
> > >
> > > ptr = kmalloc(size, GFP_KERNEL);
> > > KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> > > +
> > > real_size = ksize(ptr);
> > > + KUNIT_EXPECT_GT(test, real_size, size);
> > >
> > > OPTIMIZER_HIDE_VAR(ptr);
> > >
> > > /* This access shouldn't trigger a KASAN report. */
> > > - ptr[size] = 'x';
> > > + ptr[size - 1] = 'x';
> > >
> > > /* This one must. */
> > > - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> > > + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
> >
> > How about also accessing ptr[size] here? It would allow for a more
> > precise checking of the in-object redzone.
>
> Sure! Probably both ptr[size] and ptr[real_size - 1], yes?
Yes, sounds good. Thank you!