2021-08-18 21:41:32

by Kees Cook

Subject: [PATCH v2 0/7] Add __alloc_size() for better bounds checking

Hi,

GCC and Clang both use the "alloc_size" attribute to assist with bounds
checking around the use of allocation functions. Add the attribute,
adjust the Makefile to silence needless warnings, and add the hints to
the allocators where possible. These changes have been in use for a
while now in GrapheneOS.
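
As a hypothetical illustration (my_alloc() is a made-up allocator, not
from this series), the attribute lets the compiler resolve
__builtin_object_size() on the returned pointer, which is what the
fortified string routines key off of:

#include <stddef.h>
#include <string.h>

/* alloc_size(1) tells the compiler the result points to "n" bytes. */
void *my_alloc(size_t n) __attribute__((alloc_size(1)));

void demo(const char *src)
{
	char *p = my_alloc(8);

	/* __builtin_object_size(p, 0) now resolves to 8, so a fortified
	 * memcpy() can reject this 16-byte write at compile time. */
	memcpy(p, src, 16);
}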

To build without warnings, this series needs a couple of small fixes
for allmodconfig, which I sent separately:
https://lore.kernel.org/lkml/[email protected]/
https://lore.kernel.org/lkml/[email protected]/
https://lore.kernel.org/lkml/[email protected]/

I figure I can take this via my "overflow" series, or it could go via
-mm?

-Kees

v2:
- clean up slab function declarations (joe)
- update checkpatch.pl attribute regex (joe)
- explain the Makefile changes better (ojeda, nathan)
v1: https://lore.kernel.org/lkml/[email protected]

Kees Cook (7):
Compiler Attributes: Add __alloc_size() for better bounds checking
checkpatch: Add __alloc_size() to known $Attribute
slab: Clean up function declarations
slab: Add __alloc_size attributes for better bounds checking
mm/page_alloc: Add __alloc_size attributes for better bounds checking
percpu: Add __alloc_size attributes for better bounds checking
mm/vmalloc: Add __alloc_size attributes for better bounds checking

 Makefile                            |  6 ++-
 include/linux/compiler_attributes.h |  6 +++
 include/linux/gfp.h                 |  2 +
 include/linux/percpu.h              |  3 ++
 include/linux/slab.h                | 84 +++++++++++++++++------------
 include/linux/vmalloc.h             | 11 ++++
 scripts/checkpatch.pl               |  3 +-
 7 files changed, 80 insertions(+), 35 deletions(-)

--
2.30.2


2021-08-18 21:41:38

by Kees Cook

Subject: [PATCH v2 4/7] slab: Add __alloc_size attributes for better bounds checking

As already done in GrapheneOS, add the __alloc_size attribute for
regular kmalloc interfaces, to provide additional hinting for better
bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
optimizations.
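
As a hypothetical illustration (an invented caller, not part of this
patch), the hint lets a fortified routine see the allocation size at
the call site:

	void example(void)
	{
		char *buf = kmalloc(8, GFP_KERNEL);

		/* With kmalloc() marked __alloc_size(1),
		 * CONFIG_FORTIFY_SOURCE can reject this write beyond
		 * the allocation at compile time. */
		memset(buf, 0, 16);
	}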

Co-developed-by: Daniel Micay <[email protected]>
Signed-off-by: Daniel Micay <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/linux/slab.h | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 10fd0a8c816a..6ce826d8194d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -181,7 +181,7 @@ int kmem_cache_shrink(struct kmem_cache *s);
/*
* Common kmalloc functions provided by all allocators
*/
-__must_check
+__must_check __alloc_size(2)
void *krealloc(const void *objp, size_t new_size, gfp_t flags);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
@@ -426,6 +426,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

+__alloc_size(1)
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_kmalloc_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -450,6 +451,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}

#ifdef CONFIG_NUMA
+__alloc_size(1)
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
__assume_slab_alignment __malloc;
@@ -574,6 +576,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
+__alloc_size(1)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
@@ -596,6 +599,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}

+__alloc_size(1)
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
@@ -620,6 +624,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1, 2)
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -638,7 +643,7 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-__must_check
+__must_check __alloc_size(2, 3)
static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
gfp_t flags)
{
@@ -656,6 +661,7 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1, 2)
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
@@ -685,6 +691,7 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}

+__alloc_size(1, 2)
static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
@@ -718,6 +725,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1)
static inline void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
@@ -729,25 +737,31 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
+__alloc_size(1)
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}

+__alloc_size(1)
extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+__alloc_size(1)
static inline void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
+__alloc_size(1)
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
+__alloc_size(1)
static inline void *kvzalloc(size_t size, gfp_t flags)
{
return kvmalloc(size, flags | __GFP_ZERO);
}

+__alloc_size(1, 2)
static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -758,11 +772,13 @@ static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
return kvmalloc(bytes, flags);
}

+__alloc_size(1, 2)
static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

+__alloc_size(3)
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
gfp_t flags);
extern void kvfree(const void *addr);
--
2.30.2

2021-08-18 21:42:04

by Kees Cook

Subject: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

As already done in GrapheneOS, add the __alloc_size attribute for
appropriate percpu allocator interfaces, to provide additional hinting
for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
compiler optimizations.

Co-developed-by: Daniel Micay <[email protected]>
Signed-off-by: Daniel Micay <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/linux/percpu.h | 3 +++
1 file changed, 3 insertions(+)

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..119f41815b32 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -123,6 +123,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

+__alloc_size(1)
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,7 +132,9 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif

+__alloc_size(1)
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
+__alloc_size(1)
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
--
2.30.2

2021-08-18 21:42:17

by Kees Cook

Subject: [PATCH v2 7/7] mm/vmalloc: Add __alloc_size attributes for better bounds checking

As already done in GrapheneOS, add the __alloc_size attribute for
appropriate vmalloc allocator interfaces, to provide additional hinting
for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
compiler optimizations.

Co-developed-by: Daniel Micay <[email protected]>
Signed-off-by: Daniel Micay <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/linux/vmalloc.h | 11 +++++++++++
1 file changed, 11 insertions(+)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 2644425b6dce..1521ba38957d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -136,20 +136,31 @@ static inline void vmalloc_init(void)
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

+__alloc_size(1)
extern void *vmalloc(unsigned long size);
+__alloc_size(1)
extern void *vzalloc(unsigned long size);
+__alloc_size(1)
extern void *vmalloc_user(unsigned long size);
+__alloc_size(1)
extern void *vmalloc_node(unsigned long size, int node);
+__alloc_size(1)
extern void *vzalloc_node(unsigned long size, int node);
+__alloc_size(1)
extern void *vmalloc_32(unsigned long size);
+__alloc_size(1)
extern void *vmalloc_32_user(unsigned long size);
+__alloc_size(1)
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
+__alloc_size(1)
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller);
+__alloc_size(1)
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller);
+__alloc_size(1)
void *vmalloc_no_huge(unsigned long size);

extern void vfree(const void *addr);
--
2.30.2

2021-08-18 21:42:33

by Kees Cook

Subject: [PATCH v2 2/7] checkpatch: Add __alloc_size() to known $Attribute

Make sure checkpatch.pl doesn't get confused about finding the
__alloc_size attribute on functions.
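
For reference, the pattern below is meant to accept the annotation
forms used in this series, e.g.:

	__alloc_size(1)
	void *__kmalloc(size_t size, gfp_t flags);

	__alloc_size(2, 3)
	static inline void *krealloc_array(void *p, size_t new_n,
					   size_t new_size, gfp_t flags);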

Suggested-by: Joe Perches <[email protected]>
Cc: Andy Whitcroft <[email protected]>
Cc: Dwaipayan Ray <[email protected]>
Cc: Lukas Bulwahn <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
---
scripts/checkpatch.pl | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a65753c05a69..e4b0def605c3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -489,7 +489,8 @@ our $Attribute = qr{
____cacheline_aligned|
____cacheline_aligned_in_smp|
____cacheline_internodealigned_in_smp|
- __weak
+ __weak|
+ __alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\)
}x;
our $Modifier;
our $Inline = qr{inline|__always_inline|noinline|__inline|__inline__};
--
2.30.2

2021-08-18 21:42:39

by Kees Cook

Subject: [PATCH v2 3/7] slab: Clean up function declarations

In order to have more readable and regular declarations, move __must_check
to the line above the main function declaration and add all the missing
parameter names.

Suggested-by: Joe Perches <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/linux/slab.h | 68 +++++++++++++++++++++++---------------------
1 file changed, 35 insertions(+), 33 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index c0d46b6fa12a..10fd0a8c816a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -152,8 +152,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);

/*
* Please use this macro to create slab caches. Simply specify the
@@ -181,11 +181,12 @@ int kmem_cache_shrink(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+__must_check
+void *krealloc(const void *objp, size_t new_size, gfp_t flags);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
@@ -426,8 +427,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -436,8 +437,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
* Caller must not use kfree_bulk() on memory not originally allocated
@@ -449,8 +450,9 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}

#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+ __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -464,17 +466,15 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif

#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_slab_alignment __malloc;
#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
@@ -490,10 +490,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
return ret;
}

-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);

@@ -502,13 +500,15 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */

-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __malloc;
#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static __always_inline void *kmalloc_order_trace(size_t size, gfp_t flags,
+ unsigned int order)
{
return kmalloc_order(size, flags, order);
}
@@ -638,8 +638,9 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-static __must_check inline void *
-krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+__must_check
+static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
+ gfp_t flags)
{
size_t bytes;

@@ -668,7 +669,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)

@@ -691,7 +692,8 @@ static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)


#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
--
2.30.2

2021-08-18 21:43:06

by Kees Cook

Subject: [PATCH v2 5/7] mm/page_alloc: Add __alloc_size attributes for better bounds checking

As already done in GrapheneOS, add the __alloc_size attribute for
appropriate page allocator interfaces, to provide additional hinting
for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
compiler optimizations.

Co-developed-by: Daniel Micay <[email protected]>
Signed-off-by: Daniel Micay <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Signed-off-by: Kees Cook <[email protected]>
---
include/linux/gfp.h | 2 ++
1 file changed, 2 insertions(+)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3745efd21cf6..897538d5ffd2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -618,8 +618,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

+__alloc_size(1)
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
+__alloc_size(1)
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
--
2.30.2

2021-08-19 00:43:55

by Dennis Zhou

Subject: Re: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

Hello,

On Wed, Aug 18, 2021 at 02:40:20PM -0700, Kees Cook wrote:
> As already done in GrapheneOS, add the __alloc_size attribute for
> appropriate percpu allocator interfaces, to provide additional hinting
> for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
> compiler optimizations.

Can you elaborate a little for me on how this works for percpu? In any
case that isn't uniprocessor, any modification is done through the
address accessors and not on the returned percpu pointer itself. Is the
metadata kept by gcc/clang able to survive the percpu pointer accessors?

Thanks,
Dennis


2021-08-19 03:40:07

by Kees Cook

Subject: Re: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

On Wed, Aug 18, 2021 at 08:42:59PM -0400, Dennis Zhou wrote:
> On Wed, Aug 18, 2021 at 02:40:20PM -0700, Kees Cook wrote:
> > As already done in GrapheneOS, add the __alloc_size attribute for
> > appropriate percpu allocator interfaces, to provide additional hinting
> > for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
> > compiler optimizations.
>
> Can you elaborate a little for me on how this works for percpu? In any
> case that isn't uniprocessor, any modification is done through the
> address accessors and not on the returned percpu pointer itself. Is the
> metadata kept by gcc/clang able to survive the percpu pointer accessors?

That's an excellent point. :P I haven't tested it through the accessors,
but I guess it's possible that this is only useful for UP, and even
then, only where the access is very close to the "allocation", maybe
like:

char __percpu *test_buf;

char *buf;
test_buf = __alloc_percpu(16, __alignof__(char));
buf = per_cpu_ptr(test_buf, get_cpu());
...
buf[20] = '!';

-Kees


--
Kees Cook

2021-08-19 08:29:34

by Rasmus Villemoes

Subject: Re: [PATCH v2 4/7] slab: Add __alloc_size attributes for better bounds checking

On 18/08/2021 23.40, Kees Cook wrote:
> As already done in GrapheneOS, add the __alloc_size attribute for
> regular kmalloc interfaces, to provide additional hinting for better
> bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> optimizations.
>

> #ifdef CONFIG_NUMA
> +__alloc_size(1)
> void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_slab_alignment __malloc;

Eh, can we keep all the attributes together instead of having some
before, some after?

I don't necessarily think this is a good idea, but just throwing it out
there: __alloc_size almost always goes along with __malloc, so one could
define __alloc_size in such a way that it implies __malloc, then just
have a "raw" ____alloc_size version to use for krealloc() and similar.
But I guess it's cleaner to keep it this way.
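
Something like this (a purely hypothetical sketch, not something I've
tested):

#define ____alloc_size(...)	__attribute__((__alloc_size__(__VA_ARGS__)))
#define __alloc_size(...)	____alloc_size(__VA_ARGS__) __malloc

/* krealloc() and friends would keep using the raw ____alloc_size(),
 * since a reallocated result may alias its input and so must not be
 * marked __malloc. */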

While declared in string.h, kmemdup() is also eligible for alloc_size(2).

Which brings me to an old wishlist item of mine [it's almost Christmas]:
that alloc_size could understand more general expressions for the size
of the returned memory, not just the primitive ones based on the
malloc()/calloc() prototypes. So e.g. kmemdup_nul() returns something of
size $2+1, while it is also very common to have an alloc_foo(void)
helper which returns something of size sizeof(struct foo). Unfortunately
I don't think gcc's attribute parsing machinery can easily be tweaked
into accepting

struct bar *alloc_bars(unsigned count) __new_a_s(count * sizeof(struct bar))

but maybe clang could. If a compiler could understand that kind of
attribute, it would also pave the way for implementing
__attribute__((__buffer_size__(param, size, access)))

e.g.

memchr(src, c, size) __buffer_size(src, size, "r")

clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks)
	__buffer_size(clks, num_clks * sizeof(*clks), "rw")

which could be used for both static analysis and optional run-time
instrumentation.
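
FWIW, gcc 10's "access" attribute already covers a slice of this for
static analysis, tying a pointer parameter to a size parameter
(hypothetical my_memchr() shown; the third argument names the size
parameter):

	void *my_memchr(const void *s, int c, size_t n)
		__attribute__((access(read_only, 1, 3)));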

Rasmus

2021-08-19 14:15:22

by Dennis Zhou

Subject: Re: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

On Wed, Aug 18, 2021 at 08:36:50PM -0700, Kees Cook wrote:
> On Wed, Aug 18, 2021 at 08:42:59PM -0400, Dennis Zhou wrote:
> > On Wed, Aug 18, 2021 at 02:40:20PM -0700, Kees Cook wrote:
> > > As already done in GrapheneOS, add the __alloc_size attribute for
> > > appropriate percpu allocator interfaces, to provide additional hinting
> > > for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
> > > compiler optimizations.
> >
> > Can you elaborate a little for me on how this works for percpu? In any
> > case that isn't uniprocessor, any modification is done through the
> > address accessors and not on the returned percpu pointer itself. Is the
> > metadata kept by gcc/clang able to survive the percpu pointer accessors?
>
> That's an excellent point. :P I haven't tested it through the accessors,
> but I guess it's possible that this is only useful for UP, and even
> then, only where the access is very close to the "allocation", maybe
> like:
>

I see that this is already pulled by Andrew, but I think it would be
good to modify the commit log to add a short bit about this limitation.
Otherwise, the commit reads as if it's doing way more than it is.

Thanks,
Dennis

> char __percpu *test_buf;
>
> char *buf;
> test_buf = __alloc_percpu(16, __alignof__(char));
> buf = per_cpu_ptr(test_buf, get_cpu());
> ...
> buf[20] = '!';
>
> -Kees

2021-08-20 05:17:24

by Andrew Morton

Subject: Re: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

On Wed, 18 Aug 2021 14:40:20 -0700 Kees Cook <[email protected]> wrote:

> As already done in GrapheneOS, add the __alloc_size attribute for
> appropriate percpu allocator interfaces, to provide additional hinting
> for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
> compiler optimizations.
>

Caught one, I assume:

In file included from ./include/linux/string.h:262,
from ./include/linux/bitmap.h:11,
from ./include/linux/cpumask.h:12,
from ./arch/x86/include/asm/cpumask.h:5,
from ./arch/x86/include/asm/msr.h:11,
from ./arch/x86/include/asm/processor.h:22,
from ./arch/x86/include/asm/cpufeature.h:5,
from ./arch/x86/include/asm/thread_info.h:53,
from ./include/linux/thread_info.h:60,
from ./arch/x86/include/asm/preempt.h:7,
from ./include/linux/preempt.h:78,
from ./include/linux/spinlock.h:55,
from ./include/linux/mmzone.h:8,
from ./include/linux/gfp.h:6,
from ./include/linux/slab.h:15,
from drivers/misc/lkdtm/heap.c:7:
In function 'memset',
inlined from 'lkdtm_VMALLOC_LINEAR_OVERFLOW' at drivers/misc/lkdtm/heap.c:27:2:
./include/linux/fortify-string.h:172:3: error: call to '__write_overflow' declared with attribute error: detected write beyond size of object passed as 1st parameter
172 | __write_overflow();
| ^~~~~~~~~~~~~~~~~~
make[3]: *** [drivers/misc/lkdtm/heap.o] Error 1
make[2]: *** [drivers/misc/lkdtm] Error 2
make[1]: *** [drivers/misc] Error 2
make: *** [drivers] Error 2

I want to get a kernel release out, so I'll hide
mm-vmalloc-add-__alloc_size-attributes-for-better-bounds-checking.patch
for now.

2021-08-20 05:28:23

by Kees Cook

Subject: Re: [PATCH v2 6/7] percpu: Add __alloc_size attributes for better bounds checking

On Thu, Aug 19, 2021 at 10:11:15PM -0700, Andrew Morton wrote:
> On Wed, 18 Aug 2021 14:40:20 -0700 Kees Cook <[email protected]> wrote:
>
> > As already done in GrapheneOS, add the __alloc_size attribute for
> > appropriate percpu allocator interfaces, to provide additional hinting
> > for better bounds checking, assisting CONFIG_FORTIFY_SOURCE and other
> > compiler optimizations.
> >
>
> Caught one, I assume:
>
> In file included from ./include/linux/string.h:262,
> from ./include/linux/bitmap.h:11,
> from ./include/linux/cpumask.h:12,
> from ./arch/x86/include/asm/cpumask.h:5,
> from ./arch/x86/include/asm/msr.h:11,
> from ./arch/x86/include/asm/processor.h:22,
> from ./arch/x86/include/asm/cpufeature.h:5,
> from ./arch/x86/include/asm/thread_info.h:53,
> from ./include/linux/thread_info.h:60,
> from ./arch/x86/include/asm/preempt.h:7,
> from ./include/linux/preempt.h:78,
> from ./include/linux/spinlock.h:55,
> from ./include/linux/mmzone.h:8,
> from ./include/linux/gfp.h:6,
> from ./include/linux/slab.h:15,
> from drivers/misc/lkdtm/heap.c:7:
> In function 'memset',
> inlined from 'lkdtm_VMALLOC_LINEAR_OVERFLOW' at drivers/misc/lkdtm/heap.c:27:2:
> ./include/linux/fortify-string.h:172:3: error: call to '__write_overflow' declared with attribute error: detected write beyond size of object passed as 1st parameter
> 172 | __write_overflow();
> | ^~~~~~~~~~~~~~~~~~
> make[3]: *** [drivers/misc/lkdtm/heap.o] Error 1
> make[2]: *** [drivers/misc/lkdtm] Error 2
> make[1]: *** [drivers/misc] Error 2
> make: *** [drivers] Error 2
>
> I want to get a kernel release out, so I'll hide
> mm-vmalloc-add-__alloc_size-attributes-for-better-bounds-checking.patch
> for now.

In the cover letter[1], I listed the needed fixes that were sent
separately from this series. Quoting here:

> To build without warnings, this series needs a couple of small fixes
> for allmodconfig, which I sent separately:
> https://lore.kernel.org/lkml/[email protected]/
> https://lore.kernel.org/lkml/[email protected]/
> https://lore.kernel.org/lkml/[email protected]/

What you hit is the first one, which is already in Greg's tree:
https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git/commit/?h=char-misc-next&id=e6d468d32cd084edd030a8bae76440b17b854b5c

The other two have also been taken:
https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git/commit/?h=staging-next&id=cbe34165cc1b7d1110b268ba8b9f30843c941639
https://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git/commit/?id=a31e5a4158d03595ca4258b94397d4097be0ebe4

-Kees

[1] https://lore.kernel.org/lkml/[email protected]/

--
Kees Cook

2021-08-25 21:33:36

by Nick Desaulniers

Subject: Re: [PATCH v2 4/7] slab: Add __alloc_size attributes for better bounds checking

On Wed, Aug 18, 2021 at 2:40 PM Kees Cook <[email protected]> wrote:
>
> As already done in GrapheneOS, add the __alloc_size attribute for
> regular kmalloc interfaces, to provide additional hinting for better
> bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> optimizations.
>
> Co-developed-by: Daniel Micay <[email protected]>
> Signed-off-by: Daniel Micay <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Vlastimil Babka <[email protected]>
> Cc: [email protected]
> Signed-off-by: Kees Cook <[email protected]>

This is a good start, so
Reviewed-by: Nick Desaulniers <[email protected]>

Do we also want to attribute:
* __kmalloc_index
* kmem_cache_free_bulk
* kmem_cache_alloc_bulk
* kmem_cache_alloc_trace
* kmalloc_order
* kmalloc_order_trace
* kmalloc_large
* kmalloc_node
* __kmalloc_track_caller
* kmalloc_array_node
* __kmalloc_node_track_caller


--
Thanks,
~Nick Desaulniers

2021-09-22 22:44:31

by Kees Cook

Subject: Re: [PATCH v2 4/7] slab: Add __alloc_size attributes for better bounds checking

On Wed, Aug 25, 2021 at 02:31:34PM -0700, Nick Desaulniers wrote:
> On Wed, Aug 18, 2021 at 2:40 PM Kees Cook <[email protected]> wrote:
> >
> > As already done in GrapheneOS, add the __alloc_size attribute for
> > regular kmalloc interfaces, to provide additional hinting for better
> > bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> > optimizations.
> >
> > Co-developed-by: Daniel Micay <[email protected]>
> > Signed-off-by: Daniel Micay <[email protected]>
> > Cc: Christoph Lameter <[email protected]>
> > Cc: Pekka Enberg <[email protected]>
> > Cc: David Rientjes <[email protected]>
> > Cc: Joonsoo Kim <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Vlastimil Babka <[email protected]>
> > Cc: [email protected]
> > Signed-off-by: Kees Cook <[email protected]>
>
> This is a good start, so
> Reviewed-by: Nick Desaulniers <[email protected]>

Thanks!

> Do we also want to attribute:
> * __kmalloc_index

This is just the bucketizer (it returns an "unsigned int" bucket index,
not a pointer).

> * kmem_cache_free_bulk

Not an allocator.

> * kmem_cache_alloc_bulk

This allocates a list of pointers, where "size" is the length of the
list.
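
(Its signature, for reference:

	int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p);

so "size" counts the pointers stored into p[], not the size of any one
object, which is why __alloc_size() doesn't fit here.)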

> * kmalloc_order
> * kmalloc_order_trace
> * kmalloc_large

Yes, these should be marked, good point.

> * kmalloc_node

This was already marked.

> * kmem_cache_alloc_trace
> * __kmalloc_track_caller
> * __kmalloc_node_track_caller

Yeah, these might get passed through in LTO situations. I'll add them.

> * kmalloc_array_node

I'll add this -- I thought it was already here but it got missed.

Thanks!

-Kees


--
Kees Cook