2023-11-20 18:35:51

by Vlastimil Babka

Subject: [PATCH v2 17/21] mm/slab: move kmalloc_slab() to mm/slab.h

In preparation for the next patch, move the kmalloc_slab() function to
the header, as it will have callers from two files, and make it inline.
To avoid unnecessary bloat, remove all size checks/warnings from
kmalloc_slab() as they just duplicate those in callers, especially after
recent changes to kmalloc_size_roundup(). We just need to adjust the
handling of zero size in __do_kmalloc_node(). We can also stop handling
a NULL result from kmalloc_slab() there, as that can no longer happen
(unless kmalloc_slab() is called too early during boot).

The size_index array becomes visible, so rename it to the more specific
kmalloc_size_index.

Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
---
mm/slab.h | 28 ++++++++++++++++++++++++++--
mm/slab_common.c | 43 ++++++++-----------------------------------
2 files changed, 34 insertions(+), 37 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 35a55c4a407d..7d7cc7af614e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -389,8 +389,32 @@ extern const struct kmalloc_info_struct {
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

-/* Find the kmalloc slab corresponding for a certain size */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
+extern u8 kmalloc_size_index[24];
+
+static inline unsigned int size_index_elem(unsigned int bytes)
+{
+ return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ *
+ * This assumes size is larger than zero and not larger than
+ * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
+ */
+static inline struct kmem_cache *
+kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+{
+ unsigned int index;
+
+ if (size <= 192)
+ index = kmalloc_size_index[size_index_elem(size)];
+ else
+ index = fls(size - 1);
+
+ return kmalloc_caches[kmalloc_type(flags, caller)][index];
+}

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t orig_size,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f4f275613d2a..31ade17a7ad9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -665,7 +665,7 @@ EXPORT_SYMBOL(random_kmalloc_seed);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static u8 size_index[24] __ro_after_init = {
+u8 kmalloc_size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
@@ -692,33 +692,6 @@ static u8 size_index[24] __ro_after_init = {
2 /* 192 */
};

-static inline unsigned int size_index_elem(unsigned int bytes)
-{
- return (bytes - 1) / 8;
-}
-
-/*
- * Find the kmem_cache structure that serves a given size of
- * allocation
- */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
-{
- unsigned int index;
-
- if (size <= 192) {
- if (!size)
- return ZERO_SIZE_PTR;
-
- index = size_index[size_index_elem(size)];
- } else {
- if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
- return NULL;
- index = fls(size - 1);
- }
-
- return kmalloc_caches[kmalloc_type(flags, caller)][index];
-}
-
size_t kmalloc_size_roundup(size_t size)
{
if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
@@ -843,9 +816,9 @@ void __init setup_kmalloc_cache_index_table(void)
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
unsigned int elem = size_index_elem(i);

- if (elem >= ARRAY_SIZE(size_index))
+ if (elem >= ARRAY_SIZE(kmalloc_size_index))
break;
- size_index[elem] = KMALLOC_SHIFT_LOW;
+ kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
}

if (KMALLOC_MIN_SIZE >= 64) {
@@ -854,7 +827,7 @@ void __init setup_kmalloc_cache_index_table(void)
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
- size_index[size_index_elem(i)] = 7;
+ kmalloc_size_index[size_index_elem(i)] = 7;

}

@@ -865,7 +838,7 @@ void __init setup_kmalloc_cache_index_table(void)
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
- size_index[size_index_elem(i)] = 8;
+ kmalloc_size_index[size_index_elem(i)] = 8;
}
}

@@ -977,10 +950,10 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
return ret;
}

- s = kmalloc_slab(size, flags, caller);
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;

- if (unlikely(ZERO_OR_NULL_PTR(s)))
- return s;
+ s = kmalloc_slab(size, flags, caller);

ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
ret = kasan_kmalloc(s, ret, size, flags);

--
2.42.1
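
For readers who want to see the lookup in isolation: below is a minimal
userspace sketch of the size-to-index mapping that the now-inline
kmalloc_slab() performs, together with the zero-size check that this
patch moves into __do_kmalloc_node(). The model_* names and stand-in
constants are hypothetical and exist only for this illustration; only
the table contents and the fls(size - 1) arithmetic mirror the patch.

/*
 * Minimal userspace model (not kernel code) of the kmalloc_slab()
 * size-to-index lookup as it stands after this patch.
 */
#include <stdio.h>
#include <stddef.h>

/* Mirrors kmalloc_size_index[]: maps (size - 1) / 8 to a cache index
 * for sizes of 8..192 bytes. Indices 1 and 2 are the 96-byte and
 * 192-byte caches. */
static const unsigned char model_size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,	/*   8 ..  64 */
	1, 1, 1, 1,		/*  72 ..  96 */
	7, 7, 7, 7,		/* 104 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2	/* 136 .. 192 */
};

/* Kernel-style fls(): position of the most significant set bit,
 * with fls(0) == 0. */
static unsigned int model_fls(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * Caller contract from the patch: size is larger than zero and not
 * larger than KMALLOC_MAX_CACHE_SIZE; with the checks removed from
 * kmalloc_slab(), the caller is responsible for both.
 */
static unsigned int model_kmalloc_index(size_t size)
{
	if (size <= 192)
		return model_size_index[(size - 1) / 8];
	return model_fls(size - 1);	/* power-of-two caches above 192 */
}

int main(void)
{
	size_t sizes[] = { 0, 8, 96, 192, 193, 4096 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];

		/* Zero-size handling now lives in the caller, as in
		 * __do_kmalloc_node() after this patch. */
		if (!size) {
			printf("size %4zu -> ZERO_SIZE_PTR\n", size);
			continue;
		}
		printf("size %4zu -> cache index %u\n",
		       size, model_kmalloc_index(size));
	}
	return 0;
}

Compiled with any C compiler, this prints for example "size  193 ->
cache index 8", i.e. the 256-byte cache: the power-of-two rounding that
the fls(size - 1) expression implements for sizes above 192.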


2023-12-07 01:28:55

by Hyeonggon Yoo

Subject: Re: [PATCH v2 17/21] mm/slab: move kmalloc_slab() to mm/slab.h

On Mon, Nov 20, 2023 at 07:34:28PM +0100, Vlastimil Babka wrote:
> In preparation for the next patch, move the kmalloc_slab() function to
> the header, as it will have callers from two files, and make it inline.
> [...]
Looks good to me,
Reviewed-by: Hyeonggon Yoo <[email protected]>
