2024-02-20 16:59:35

by Vlastimil Babka

Subject: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

The values of SLAB_ cache creation flags are defined by hand, which is
tedious and error-prone. Use an enum to assign the bit number and a
__SF_BIT() macro to #define the final flags.

This renumbers the flag values, which is OK as they are only used
internally.

Signed-off-by: Vlastimil Babka <[email protected]>
---
include/linux/slab.h | 81 ++++++++++++++++++++++++++++++++++++++--------------
mm/slub.c | 6 ++--
2 files changed, 63 insertions(+), 24 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6252f44115c2..f893a132dd5a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,29 +21,68 @@
#include <linux/cleanup.h>
#include <linux/hash.h>

+enum _slab_flag_bits {
+ _SLAB_CONSISTENCY_CHECKS,
+ _SLAB_RED_ZONE,
+ _SLAB_POISON,
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_CACHE_DMA,
+ _SLAB_CACHE_DMA32,
+ _SLAB_STORE_USER,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+ _SLAB_DEBUG_OBJECTS,
+#endif
+ _SLAB_NOLEAKTRACE,
+ _SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+ _SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+ _SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+ _SLAB_KASAN,
+#endif
+ _SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+ _SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+ _SLAB_RECLAIM_ACCOUNT,
+#endif
+ _SLAB_OBJECT_POISON,
+ _SLAB_CMPXCHG_DOUBLE,
+ _SLAB_FLAGS_LAST_BIT
+};
+
+#define __SF_BIT(nr) ((slab_flags_t __force)(1U << (nr)))

/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS __SF_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE __SF_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON __SF_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
-#define SLAB_KMALLOC ((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC __SF_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN __SF_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA __SF_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32 __SF_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER __SF_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC __SF_BIT(_SLAB_PANIC)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
@@ -95,19 +134,19 @@
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
/* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
+#define SLAB_TYPESAFE_BY_RCU __SF_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE __SF_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS __SF_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE __SF_BIT(_SLAB_NOLEAKTRACE)

/*
* Prevent merging with compatible kmem caches. This flag should be used
@@ -119,23 +158,23 @@
* - performance critical caches, should be very rare and consulted with slab
* maintainers, and not used together with CONFIG_SLUB_TINY
*/
-#define SLAB_NO_MERGE ((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE __SF_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB __SF_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT __SF_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN __SF_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN 0
#endif
@@ -145,10 +184,10 @@
* Intended for caches created for self-tests so they have only flags
* specified in the code and other flags are ignored.
*/
-#define SLAB_NO_USER_FLAGS ((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS __SF_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE ((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE __SF_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE 0
#endif
@@ -156,9 +195,9 @@
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT __SF_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT 0
#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */

diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..a93c5a17cbbb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)

/* Internal SLUB flags */
/* Poison object */
-#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
+#define __OBJECT_POISON __SF_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
-#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
+#define __CMPXCHG_DOUBLE __SF_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
-#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U)
+#define __CMPXCHG_DOUBLE 0
#endif

/*

--
2.43.1
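
For readers outside the thread, the pattern the patch introduces can be tried
in isolation. Below is a minimal userspace sketch, not the kernel code:
slab_flags_t is reduced to a plain unsigned int, the sparse-only __force
annotation is dropped, and only a few example flags are shown:

#include <stdio.h>

typedef unsigned int slab_flags_t;	/* stand-in for the kernel typedef */

/* bit numbers are assigned automatically, in order, by the enum */
enum _slab_flag_bits {
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_FLAGS_LAST_BIT
};

/* the kernel macro additionally carries a (slab_flags_t __force) cast for sparse */
#define __SF_BIT(nr)	((slab_flags_t)(1U << (nr)))

#define SLAB_RED_ZONE		__SF_BIT(_SLAB_RED_ZONE)
#define SLAB_POISON		__SF_BIT(_SLAB_POISON)
#define SLAB_HWCACHE_ALIGN	__SF_BIT(_SLAB_HWCACHE_ALIGN)

int main(void)
{
	/* consecutive bits, no hand-maintained hex values or gaps */
	printf("SLAB_RED_ZONE      = 0x%08x\n", SLAB_RED_ZONE);
	printf("SLAB_POISON        = 0x%08x\n", SLAB_POISON);
	printf("SLAB_HWCACHE_ALIGN = 0x%08x\n", SLAB_HWCACHE_ALIGN);
	return 0;
}

Since callers only ever use the symbolic names, e.g.
kmem_cache_create(name, size, align, SLAB_HWCACHE_ALIGN | SLAB_PANIC, ctor),
the renumbering noted in the commit message is invisible to them.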



2024-02-21 02:32:09

by Song, Xiongwei

Subject: RE: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags


> The values of SLAB_ cache creation flagsare defined by hand, which is

A blank space is missing between "flags" and "are".

> tedious and error-prone. Use an enum to assign the bit number and a
> __SF_BIT() macro to #define the final flags.
>
> This renumbers the flag values, which is OK as they are only used
> internally.
>
> Signed-off-by: Vlastimil Babka <[email protected]>

I ran a rough build and boot-up test with the related debug configs enabled,
so feel free to add

Tested-by: Xiongwei Song <[email protected]>
Reviewed-by: Xiongwei Song <[email protected]>

Thanks,
Xiongwei

2024-02-21 07:13:45

by Chengming Zhou

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On 2024/2/21 00:58, Vlastimil Babka wrote:
> The values of SLAB_ cache creation flags are defined by hand, which is
> tedious and error-prone. Use an enum to assign the bit number and a
> __SF_BIT() macro to #define the final flags.
>
> This renumbers the flag values, which is OK as they are only used
> internally.
>
> Signed-off-by: Vlastimil Babka <[email protected]>

Reviewed-by: Chengming Zhou <[email protected]>

Thanks!


2024-02-21 18:34:21

by Roman Gushchin

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On Tue, Feb 20, 2024 at 05:58:26PM +0100, Vlastimil Babka wrote:
> The values of SLAB_ cache creation flags are defined by hand, which is
> tedious and error-prone. Use an enum to assign the bit number and a
> __SF_BIT() macro to #define the final flags.
>
> This renumbers the flag values, which is OK as they are only used
> internally.
>
> Signed-off-by: Vlastimil Babka <[email protected]>
> [...]
> +enum _slab_flag_bits {
> + _SLAB_CONSISTENCY_CHECKS,
> + _SLAB_RED_ZONE,
> + _SLAB_POISON,
> + _SLAB_KMALLOC,
> + _SLAB_HWCACHE_ALIGN,
> + _SLAB_CACHE_DMA,
> + _SLAB_CACHE_DMA32,
> + _SLAB_STORE_USER,
> + _SLAB_PANIC,
> + _SLAB_TYPESAFE_BY_RCU,
> + _SLAB_TRACE,
> +#ifdef CONFIG_DEBUG_OBJECTS
> + _SLAB_DEBUG_OBJECTS,
> +#endif
> + _SLAB_NOLEAKTRACE,
> + _SLAB_NO_MERGE,
> +#ifdef CONFIG_FAILSLAB
> + _SLAB_FAILSLAB,
> +#endif
> +#ifdef CONFIG_MEMCG_KMEM
> + _SLAB_ACCOUNT,
> +#endif
> +#ifdef CONFIG_KASAN_GENERIC
> + _SLAB_KASAN,
> +#endif
> + _SLAB_NO_USER_FLAGS,
> +#ifdef CONFIG_KFENCE
> + _SLAB_SKIP_KFENCE,
> +#endif
> +#ifndef CONFIG_SLUB_TINY
> + _SLAB_RECLAIM_ACCOUNT,
> +#endif
> + _SLAB_OBJECT_POISON,
> + _SLAB_CMPXCHG_DOUBLE,
> + _SLAB_FLAGS_LAST_BIT
> +};
> +
> +#define __SF_BIT(nr) ((slab_flags_t __force)(1U << (nr)))

I'd rename it to (__)SLAB_FLAG_BIT(), as SF is a bit cryptic, but that's not a
strong preference. Otherwise this looks really good to me, nice cleanup.

Reviewed-by: Roman Gushchin <[email protected]>

Thanks!

2024-02-21 22:19:53

by Vlastimil Babka

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On 2/20/24 17:58, Vlastimil Babka wrote:
> @@ -156,9 +195,9 @@
> /* The following flags affect the page allocator grouping pages by mobility */
> /* Objects are reclaimable */
> #ifndef CONFIG_SLUB_TINY
> -#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
> +#define SLAB_RECLAIM_ACCOUNT __SF_BIT(_SLAB_RECLAIM_ACCOUNT)
> #else
> -#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0)
> +#define SLAB_RECLAIM_ACCOUNT 0

lkp/sparse tells me this was the wrong way to unify all the noop-due-to-config
flags [1][2], so in v2 I'll unify all of those to

((slab_flags_t __force)0U)

and do the same for the deprecated SLAB_MEM_SPREAD in patch 1.

[1] https://lore.kernel.org/all/[email protected]/
[2] https://lore.kernel.org/all/[email protected]/


> #endif
> #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
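
For reference, the unification described above would give every
config-disabled flag the same typed zero, along these lines (a sketch of the
v2 direction, not the final code):

#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SF_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
/* typed zero instead of a bare 0, so sparse still sees a slab_flags_t */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0U)
#endif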



2024-02-23 16:42:48

by Vlastimil Babka

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On 2/21/24 19:33, Roman Gushchin wrote:
> On Tue, Feb 20, 2024 at 05:58:26PM +0100, Vlastimil Babka wrote:
>> The values of SLAB_ cache creation flags are defined by hand, which is
>> tedious and error-prone. Use an enum to assign the bit number and a
>> __SF_BIT() macro to #define the final flags.
>>
>> This renumbers the flag values, which is OK as they are only used
>> internally.
>>
>> Signed-off-by: Vlastimil Babka <[email protected]>
>> +#define __SF_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
>
> I'd rename it to (__)SLAB_FLAG_BIT(), as SF is a bit cryptic, but not a strong
> preference. Otherwise looks really good to me, nice cleanup.

OK, and it's also less likely that somebody would create a name collision with it.

> Reviewed-by: Roman Gushchin <[email protected]>
>
> Thanks!
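
With the rename agreed here, the helper would presumably become:

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))

with each flag defined accordingly, e.g. SLAB_RED_ZONE as
__SLAB_FLAG_BIT(_SLAB_RED_ZONE).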


2024-02-23 16:43:42

by Vlastimil Babka

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On 2/23/24 04:12, Christoph Lameter (Ampere) wrote:
> On Tue, 20 Feb 2024, Vlastimil Babka wrote:
>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 2ef88bbf56a3..a93c5a17cbbb 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
>>
>> /* Internal SLUB flags */
>> /* Poison object */
>> -#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
>> +#define __OBJECT_POISON __SF_BIT(_SLAB_OBJECT_POISON)
>> /* Use cmpxchg_double */
>>
>> #ifdef system_has_freelist_aba
>> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
>> +#define __CMPXCHG_DOUBLE __SF_BIT(_SLAB_CMPXCHG_DOUBLE)
>> #else
>> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U)
>> +#define __CMPXCHG_DOUBLE 0
>> #endif
>
> Maybe it's good to put these internal flags together with the other flags.
> After all there is no other slab allocator available anymore and having
> them all together avoids confusion.

Good point, will do. Then I can also #undef the helper macro after the last
flag.




2024-02-23 17:06:32

by Vlastimil Babka

Subject: Re: [PATCH 2/3] mm, slab: use an enum to define SLAB_ cache creation flags

On 2/23/24 17:43, Vlastimil Babka wrote:
> On 2/23/24 04:12, Christoph Lameter (Ampere) wrote:
>> On Tue, 20 Feb 2024, Vlastimil Babka wrote:
>>
>>> diff --git a/mm/slub.c b/mm/slub.c
>>> index 2ef88bbf56a3..a93c5a17cbbb 100644
>>> --- a/mm/slub.c
>>> +++ b/mm/slub.c
>>> @@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
>>>
>>> /* Internal SLUB flags */
>>> /* Poison object */
>>> -#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
>>> +#define __OBJECT_POISON __SF_BIT(_SLAB_OBJECT_POISON)
>>> /* Use cmpxchg_double */
>>>
>>> #ifdef system_has_freelist_aba

Hm, but we only have this in the internal mm/slab.h.

>>> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
>>> +#define __CMPXCHG_DOUBLE __SF_BIT(_SLAB_CMPXCHG_DOUBLE)
>>> #else
>>> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U)
>>> +#define __CMPXCHG_DOUBLE 0

And keeping the 0 is desirable to make the checks compile-time false when
it's not available.

So maybe it's best if it stays here after all, or we'd pull too many
internal details into the "public" slab.h.

>>> #endif
>>
>> Maybe it's good to put these internal flags together with the other flags.
>> After all there is no other slab allocator available anymore and having
>> them all together avoids confusion.
>
> Good point, will do. Then I can also #undef the helper macro after the last
> flag.
>
>
>
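
The compile-time-false point can be seen in a standalone sketch (userspace
stand-ins for the kernel types; the flag test below is dead code whenever the
flag is a constant zero):

#include <stdio.h>

typedef unsigned int slab_flags_t;

/* pretend system_has_freelist_aba is not defined on this platform */
#define __CMPXCHG_DOUBLE ((slab_flags_t)0U)

struct kmem_cache { slab_flags_t flags; };

static void update_freelist(struct kmem_cache *s)
{
	if (s->flags & __CMPXCHG_DOUBLE) {
		/* never reached: flags & 0 is always false, so the
		 * compiler can discard this branch entirely */
		puts("lockless cmpxchg path");
	} else {
		puts("fallback path");
	}
}

int main(void)
{
	struct kmem_cache s = { .flags = ~0U };
	update_freelist(&s);	/* prints "fallback path" */
	return 0;
}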

