Fixes the following sparse warnings:
include/trace/events/*: sparse: cast to restricted gfp_t
include/trace/events/*: sparse: restricted gfp_t degrades to integer
The gfp_t type is bitwise and requires __force attributes for any casts.
Signed-off-by: Vasily Averin <[email protected]>
---
include/linux/gfp.h | 2 +-
include/trace/events/btrfs.h | 4 +-
include/trace/events/compaction.h | 4 +-
include/trace/events/kmem.h | 12 ++---
include/trace/events/mmflags.h | 74 +++++++++++++++----------------
include/trace/events/vmscan.h | 16 +++----
mm/compaction.c | 2 +-
7 files changed, 57 insertions(+), 57 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 80f63c862be5..5f44c49abab7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -352,7 +352,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0d729664b4b4..ea7a05ac49c3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1343,13 +1343,13 @@ TRACE_EVENT(alloc_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
- __field(gfp_t, mask)
+ __field(unsigned long, mask)
__field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
- __entry->mask = mask,
+ __entry->mask = (__force unsigned long)mask,
__entry->ip = (const void *)IP
),
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 7d48e7079e48..915c11ee28e4 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
TP_STRUCT__entry(
__field(int, order)
- __field(gfp_t, gfp_mask)
+ __field(unsigned long, gfp_mask)
__field(int, prio)
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_mask = gfp_mask;
+ __entry->gfp_mask = (__force unsigned long)gfp_mask;
__entry->prio = prio;
),
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..71c141804222 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( const void *, ptr )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, node )
),
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->ptr = ptr;
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
),
@@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
__entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->migratetype = migratetype;
),
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 116ed4d5d0f8..51d4fa6f3f39 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -14,43 +14,43 @@
*/
#define __def_gfpflag_names \
- {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
- {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
- {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
- {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
- {(unsigned long)GFP_USER, "GFP_USER"}, \
- {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
- {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
- {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
- {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
- {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
- {(unsigned long)GFP_DMA, "GFP_DMA"}, \
- {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
- {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
- {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
- {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
- {(unsigned long)__GFP_IO, "__GFP_IO"}, \
- {(unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
- {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
- {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
- {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
- {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
- {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
- {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
- {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
- {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
- {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
- {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
- {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
- {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
- {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \
- {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
+ {(__force unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
+ {(__force unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
+ {(__force unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
+ {(__force unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+ {(__force unsigned long)GFP_USER, "GFP_USER"}, \
+ {(__force unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
+ {(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
+ {(__force unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+ {(__force unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
+ {(__force unsigned long)GFP_NOIO, "GFP_NOIO"}, \
+ {(__force unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
+ {(__force unsigned long)GFP_DMA, "GFP_DMA"}, \
+ {(__force unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
+ {(__force unsigned long)GFP_DMA32, "GFP_DMA32"}, \
+ {(__force unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
+ {(__force unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
+ {(__force unsigned long)__GFP_IO, "__GFP_IO"}, \
+ {(__force unsigned long)__GFP_FS, "__GFP_FS"}, \
+ {(__force unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
+ {(__force unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
+ {(__force unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
+ {(__force unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
+ {(__force unsigned long)__GFP_COMP, "__GFP_COMP"}, \
+ {(__force unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
+ {(__force unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
+ {(__force unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
+ {(__force unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
+ {(__force unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
+ {(__force unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
+ {(__force unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
+ {(__force unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
+ {(__force unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
+ {(__force unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
+ {(__force unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
+ {(__force unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
+ {(__force unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \
+ {(__force unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"} \
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ca2e9009a651..b1db02c1aa6f 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
__field( int, nid )
__field( int, zid )
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
TP_STRUCT__entry(
__field( int, order )
- __field( gfp_t, gfp_flags )
+ __field( unsigned long, gfp_flags )
),
TP_fast_assign(
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("order=%d gfp_flags=%s",
@@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(void *, shrink)
__field(int, nid)
__field(long, nr_objects_to_shrink)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
@@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->shrink = shr->scan_objects;
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
- __entry->gfp_flags = sc->gfp_mask;
+ __entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
@@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
- __field(gfp_t, gfp_flags)
+ __field(unsigned long, gfp_flags)
),
TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
- __entry->gfp_flags = gfp_flags;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
),
TP_printk("nid=%d order=%d gfp_flags=%s",
diff --git a/mm/compaction.c b/mm/compaction.c
index b4e94cda3019..746dafa4e6eb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2558,7 +2558,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, struct page **capture)
{
- int may_perform_io = gfp_mask & __GFP_IO;
+ int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
struct zoneref *z;
struct zone *zone;
enum compact_result rc = COMPACT_SKIPPED;
--
2.31.1
On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <[email protected]> wrote:
> Fixes the following sparse warnings:
>
> include/trace/events/*: sparse: cast to restricted gfp_t
> include/trace/events/*: sparse: restricted gfp_t degrades to integer
>
> gfp_t type is bitwise and requires __force attributes for any casts.
>
> --- a/include/trace/events/mmflags.h
> +++ b/include/trace/events/mmflags.h
> @@ -14,43 +14,43 @@
> */
>
> #define __def_gfpflag_names \
> - {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
> - {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
> - {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
> - {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
> - {(unsigned long)GFP_USER, "GFP_USER"}, \
> - {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
> - {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
>
> ...
>
> + {(__force unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
> + {(__force unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
> + {(__force unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
> + {(__force unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
> + {(__force unsigned long)GFP_USER, "GFP_USER"}, \
> + {(__force unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
> + {(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
> + {(__force unsigned long)GFP_NOFS, "GFP_NOFS"}, \
This got all repetitive, line-wrappy and ugly :(
What do we think of something silly like this?
--- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
+++ a/include/trace/events/mmflags.h
@@ -13,53 +13,57 @@
* Thus most bits set go first.
*/
+#define FUL __force unsigned long
+
#define __def_gfpflag_names \
- {(__force unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
- {(__force unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
- {(__force unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
- {(__force unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
- {(__force unsigned long)GFP_USER, "GFP_USER"}, \
- {(__force unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
- {(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
- {(__force unsigned long)GFP_NOFS, "GFP_NOFS"}, \
- {(__force unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(__force unsigned long)GFP_NOIO, "GFP_NOIO"}, \
- {(__force unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
- {(__force unsigned long)GFP_DMA, "GFP_DMA"}, \
- {(__force unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
- {(__force unsigned long)GFP_DMA32, "GFP_DMA32"}, \
- {(__force unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
- {(__force unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
- {(__force unsigned long)__GFP_IO, "__GFP_IO"}, \
- {(__force unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(__force unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(__force unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
- {(__force unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
- {(__force unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
- {(__force unsigned long)__GFP_COMP, "__GFP_COMP"}, \
- {(__force unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
- {(__force unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
- {(__force unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
- {(__force unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
- {(__force unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
- {(__force unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
- {(__force unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
- {(__force unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(__force unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
- {(__force unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
- {(__force unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(__force unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
- {(__force unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \
+ {(FUL)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
+ {(FUL)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
+ {(FUL)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
+ {(FUL)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+ {(FUL)GFP_USER, "GFP_USER"}, \
+ {(FUL)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
+ {(FUL)GFP_KERNEL, "GFP_KERNEL"}, \
+ {(FUL)GFP_NOFS, "GFP_NOFS"}, \
+ {(FUL)GFP_ATOMIC, "GFP_ATOMIC"}, \
+ {(FUL)GFP_NOIO, "GFP_NOIO"}, \
+ {(FUL)GFP_NOWAIT, "GFP_NOWAIT"}, \
+ {(FUL)GFP_DMA, "GFP_DMA"}, \
+ {(FUL)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
+ {(FUL)GFP_DMA32, "GFP_DMA32"}, \
+ {(FUL)__GFP_HIGH, "__GFP_HIGH"}, \
+ {(FUL)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
+ {(FUL)__GFP_IO, "__GFP_IO"}, \
+ {(FUL)__GFP_FS, "__GFP_FS"}, \
+ {(FUL)__GFP_NOWARN, "__GFP_NOWARN"}, \
+ {(FUL)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
+ {(FUL)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
+ {(FUL)__GFP_NORETRY, "__GFP_NORETRY"}, \
+ {(FUL)__GFP_COMP, "__GFP_COMP"}, \
+ {(FUL)__GFP_ZERO, "__GFP_ZERO"}, \
+ {(FUL)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
+ {(FUL)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
+ {(FUL)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
+ {(FUL)__GFP_THISNODE, "__GFP_THISNODE"}, \
+ {(FUL)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
+ {(FUL)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
+ {(FUL)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
+ {(FUL)__GFP_WRITE, "__GFP_WRITE"}, \
+ {(FUL)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
+ {(FUL)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
+ {(FUL)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
+ {(FUL)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \
#ifdef CONFIG_KASAN_HW_TAGS
#define __def_gfpflag_names_kasan , \
- {(__force unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \
- {(__force unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
- {(__force unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+ {(FUL)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \
+ {(FUL)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
+ {(FUL)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
#else
#define __def_gfpflag_names_kasan
#endif
+#undef FUL
+
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_gfpflag_names __def_gfpflag_names_kasan \
_
On 5/7/22 22:37, Andrew Morton wrote:
> On Sat, 7 May 2022 22:02:05 +0300 Vasily Averin <[email protected]> wrote:
>> + {(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
>> + {(__force unsigned long)GFP_NOFS, "GFP_NOFS"}, \
>
> This got all repetitive, line-wrappy and ugly :(
>
> What do we think of something silly like this?
> --- a/include/trace/events/mmflags.h~tracing-incorrect-gfp_t-conversion-fix
> +++ a/include/trace/events/mmflags.h
> @@ -13,53 +13,57 @@
> * Thus most bits set go first.
> */
>
> +#define FUL __force unsigned long
> +
> #define __def_gfpflag_names \
> - {(__force unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
> - {(__force unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
...
> + {(FUL)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
> + {(FUL)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
I think it's a good idea, and I regret it was your idea and not mine.
Should I resend my patch with these changes or would you prefer
to keep your patch as a separate one?
Thank you,
Vasily Averin