2023-05-16 06:29:09

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next v2 00/13] mm: page_alloc: misc cleanup and refactor

This aims to further reduce the size of page_alloc.c and to do some
cleanup; no functional changes intended.

This is based on next-20230515.

v2:
- drop move of __pageblock_pfn_to_page(), suggested by Huang Ying
- move __show_mem() below the __show_free_areas() in patch4
- add new patch13
- add RB from Mike

Kefeng Wang (13):
mm: page_alloc: move mirrored_kernelcore into mm_init.c
mm: page_alloc: move init_on_alloc/free() into mm_init.c
mm: page_alloc: move set_zone_contiguous() into mm_init.c
mm: page_alloc: collect mem statistic into show_mem.c
mm: page_alloc: squash page_is_consistent()
mm: page_alloc: remove alloc_contig_dump_pages() stub
mm: page_alloc: split out FAIL_PAGE_ALLOC
mm: page_alloc: split out DEBUG_PAGEALLOC
mm: page_alloc: move mark_free_page() into snapshot.c
mm: page_alloc: move pm_* function into power
mm: vmscan: use gfp_has_io_fs()
mm: page_alloc: move sysctls into its own file
mm: page_alloc: move is_check_pages_enabled() into page_alloc.c

include/linux/fault-inject.h | 9 +
include/linux/gfp.h | 15 +-
include/linux/memory_hotplug.h | 3 -
include/linux/mm.h | 87 ++--
include/linux/mmzone.h | 21 -
include/linux/suspend.h | 9 +-
kernel/power/main.c | 27 ++
kernel/power/power.h | 5 +
kernel/power/snapshot.c | 52 +++
kernel/sysctl.c | 67 ---
lib/Makefile | 2 +-
lib/show_mem.c | 37 --
mm/Makefile | 4 +-
mm/debug_page_alloc.c | 59 +++
mm/fail_page_alloc.c | 66 +++
mm/internal.h | 21 +-
mm/mm_init.c | 32 ++
mm/page_alloc.c | 799 ++++-----------------------------
mm/show_mem.c | 429 ++++++++++++++++++
mm/swapfile.c | 1 +
mm/vmscan.c | 2 +-
21 files changed, 857 insertions(+), 890 deletions(-)
delete mode 100644 lib/show_mem.c
create mode 100644 mm/debug_page_alloc.c
create mode 100644 mm/fail_page_alloc.c
create mode 100644 mm/show_mem.c

--
2.35.3



2023-05-16 06:32:50

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH v2 08/13] mm: page_alloc: split out DEBUG_PAGEALLOC

Move the DEBUG_PAGEALLOC-related functions into a single file to
reduce the size of page_alloc.c a bit.

Signed-off-by: Kefeng Wang <[email protected]>
---
include/linux/mm.h | 76 ++++++++++++++++++++++++++++---------------
mm/Makefile | 1 +
mm/debug_page_alloc.c | 59 +++++++++++++++++++++++++++++++++
mm/page_alloc.c | 69 ---------------------------------------
4 files changed, 109 insertions(+), 96 deletions(-)
create mode 100644 mm/debug_page_alloc.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index db3f66ed2f32..d3241f4ac903 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3485,9 +3485,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
+
+extern unsigned int _debug_guardpage_minorder;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool debug_guardpage_enabled(void)
+{
+ return static_branch_unlikely(&_debug_guardpage_enabled);
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+
+ return PageGuard(page);
+}
+
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+ return __set_page_guard(zone, page, order, migratetype);
+}
+
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return;
+ __clear_page_guard(zone, page, order, migratetype);
+}
+
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
+static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) { return false; }
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
@@ -3725,33 +3774,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern unsigned int _debug_guardpage_minorder;
-DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
-
-static inline unsigned int debug_guardpage_minorder(void)
-{
- return _debug_guardpage_minorder;
-}
-
-static inline bool debug_guardpage_enabled(void)
-{
- return static_branch_unlikely(&_debug_guardpage_enabled);
-}
-
-static inline bool page_is_guard(struct page *page)
-{
- if (!debug_guardpage_enabled())
- return false;
-
- return PageGuard(page);
-}
-#else
-static inline unsigned int debug_guardpage_minorder(void) { return 0; }
-static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
diff --git a/mm/Makefile b/mm/Makefile
index 0eec4bc72d3f..678530a07326 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_SECRETMEM) += secretmem.o
obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
+obj-$(CONFIG_DEBUG_PAGEALLOC) += debug_page_alloc.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_DAMON) += damon/
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c
new file mode 100644
index 000000000000..f9d145730fd1
--- /dev/null
+++ b/mm/debug_page_alloc.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/page-isolation.h>
+
+unsigned int _debug_guardpage_minorder;
+
+bool _debug_pagealloc_enabled_early __read_mostly
+ = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
+DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+EXPORT_SYMBOL(_debug_pagealloc_enabled);
+
+DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
+
+static int __init early_debug_pagealloc(char *buf)
+{
+ return kstrtobool(buf, &_debug_pagealloc_enabled_early);
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+ unsigned long res;
+
+ if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+ pr_err("Bad debug_guardpage_minorder value\n");
+ return 0;
+ }
+ _debug_guardpage_minorder = res;
+ pr_info("Setting debug_guardpage_minorder to %lu\n", res);
+ return 0;
+}
+early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
+
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype)
+{
+ if (order >= debug_guardpage_minorder())
+ return false;
+
+ __SetPageGuard(page);
+ INIT_LIST_HEAD(&page->buddy_list);
+ set_page_private(page, order);
+ /* Guard pages are not available for any usage */
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
+
+ return true;
+}
+
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype)
+{
+ __ClearPageGuard(page);
+
+ set_page_private(page, 0);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, (1 << order), migratetype);
+}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8d4e803cec44..dc9820466377 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -664,75 +664,6 @@ void destroy_large_folio(struct folio *folio)
compound_page_dtors[dtor](&folio->page);
}

-#ifdef CONFIG_DEBUG_PAGEALLOC
-unsigned int _debug_guardpage_minorder;
-
-bool _debug_pagealloc_enabled_early __read_mostly
- = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
-EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
-DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-EXPORT_SYMBOL(_debug_pagealloc_enabled);
-
-DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
-
-static int __init early_debug_pagealloc(char *buf)
-{
- return kstrtobool(buf, &_debug_pagealloc_enabled_early);
-}
-early_param("debug_pagealloc", early_debug_pagealloc);
-
-static int __init debug_guardpage_minorder_setup(char *buf)
-{
- unsigned long res;
-
- if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
- pr_err("Bad debug_guardpage_minorder value\n");
- return 0;
- }
- _debug_guardpage_minorder = res;
- pr_info("Setting debug_guardpage_minorder to %lu\n", res);
- return 0;
-}
-early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
-
-static inline bool set_page_guard(struct zone *zone, struct page *page,
- unsigned int order, int migratetype)
-{
- if (!debug_guardpage_enabled())
- return false;
-
- if (order >= debug_guardpage_minorder())
- return false;
-
- __SetPageGuard(page);
- INIT_LIST_HEAD(&page->buddy_list);
- set_page_private(page, order);
- /* Guard pages are not available for any usage */
- if (!is_migrate_isolate(migratetype))
- __mod_zone_freepage_state(zone, -(1 << order), migratetype);
-
- return true;
-}
-
-static inline void clear_page_guard(struct zone *zone, struct page *page,
- unsigned int order, int migratetype)
-{
- if (!debug_guardpage_enabled())
- return;
-
- __ClearPageGuard(page);
-
- set_page_private(page, 0);
- if (!is_migrate_isolate(migratetype))
- __mod_zone_freepage_state(zone, (1 << order), migratetype);
-}
-#else
-static inline bool set_page_guard(struct zone *zone, struct page *page,
- unsigned int order, int migratetype) { return false; }
-static inline void clear_page_guard(struct zone *zone, struct page *page,
- unsigned int order, int migratetype) {}
-#endif
-
static inline void set_buddy_order(struct page *page, unsigned int order)
{
set_page_private(page, order);
--
2.35.3


2023-05-16 06:34:15

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH v2 06/13] mm: page_alloc: remove alloc_contig_dump_pages() stub

DEFINE_DYNAMIC_DEBUG_METADATA and DYNAMIC_DEBUG_BRANCH already have
stub definitions when the dynamic debug feature is disabled, so remove
the unnecessary alloc_contig_dump_pages() stub.

Reviewed-by: Mike Rapoport (IBM) <[email protected]>
Signed-off-by: Kefeng Wang <[email protected]>
---
mm/page_alloc.c | 7 -------
1 file changed, 7 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1bd8b7832d40..aa3cdfd88393 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6250,8 +6250,6 @@ int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
}

#ifdef CONFIG_CONTIG_ALLOC
-#if defined(CONFIG_DYNAMIC_DEBUG) || \
- (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
@@ -6265,11 +6263,6 @@ static void alloc_contig_dump_pages(struct list_head *page_list)
dump_page(page, "migration failure");
}
}
-#else
-static inline void alloc_contig_dump_pages(struct list_head *page_list)
-{
-}
-#endif

/* [start, end) must belong to a single zone. */
int __alloc_contig_migrate_range(struct compact_control *cc,
--
2.35.3


2023-05-16 22:29:21

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH v2 08/13] mm: page_alloc: split out DEBUG_PAGEALLOC

On Tue, 16 May 2023 14:38:16 +0800 Kefeng Wang <[email protected]> wrote:

> DEBUG_PAGEALLOC
>
> mm/debug_page_alloc.c | 59 +++++++++++++++++++++++++++++++++
> mm/page_alloc.c | 69 ---------------------------------------

and

FAIL_PAGE_ALLOC

We're irritatingly inconsistent about whether there's an underscore.

akpm:/usr/src/25> grep page_alloc mm/*c|wc -l
49
akpm:/usr/src/25> grep pagealloc mm/*c|wc -l
28


2023-05-18 01:47:33

by Kefeng Wang

[permalink] [raw]
Subject: Re: [PATCH v2 08/13] mm: page_alloc: split out DEBUG_PAGEALLOC



On 2023/5/17 6:22, Andrew Morton wrote:
> On Tue, 16 May 2023 14:38:16 +0800 Kefeng Wang <[email protected]> wrote:
>
>> DEBUG_PAGEALLOC
>>
>> mm/debug_page_alloc.c | 59 +++++++++++++++++++++++++++++++++
>> mm/page_alloc.c | 69 ---------------------------------------
>
> and
>
> FAIL_PAGE_ALLOC
>
> We're irritatingly inconsistent about whether there's an underscore.
>
> akpm:/usr/src/25> grep page_alloc mm/*c|wc -l
> 49
> akpm:/usr/src/25> grep pagealloc mm/*c|wc -l
> 28

All 28 of the "pagealloc" names come from the DEBUG_PAGEALLOC feature; they
could be changed to "page_alloc" except for the cmdline, but that would lead
to longer function names without much advantage, so keep them unchanged?

$ grep pagealloc mm/*c
mm/debug_page_alloc.c:bool _debug_pagealloc_enabled_early __read_mostly
mm/debug_page_alloc.c:EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
mm/debug_page_alloc.c:DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
mm/debug_page_alloc.c:EXPORT_SYMBOL(_debug_pagealloc_enabled);
mm/debug_page_alloc.c:static int __init early_debug_pagealloc(char *buf)
mm/debug_page_alloc.c: return kstrtobool(buf,
&_debug_pagealloc_enabled_early);
mm/debug_page_alloc.c:early_param("debug_pagealloc", early_debug_pagealloc);
mm/memory_hotplug.c: * Freeing the page with debug_pagealloc enabled
will try to unmap it,
mm/memory_hotplug.c: debug_pagealloc_map_pages(page, 1 << order);
mm/mm_init.c: debug_pagealloc_enabled())) {
mm/mm_init.c: if (debug_pagealloc_enabled()) {
mm/mm_init.c: static_branch_enable(&_debug_pagealloc_enabled);
mm/page_alloc.c: * page becomes unavailable via debug_pagealloc or
arch_free_page.
mm/page_alloc.c: debug_pagealloc_unmap_pages(page, 1 << order);
mm/page_alloc.c: debug_pagealloc_map_pages(page, 1 << order);
mm/page_poison.c: pr_err("pagealloc: single bit error\n");
mm/page_poison.c: pr_err("pagealloc: memory corruption\n");
mm/page_poison.c: dump_page(page, "pagealloc: corrupted page details");
mm/slab.c:static inline bool is_debug_pagealloc_cache(struct kmem_cache
*cachep)
mm/slab.c: return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
mm/slab.c: if (!is_debug_pagealloc_cache(cachep))
mm/slab.c: if (is_debug_pagealloc_cache(cachep))
mm/slab.c: * To activate debug pagealloc, off-slab management is necessary
mm/slab.c: if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
mm/slab.c: is_debug_pagealloc_cache(cachep))
mm/slub.c: if (!debug_pagealloc_enabled_static())
mm/vmalloc.c: if (debug_pagealloc_enabled_static())
mm/vmalloc.c: if (debug_pagealloc_enabled_static())



>



2023-05-18 02:20:46

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH v2 08/13] mm: page_alloc: split out DEBUG_PAGEALLOC

On Thu, 18 May 2023 09:35:29 +0800 Kefeng Wang <[email protected]> wrote:

> > We're irritatingly inconsistent about whether there's an underscore.
> >
> > akpm:/usr/src/25> grep page_alloc mm/*c|wc -l
> > 49
> > akpm:/usr/src/25> grep pagealloc mm/*c|wc -l
> > 28
>
> All 28 of the "pagealloc" names come from the DEBUG_PAGEALLOC feature; they
> could be changed to "page_alloc" except for the cmdline, but that would lead
> to longer function names without much advantage, so keep them unchanged?

Sure, it's probably not the worst thing in there. I was just having
a moan.