2014-11-07 07:33:56

by Joonsoo Kim

Subject: [PATCH 1/2] mm/debug-pagealloc: correct freepage accounting and order resetting

One thing this patch does is fix the freepage accounting.
When we clear a guard page and link it onto the isolate buddy list,
we should not increase the freepage count, since isolated freepages
are not included in that count. This patch adds a conditional branch
to skip the accounting in that case. Without this patch, overcounting
happens frequently whenever a guard order is set and CMA is in use.
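
For context, the helper involved is __mod_zone_freepage_state(). Around
this kernel version it looked roughly like the sketch below (paraphrased,
so treat it as illustrative rather than authoritative): it adjusts
NR_FREE_PAGES unconditionally, and freepages sitting on isolate buddy
lists are deliberately excluded from that counter, so callers dealing
with MIGRATE_ISOLATE must skip the call themselves, which is exactly
what the hunk below adds.

/* Rough sketch of the helper in include/linux/vmstat.h of this era: */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	/* NR_FREE_PAGES counts only non-isolated freepages ... */
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	/* ... and CMA freepages are additionally tracked separately. */
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
	/*
	 * Note there is no is_migrate_isolate() check here: the caller
	 * must not call this at all for isolated freepages, or
	 * NR_FREE_PAGES is overcounted.
	 */
}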

The other thing fixed in this patch is the target of the order reset.
In __free_one_page(), we check whether the buddy page is a guard page
and, if so, we should clear the guard attribute on the buddy page and
reset its order to 0. But the current code resets the original page's
order rather than the buddy's. This may not cause any problem, because
the whole merged page's order will be re-assigned soon afterwards, but
it is better to correct the code.
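
For readers not staring at __free_one_page(), the merge loop has roughly
this shape (a simplified sketch of the code of this era, not a verbatim
copy), which shows why buddy, not page, carries the guard state:

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		if (page_is_guard(buddy)) {
			/*
			 * The guard flag and the order stored via
			 * set_page_private() were set on buddy when it
			 * became a guard page, so buddy is what must be
			 * cleared and reset here, not page.
			 */
			clear_page_guard_flag(buddy);
			set_page_private(buddy, 0);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	/* Only here does the merged page get its new order assigned. */
	set_page_order(page, order);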

Changes from v2:
Rename subject from
"mm/page_alloc: correct to clear guard attribute in DEBUG_PAGEALLOC"
to
"mm/debug-pagealloc: correct freepage accounting and order resetting".
Separate the fix and the clean-up into two patches.

Cc: <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Signed-off-by: Joonsoo Kim <[email protected]>
---
mm/page_alloc.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e78e3c8..d673f64 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -583,9 +583,11 @@ static inline void __free_one_page(struct page *page,
*/
if (page_is_guard(buddy)) {
clear_page_guard_flag(buddy);
- set_page_private(page, 0);
- __mod_zone_freepage_state(zone, 1 << order,
- migratetype);
+ set_page_private(buddy, 0);
+ if (!is_migrate_isolate(migratetype)) {
+ __mod_zone_freepage_state(zone, 1 << order,
+ migratetype);
+ }
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
--
1.7.9.5


2014-11-07 07:33:59

by Joonsoo Kim

Subject: [PATCH 2/2] mm/debug-pagealloc: cleanup page guard code

Page guard is used by the debug-pagealloc feature. Currently it is
open-coded, but I think more abstraction of it makes the core page
allocator code more readable.

There is no functional difference.
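
One detail worth noting in the diff: the #ifdef CONFIG_DEBUG_PAGEALLOC
block in expand() becomes a plain if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)
&& ...), relying on the empty stubs to let the compiler discard the
disabled branch. A minimal stand-alone illustration of the pattern, with
hypothetical names and a toy stand-in for the kernel's IS_ENABLED()
macro (the real macro also copes with undefined config symbols):

#include <stdio.h>

#define CONFIG_DEBUG_HOOK 1
#define IS_ENABLED(option) (option)	/* toy stand-in */

#if CONFIG_DEBUG_HOOK
static inline void debug_hook(int order)
{
	printf("guard order %d\n", order);
}
#else
/* Empty stub: with the option off, calls compile to nothing. */
static inline void debug_hook(int order) { (void)order; }
#endif

int main(void)
{
	int order = 3;
	/*
	 * With CONFIG_DEBUG_HOOK defined as 0 this reads if (0 && ...):
	 * the branch is eliminated as dead code, but unlike an #ifdef
	 * block it is still parsed and type-checked in every
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_DEBUG_HOOK) && order < 5)
		debug_hook(order);
	return 0;
}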

Signed-off-by: Joonsoo Kim <[email protected]>
---
mm/page_alloc.c | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d673f64..c0dbede 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -440,18 +440,29 @@ static int __init debug_guardpage_minorder_setup(char *buf)
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

-static inline void set_page_guard_flag(struct page *page)
+static inline void set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
{
__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ INIT_LIST_HEAD(&page->lru);
+ set_page_private(page, order);
+ /* Guard pages are not available for any usage */
+ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

-static inline void clear_page_guard_flag(struct page *page)
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
{
__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ set_page_private(page, 0);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
-static inline void set_page_guard_flag(struct page *page) { }
-static inline void clear_page_guard_flag(struct page *page) { }
+static inline void set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
@@ -582,12 +593,7 @@ static inline void __free_one_page(struct page *page,
* merge with it and move up one order.
*/
if (page_is_guard(buddy)) {
- clear_page_guard_flag(buddy);
- set_page_private(buddy, 0);
- if (!is_migrate_isolate(migratetype)) {
- __mod_zone_freepage_state(zone, 1 << order,
- migratetype);
- }
+ clear_page_guard(zone, buddy, order, migratetype);
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
@@ -862,23 +868,17 @@ static inline void expand(struct zone *zone, struct page *page,
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (high < debug_guardpage_minorder()) {
+ if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ high < debug_guardpage_minorder()) {
/*
* Mark as guard pages (or page), that will allow to
* merge back to allocator when buddy will be freed.
* Corresponding page table entries will not be touched,
* pages will stay not present in virtual address space
*/
- INIT_LIST_HEAD(&page[size].lru);
- set_page_guard_flag(&page[size]);
- set_page_private(&page[size], high);
- /* Guard pages are not available for any usage */
- __mod_zone_freepage_state(zone, -(1 << high),
- migratetype);
+ set_page_guard(zone, &page[size], high, migratetype);
continue;
}
-#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
--
1.7.9.5

2014-11-07 09:55:28

by Vlastimil Babka

Subject: Re: [PATCH 2/2] mm/debug-pagealloc: cleanup page guard code

On 11/07/2014 08:35 AM, Joonsoo Kim wrote:
> Page guard is used by the debug-pagealloc feature. Currently it is
> open-coded, but I think more abstraction of it makes the core page
> allocator code more readable.
>
> There is no functional difference.
>
> Signed-off-by: Joonsoo Kim <[email protected]>

Acked-by: Vlastimil Babka <[email protected]>


2014-11-09 23:28:57

by Gioh Kim

Subject: Re: [PATCH 2/2] mm/debug-pagealloc: cleanup page guard code



On 2014-11-07 4:35 PM, Joonsoo Kim wrote:
> Page guard is used by the debug-pagealloc feature. Currently it is
> open-coded, but I think more abstraction of it makes the core page
> allocator code more readable.
>
> There is no functional difference.
>
> Signed-off-by: Joonsoo Kim <[email protected]>

Looks good!
Thanks for your work.