set_pageblock_migratetype() needs to calculate the pfn, but most of its
callers already have it. Pass the pfn to set_pageblock_migratetype() to
avoid the recalculation.
Signed-off-by: Yajun Deng <[email protected]>
---
include/linux/page-isolation.h | 2 +-
mm/mm_init.c | 10 +++++-----
mm/page_alloc.c | 18 +++++++++---------
mm/page_isolation.c | 2 +-
4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index c16db0067090..b7d55f35eac0 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,7 +33,7 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-void set_pageblock_migratetype(struct page *page, int migratetype);
+void set_pageblock_migratetype(struct page *page, unsigned long pfn, int migratetype);
bool move_freepages_block_isolate(struct zone *zone, struct page *page,
int migratetype);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 426314eeecec..85a98d3b8c0e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -901,7 +901,7 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
* over the place during system boot.
*/
if (pageblock_aligned(pfn)) {
- set_pageblock_migratetype(page, migratetype);
+ set_pageblock_migratetype(page, pfn, migratetype);
cond_resched();
}
pfn++;
@@ -1005,7 +1005,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
* because this is done early in section_activate()
*/
if (pageblock_aligned(pfn)) {
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ set_pageblock_migratetype(page, pfn, MIGRATE_MOVABLE);
cond_resched();
}
@@ -1927,7 +1927,7 @@ static void __init deferred_free_range(unsigned long pfn,
/* Free a large naturally-aligned chunk if possible */
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
- set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
+ set_pageblock_migratetype(page + i, pfn + i, MIGRATE_MOVABLE);
__free_pages_core(page, MAX_PAGE_ORDER);
return;
}
@@ -1937,7 +1937,7 @@ static void __init deferred_free_range(unsigned long pfn,
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if (pageblock_aligned(pfn))
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ set_pageblock_migratetype(page, pfn, MIGRATE_MOVABLE);
__free_pages_core(page, 0);
}
}
@@ -2291,7 +2291,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_page_count(p, 0);
} while (++p, --i);
- set_pageblock_migratetype(page, MIGRATE_CMA);
+ set_pageblock_migratetype(page, page_to_pfn(page), MIGRATE_CMA);
set_page_refcounted(page);
__free_pages(page, pageblock_order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3734fe7e67c0..fe9d37f1b1e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -412,14 +412,14 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
-void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, unsigned long pfn, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled &&
migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
set_pfnblock_flags_mask(page, (unsigned long)migratetype,
- page_to_pfn(page), MIGRATETYPE_MASK);
+ pfn, MIGRATETYPE_MASK);
}
#ifdef CONFIG_DEBUG_VM
@@ -817,7 +817,7 @@ static inline void __free_one_page(struct page *page,
* expand() down the line puts the sub-blocks
* on the right freelists.
*/
- set_pageblock_migratetype(buddy, migratetype);
+ set_pageblock_migratetype(buddy, buddy_pfn, migratetype);
}
combined_pfn = buddy_pfn & pfn;
@@ -1579,7 +1579,7 @@ static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
pages_moved += 1 << order;
}
- set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
+ set_pageblock_migratetype(pfn_to_page(start_pfn), start_pfn, new_mt);
return pages_moved;
}
@@ -1730,7 +1730,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, pfn));
- set_pageblock_migratetype(page, migratetype);
+ set_pageblock_migratetype(page, page_to_pfn(page), migratetype);
split_large_buddy(zone, buddy, pfn, order);
return true;
}
@@ -1741,7 +1741,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(page, zone, order,
get_pfnblock_migratetype(page, pfn));
- set_pageblock_migratetype(page, migratetype);
+ set_pageblock_migratetype(page, pfn, migratetype);
split_large_buddy(zone, page, pfn, order);
return true;
}
@@ -1753,14 +1753,14 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
}
#endif /* CONFIG_MEMORY_ISOLATION */
-static void change_pageblock_range(struct page *pageblock_page,
+static void change_pageblock_range(struct page *page,
int start_order, int migratetype)
{
int nr_pageblocks = 1 << (start_order - pageblock_order);
while (nr_pageblocks--) {
- set_pageblock_migratetype(pageblock_page, migratetype);
- pageblock_page += pageblock_nr_pages;
+ set_pageblock_migratetype(page, page_to_pfn(page), migratetype);
+ page += pageblock_nr_pages;
}
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 042937d5abe4..a436bf4da04e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -254,7 +254,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
*/
WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype));
} else {
- set_pageblock_migratetype(page, migratetype);
+ set_pageblock_migratetype(page, page_to_pfn(page), migratetype);
__putback_isolated_page(page, order, migratetype);
}
zone->nr_isolate_pageblock--;
--
2.25.1
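
For context on what the helper recalculates: inside
set_pageblock_migratetype() the pfn comes from page_to_pfn(), which with
CONFIG_SPARSEMEM_VMEMMAP boils down to a pointer subtraction against the
vmemmap base (other memory models do a little more work). The following is
only a stand-alone user-space sketch of the before/after helper shape and
of a loop caller in the style of the deferred_free_range() hunk above; the
mock_* names are invented for illustration and this is not the real mm
code:

/* Compilable user-space mock of the old vs. new helper shape. */
#include <stdio.h>

#define MOCK_PAGEBLOCK_NR_PAGES 512UL

struct mock_page { unsigned long flags; };

/* Stand-in for the vmemmap: the pfn is the index into this array. */
static struct mock_page mock_vmemmap[8 * MOCK_PAGEBLOCK_NR_PAGES];

/* Old shape: the helper derives the pfn from the page pointer. */
static void mock_set_migratetype_old(struct mock_page *page, int mt)
{
	unsigned long pfn = (unsigned long)(page - mock_vmemmap);

	printf("old: pageblock at pfn %lu -> migratetype %d\n", pfn, mt);
}

/*
 * New shape: the caller passes the pfn it already tracks; the page
 * pointer is kept because the kernel still needs it for the zone lookup.
 */
static void mock_set_migratetype_new(struct mock_page *page,
				     unsigned long pfn, int mt)
{
	(void)page;
	printf("new: pageblock at pfn %lu -> migratetype %d\n", pfn, mt);
}

/*
 * Loop caller in the style of the deferred_free_range() hunk above:
 * the pfn is already in hand on every iteration.
 */
int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < 8 * MOCK_PAGEBLOCK_NR_PAGES;
	     pfn += MOCK_PAGEBLOCK_NR_PAGES) {
		mock_set_migratetype_old(&mock_vmemmap[pfn], 1);
		mock_set_migratetype_new(&mock_vmemmap[pfn], pfn, 1);
	}
	return 0;
}
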
June 3, 2024 at 9:03 PM, "Matthew Wilcox" <[email protected]> wrote:
> On Mon, Jun 03, 2024 at 08:41:00PM +0800, Yajun Deng wrote:
> > set_pageblock_migratetype() needs to calculate the pfn, but most of its
> > callers already have it. Pass the pfn to set_pageblock_migratetype() to
> > avoid the recalculation.
>
> It's not exactly a hard computation though. Have you done any
> measurements that show this patch is an improvement?

No, I only looked at the code. But some of the callers are in a for loop.

On Mon, Jun 03, 2024 at 01:48:50PM +0000, Yajun Deng wrote:
> June 3, 2024 at 9:03 PM, "Matthew Wilcox" <[email protected]> wrote:
> > On Mon, Jun 03, 2024 at 08:41:00PM +0800, Yajun Deng wrote:
> > > set_pageblock_migratetype() needs to calculate the pfn, but most of
> > > its callers already have it.
> >
> > It's not exactly a hard computation though. Have you done any
> > measurements that show this patch is an improvement?
>
> No, I only looked at the code. But some of the callers are in a for loop.

All the more reason to MEASURE. There is a cost to marshalling
function arguments which must be weighed against the cost of
recalculating the PFN. Since you haven't done that, NAK.
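
One way to attach a first-order number to that trade-off is a user-space
microbenchmark along the lines below. It is only a sketch under the
assumption that page_to_pfn() reduces to a vmemmap pointer subtraction
(CONFIG_SPARSEMEM_VMEMMAP); the mock_* names and sizes are invented, and a
loop like this cannot capture the inlining and register pressure of the
real mm/page_alloc.c paths:

/*
 * Rough microbenchmark: recompute a pfn by pointer subtraction
 * vs. receive it as an extra argument. Build with -O2.
 */
#include <stdio.h>
#include <time.h>

#define MOCK_NPAGES (1UL << 22)

struct mock_page { unsigned long flags; };

static struct mock_page mock_vmemmap[MOCK_NPAGES];
static unsigned char mock_migratetype[MOCK_NPAGES];

/* Old shape: derive the pfn inside the helper. */
__attribute__((noinline))
static void set_mt_by_page(struct mock_page *page, int mt)
{
	unsigned long pfn = (unsigned long)(page - mock_vmemmap);

	mock_migratetype[pfn] = (unsigned char)mt;
}

/* New shape: the pfn arrives as an argument. */
__attribute__((noinline))
static void set_mt_by_pfn(struct mock_page *page, unsigned long pfn, int mt)
{
	(void)page;
	mock_migratetype[pfn] = (unsigned char)mt;
}

static long elapsed_ns(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000000000L + (b.tv_nsec - a.tv_nsec);
}

int main(void)
{
	struct timespec a, b;
	unsigned long pfn;

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (pfn = 0; pfn < MOCK_NPAGES; pfn++)
		set_mt_by_page(&mock_vmemmap[pfn], 1);
	clock_gettime(CLOCK_MONOTONIC, &b);
	printf("recompute pfn: %ld ns\n", elapsed_ns(a, b));

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (pfn = 0; pfn < MOCK_NPAGES; pfn++)
		set_mt_by_pfn(&mock_vmemmap[pfn], pfn, 1);
	clock_gettime(CLOCK_MONOTONIC, &b);
	printf("pass pfn:      %ld ns\n", elapsed_ns(a, b));

	return 0;
}

The number that would actually carry weight here is presumably something
like a before/after comparison of boot-time deferred struct page init,
but a sketch like this at least frames what is being traded.
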
On Mon, Jun 03, 2024 at 08:41:00PM +0800, Yajun Deng wrote:
> set_pageblock_migratetype() needs to calculate the pfn, but most of its
> callers already have it. Pass the pfn to set_pageblock_migratetype() to
> avoid the recalculation.

It's not exactly a hard computation though. Have you done any
measurements that show this patch is an improvement?