From: Alex Shi <[email protected]>
A couple of the old page/LRU operation helpers have become nothing more than
inline wrappers around their folio equivalents. Remove them to cut down on the
number of function names to remember and to keep the code simple.
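For example, add_page_to_lru_list() in include/linux/mm_inline.h is now only a
one-line wrapper around the folio API (copied from the patch below; the other
removed helpers follow the same pattern):

	static __always_inline void add_page_to_lru_list(struct page *page,
							 struct lruvec *lruvec)
	{
		lruvec_add_folio(lruvec, page_folio(page));
	}

so callers can simply use lruvec_add_folio(lruvec, page_folio(page)) directly.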
Thanks
Alex
Cc: Steven Rostedt <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Alex Shi (5):
mm: remove page_is_file_lru function
mm: remove __clear_page_lru_flags()
mm: remove add_page_to_lru_list() function
mm: remove add_page_to_lru_list_tail()
mm: remove del_page_from_lru_list()
include/linux/mm_inline.h | 28 ----------------------------
include/trace/events/vmscan.h | 2 +-
mm/compaction.c | 4 ++--
mm/gup.c | 2 +-
mm/khugepaged.c | 4 ++--
mm/memory-failure.c | 2 +-
mm/memory_hotplug.c | 2 +-
mm/mempolicy.c | 2 +-
mm/migrate.c | 14 +++++++-------
mm/mlock.c | 2 +-
mm/mprotect.c | 2 +-
mm/swap.c | 22 +++++++++++-----------
mm/vmscan.c | 23 ++++++++++++-----------
13 files changed, 41 insertions(+), 68 deletions(-)
--
2.25.1
From: Alex Shi <[email protected]>
The function can be fully replaced by lruvec_add_folio(); there is no reason
to keep a duplicate.
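The conversion at each call site is mechanical:

	add_page_to_lru_list(page, lruvec);

becomes

	lruvec_add_folio(lruvec, page_folio(page));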
Signed-off-by: Alex Shi <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
include/linux/mm_inline.h | 6 ------
mm/swap.c | 6 +++---
mm/vmscan.c | 4 ++--
3 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c2384da888b4..7d7abd5ff73f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -92,12 +92,6 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
list_add(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_add_folio(lruvec, page_folio(page));
-}
-
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
diff --git a/mm/swap.c b/mm/swap.c
index 953cf8860542..fb101a06dce4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -543,7 +543,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
* It can make readahead confusing. But race window
* is _really_ small and it's non-critical problem.
*/
- add_page_to_lru_list(page, lruvec);
+ lruvec_add_folio(lruvec, page_folio(page));
SetPageReclaim(page);
} else {
/*
@@ -569,7 +569,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
del_page_from_lru_list(page, lruvec);
ClearPageActive(page);
ClearPageReferenced(page);
- add_page_to_lru_list(page, lruvec);
+ lruvec_add_folio(lruvec, page_folio(page));
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
@@ -592,7 +592,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
* anonymous pages
*/
ClearPageSwapBacked(page);
- add_page_to_lru_list(page, lruvec);
+ lruvec_add_folio(lruvec, page_folio(page));
__count_vm_events(PGLAZYFREE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 59a52ba8b52a..f09473c9ff35 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2354,7 +2354,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
* inhibits memcg migration).
*/
VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
- add_page_to_lru_list(page, lruvec);
+ lruvec_add_folio(lruvec, page_folio(page));
nr_pages = thp_nr_pages(page);
nr_moved += nr_pages;
if (PageActive(page))
@@ -4875,7 +4875,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
if (page_evictable(page) && PageUnevictable(page)) {
del_page_from_lru_list(page, lruvec);
ClearPageUnevictable(page);
- add_page_to_lru_list(page, lruvec);
+ lruvec_add_folio(lruvec, page_folio(page));
pgrescued += nr_pages;
}
SetPageLRU(page);
--
2.25.1
From: Alex Shi <[email protected]>
The function can be fully replaced by __folio_clear_lru_flags(); there is no
reason to keep a duplicate.
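The conversion at each call site is mechanical:

	__clear_page_lru_flags(page);

becomes

	__folio_clear_lru_flags(page_folio(page));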
Signed-off-by: Alex Shi <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
include/linux/mm_inline.h | 5 -----
mm/swap.c | 4 ++--
mm/vmscan.c | 2 +-
3 files changed, 3 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index f0aa34b0f2c4..c2384da888b4 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -59,11 +59,6 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
__folio_clear_unevictable(folio);
}
-static __always_inline void __clear_page_lru_flags(struct page *page)
-{
- __folio_clear_lru_flags(page_folio(page));
-}
-
/**
* folio_lru_list - Which LRU list should a folio be on?
* @folio: The folio to test.
diff --git a/mm/swap.c b/mm/swap.c
index bcf3ac288b56..953cf8860542 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -86,7 +86,7 @@ static void __page_cache_release(struct page *page)
lruvec = folio_lruvec_lock_irqsave(folio, &flags);
del_page_from_lru_list(page, lruvec);
- __clear_page_lru_flags(page);
+ __folio_clear_lru_flags(page_folio(page));
unlock_page_lruvec_irqrestore(lruvec, flags);
}
__ClearPageWaiters(page);
@@ -966,7 +966,7 @@ void release_pages(struct page **pages, int nr)
lock_batch = 0;
del_page_from_lru_list(page, lruvec);
- __clear_page_lru_flags(page);
+ __folio_clear_lru_flags(page_folio(page));
}
__ClearPageWaiters(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c361973774b4..59a52ba8b52a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2337,7 +2337,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
SetPageLRU(page);
if (unlikely(put_page_testzero(page))) {
- __clear_page_lru_flags(page);
+ __folio_clear_lru_flags(page_folio(page));
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&lruvec->lru_lock);
--
2.25.1
From: Alex Shi <[email protected]>
The function can be fully replaced by lruvec_add_folio_tail(); there is no
reason to keep a duplicate.
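The only caller, in mm/swap.c, is converted the same mechanical way:

	add_page_to_lru_list_tail(page, lruvec);

becomes

	lruvec_add_folio_tail(lruvec, page_folio(page));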
Signed-off-by: Alex Shi <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
include/linux/mm_inline.h | 6 ------
mm/swap.c | 2 +-
2 files changed, 1 insertion(+), 7 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 7d7abd5ff73f..4df5b39cc97b 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -102,12 +102,6 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_add_folio_tail(lruvec, page_folio(page));
-}
-
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
diff --git a/mm/swap.c b/mm/swap.c
index fb101a06dce4..23c0afb76be6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -550,7 +550,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
* The page's writeback ends up during pagevec
* We move that page into tail of inactive.
*/
- add_page_to_lru_list_tail(page, lruvec);
+ lruvec_add_folio_tail(lruvec, page_folio(page));
__count_vm_events(PGROTATED, nr_pages);
}
--
2.25.1