From: Huang Ying <[email protected]>
Currently, PageSwapBacked() is used as the helper function to check
whether pages have been freed lazily via MADV_FREE. This isn't very
obvious, so Dave suggested adding the PageLazyFree() family of helper
functions to improve code readability.
Signed-off-by: "Huang, Ying" <[email protected]>
Suggested-by: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Hugh Dickins <[email protected]>
---
Changelog:
v3:
- Improved comments; thanks to David Hildenbrand!
- Use the helper function in /proc/PID/smaps lazyfree reporting.
v2:
- Avoid code bloat by removing VM_BUG_ON_PAGE(), which doesn't exist
  in the original code. Now there is no text/data/bss size change.
- Fix one wrong replacement in try_to_unmap_one().
---
fs/proc/task_mmu.c | 2 +-
include/linux/page-flags.h | 25 +++++++++++++++++++++++++
mm/rmap.c | 4 ++--
mm/swap.c | 11 +++--------
mm/vmscan.c | 7 +++----
5 files changed, 34 insertions(+), 15 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3ba9ae83bff5..3458d5711e57 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -471,7 +471,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
*/
if (PageAnon(page)) {
mss->anonymous += size;
- if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+ if (__PageLazyFree(page) && !dirty && !PageDirty(page))
mss->lazyfree += size;
}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 49c2697046b9..bb26f74cbe8e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -498,6 +498,31 @@ static __always_inline int PageKsm(struct page *page)
TESTPAGEFLAG_FALSE(Ksm)
#endif
+/*
+ * For pages freed lazily via MADV_FREE. Lazyfree pages are clean
+ * anonymous pages. They don't have PG_swapbacked set, to distinguish
+ * them from normal anonymous pages.
+ */
+static __always_inline int __PageLazyFree(struct page *page)
+{
+ return !PageSwapBacked(page);
+}
+
+static __always_inline int PageLazyFree(struct page *page)
+{
+ return PageAnon(page) && __PageLazyFree(page);
+}
+
+static __always_inline void SetPageLazyFree(struct page *page)
+{
+ ClearPageSwapBacked(page);
+}
+
+static __always_inline void ClearPageLazyFree(struct page *page)
+{
+ SetPageSwapBacked(page);
+}
+
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 1c02adaa233e..6ec96c8e7826 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1609,7 +1609,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/* MADV_FREE page check */
- if (!PageSwapBacked(page)) {
+ if (__PageLazyFree(page)) {
if (!PageDirty(page)) {
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
@@ -1623,7 +1623,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* discarded. Remap the page to page table.
*/
set_pte_at(mm, address, pvmw.pte, pteval);
- SetPageSwapBacked(page);
+ ClearPageLazyFree(page);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
diff --git a/mm/swap.c b/mm/swap.c
index c1d3ca80ea10..d83f2cd4cdb8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -563,7 +563,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
- if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && !__PageLazyFree(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
bool active = PageActive(page);
@@ -571,12 +571,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
LRU_INACTIVE_ANON + active);
ClearPageActive(page);
ClearPageReferenced(page);
- /*
- * lazyfree pages are clean anonymous pages. They have
- * SwapBacked flag cleared to distinguish normal anonymous
- * pages
- */
- ClearPageSwapBacked(page);
+ SetPageLazyFree(page);
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
@@ -678,7 +673,7 @@ void deactivate_page(struct page *page)
*/
void mark_page_lazyfree(struct page *page)
{
- if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && !__PageLazyFree(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eca49a1c2f68..40bb41ada2d2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1043,8 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
* Anonymous pages are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them
*/
- if (!page_is_file_cache(page) ||
- (PageAnon(page) && !PageSwapBacked(page))) {
+ if (!page_is_file_cache(page) || PageLazyFree(page)) {
*dirty = false;
*writeback = false;
return;
@@ -1235,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* Try to allocate it some swap space here.
* Lazyfree page could be freed directly
*/
- if (PageAnon(page) && PageSwapBacked(page)) {
+ if (PageAnon(page) && !__PageLazyFree(page)) {
if (!PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
@@ -1411,7 +1410,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (PageAnon(page) && !PageSwapBacked(page)) {
+ if (PageLazyFree(page)) {
/* follow __remove_mapping for reference */
if (!page_ref_freeze(page, 1))
goto keep_locked;
--
2.25.0
On 09.03.20 03:17, Huang, Ying wrote:
> [...]
I still prefer something like
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index fd6d4670ccc3..7538501230bd 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -63,6 +63,10 @@
* page_waitqueue(page) is a wait queue of all tasks waiting for the page
* to become unlocked.
*
+ * PG_swapbacked used with anonymous pages (PageAnon()) indicates that a
+ * page is backed by swap. Anonymous pages without PG_swapbacked are
+ * pages that can be lazily freed (e.g., MADV_FREE) on demand.
+ *
* PG_uptodate tells whether the page's contents is valid. When a read
* completes, the page becomes uptodate, unless a disk I/O error happened.
*
and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
--
Thanks,
David / dhildenb
David Hildenbrand <[email protected]> writes:
> On 09.03.20 03:17, Huang, Ying wrote:
>> [...]
>
> I still prefer something like
>
> [...]
Why not just send a formal patch? So Andrew can just pick anything he
likes. I am totally OK with that.
> and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
If adopted, !__PageLazyFree() should only be used in the context where
we really want to check whether pages are freed lazily. Otherwise,
PageSwapBacked() should be used.
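For example, a sketch of the distinction (illustrative only, not taken
from the patch; both helper names below are made up):
	/* The question is "was this anon page freed lazily via MADV_FREE?" */
	static inline bool can_discard_clean_lazyfree(struct page *page)
	{
		return PageLazyFree(page) && !PageDirty(page);
	}
	/* The question is "does this anon page need swap space allocated?" */
	static inline bool anon_page_needs_swap(struct page *page)
	{
		return PageAnon(page) && PageSwapBacked(page) &&
		       !PageSwapCache(page);
	}
The first test is about the lazy-free state itself, so the helper reads
better; the second is a generic swap question, so PageSwapBacked() is
clearer there.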
Best Regards,
Huang, Ying
On 09.03.20 10:15, Huang, Ying wrote:
> David Hildenbrand <[email protected]> writes:
>
>> On 09.03.20 03:17, Huang, Ying wrote:
>>> [...]
>>
>> I still prefer something like
>>
>> [...]
>
> Why not just send a formal patch? So Andrew can just pick anything he
> likes. I am totally OK with that.
Because you're working on cleaning this up.
>
>> and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
>
> If adopted, !__PageLazyFree() should only be used in the context where
> we really want to check whether pages are freed lazily. Otherwise,
> PageSwapBacked() should be used.
>
Yeah, and once again, personally, I don't like this approach. E.g.,
ClearPageLazyFree() sets PG_swapbacked. You already have to be aware
that this is a single flag being used in the background and what the
implications are. IMHO, in no way better than the current approach. I
prefer better documentation instead.
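To spell that inversion out (this just restates the helper definitions
from the patch itself):
	SetPageLazyFree(page);		/* actually does ClearPageSwapBacked(page) */
	ClearPageLazyFree(page);	/* actually does SetPageSwapBacked(page) */
So the Set/Clear in the helper name points in the opposite direction of
the PG_swapbacked bit it manipulates.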
But I am just a reviewer and not a maintainer, so it's just my 2 cents.
--
Thanks,
David / dhildenb
On Mon 09-03-20 09:55:38, David Hildenbrand wrote:
> On 09.03.20 03:17, Huang, Ying wrote:
[...]
> > @@ -1235,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
> > * Try to allocate it some swap space here.
> > * Lazyfree page could be freed directly
> > */
> > - if (PageAnon(page) && PageSwapBacked(page)) {
> > + if (PageAnon(page) && !__PageLazyFree(page)) {
> > if (!PageSwapCache(page)) {
> > if (!(sc->gfp_mask & __GFP_IO))
> > goto keep_locked;
> > @@ -1411,7 +1410,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
> > }
> > }
> >
> > - if (PageAnon(page) && !PageSwapBacked(page)) {
> > + if (PageLazyFree(page)) {
> > /* follow __remove_mapping for reference */
> > if (!page_ref_freeze(page, 1))
> > goto keep_locked;
> >
>
> I still prefer something like
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index fd6d4670ccc3..7538501230bd 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -63,6 +63,10 @@
> * page_waitqueue(page) is a wait queue of all tasks waiting for the page
> * to become unlocked.
> *
> + * PG_swapbacked used with anonymous pages (PageAnon()) indicates that a
> + * page is backed by swap. Anonymous pages without PG_swapbacked are
> + * pages that can be lazily freed (e.g., MADV_FREE) on demand.
> + *
> * PG_uptodate tells whether the page's contents is valid. When a read
> * completes, the page becomes uptodate, unless a disk I/O error happened.
> *
>
> and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
I have to say that I do not have a strong opinion about helper
functions. In general I tend to be against adding them unless there is a
very good reason for them. This particular patch is a bit of a gray zone.
There are a few places which become easier to follow, but others sound
like 'we have a hammer, let's use it', e.g. the shrink_page_list path
above. There is a clear comment explaining that the PageAnon &&
PageSwapBacked check is LazyFree related, but do I have to know that
this is the LazyFree path? I believe that seeing PageSwapBacked has more
meaning to me because it tells me that anonymous pages without a backing
store don't really need a swap entry. This happens to be lazy-free
related today, but with the heavy overloading of our flags this might
differ in the future. You have effectively made a more generic
description more specific without a very good reason.
On the other hand, having the PG_swapbacked description in page-flags.h
above gives very useful information which was previously hidden at the
definition, so this is a clear improvement.
That being said, I think the patch is not helpful enough. I would much
rather see a simple documentation update.
--
Michal Hocko
SUSE Labs
On Mon, 9 Mar 2020, David Hildenbrand wrote:
> [...]
> >> and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
> >
> > If adopted, !__PageLazyFree() should only be used in the context where
> > we really want to check whether pages are freed lazily. Otherwise,
> > PageSwapBacked() should be used.
> >
>
> Yeah, and once again, personally, I don't like this approach. E.g.,
> ClearPageLazyFree() sets PG_swapbacked. You already have to be aware
> that this is a single flag being used in the background and what the
> implications are. IMHO, in no way better than the current approach. I
> prefer better documentation instead.
>
Fully agreed.
Michal Hocko <[email protected]> writes:
> On Mon 09-03-20 09:55:38, David Hildenbrand wrote:
>> [...]
>
> I have to say that I do not have a strong opinion about helper
> functions. In general I tend to be against adding them unless there is a
> very good reason for them. This particular patch is a bit of a gray zone.
>
> There are a few places which become easier to follow, but others sound
> like 'we have a hammer, let's use it', e.g. the shrink_page_list path above.
I can remove all these places and only keep the helpful ones.
> There is a clear comment explaining that the PageAnon && PageSwapBacked
> check is LazyFree related, but do I have to know that this is the
> LazyFree path? I believe that seeing PageSwapBacked has more meaning to
> me because it tells me that anonymous pages without a backing store
> don't really need a swap entry. This happens to be lazy-free related
> today, but with the heavy overloading of our flags this might differ in
> the future. You have effectively made a more generic description more
> specific without a very good reason.
Yes. The following piece isn't lazy-free specific, so we can keep using PageSwapBacked().
@@ -1235,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* Try to allocate it some swap space here.
* Lazyfree page could be freed directly
*/
- if (PageAnon(page) && PageSwapBacked(page)) {
+ if (PageAnon(page) && !__PageLazyFree(page)) {
if (!PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
And the following piece is lazy-free specific; I think the helper helps there.
@@ -1411,7 +1410,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (PageAnon(page) && !PageSwapBacked(page)) {
+ if (PageLazyFree(page)) {
/* follow __remove_mapping for reference */
if (!page_ref_freeze(page, 1))
goto keep_locked;
> On the other hand, having the PG_swapbacked description in page-flags.h
> above gives very useful information which was previously hidden at the
> definition, so this is a clear improvement.
Yes. I think it's good to add documentation for the PG_swapbacked definition.
> That being said, I think the patch is not helpful enough. I would much
> rather see a simple documentation update.