From: Sidhartha Kumar <[email protected]>
Date: 2023-06-02 18:18:19
Subject: Re: [PATCH 2/2] mm/hugetlb: Use a folio in hugetlb_wp()

On 6/1/23 6:54 PM, Peng Zhang wrote:
> From: ZhangPeng <[email protected]>
>
> We can replace nine implicit calls to compound_head() with one by using
> old_folio. However, we still need to keep old_page because we need to
> know which page in the folio we are copying.
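
A rough sketch of where those implicit compound_head() calls hide. These
are illustrative stand-ins only, not the exact definitions from
include/linux/page-flags.h and include/linux/mm.h:

struct page;
struct folio;

struct folio *page_folio(struct page *page);	/* head-page lookup */
bool folio_test_anon(const struct folio *folio);
void folio_get(struct folio *folio);

/* Each page-based helper redoes the head-page lookup internally... */
static inline bool sketch_PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static inline void sketch_get_page(struct page *page)
{
	folio_get(page_folio(page));
}

/*
 * ...so nine page-based calls cost nine hidden lookups, while caching
 * old_folio = page_folio(old_page) once pays that cost a single time.
 */
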
>
> Suggested-by: Matthew Wilcox (Oracle) <[email protected]>
> Signed-off-by: ZhangPeng <[email protected]>
> ---
> mm/hugetlb.c | 22 ++++++++++++----------
> 1 file changed, 12 insertions(+), 10 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0b774dd3d57b..f0ab6e8adf6f 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5543,6 +5543,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> pte_t pte = huge_ptep_get(ptep);
> struct hstate *h = hstate_vma(vma);
> struct page *old_page;
> + struct folio *old_folio;
> struct folio *new_folio;
> int outside_reserve = 0;
> vm_fault_t ret = 0;
> @@ -5574,6 +5575,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> }
>
> old_page = pte_page(pte);
> + old_folio = page_folio(old_page);
>
> delayacct_wpcopy_start();
>
> @@ -5582,7 +5584,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> * If no-one else is actually using this page, we're the exclusive
> * owner and can reuse this page.
> */
> - if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
> + if (page_mapcount(old_page) == 1 && folio_test_anon(old_folio)) {
> if (!PageAnonExclusive(old_page))
> page_move_anon_rmap(old_page, vma);
> if (likely(!unshare))
> @@ -5591,8 +5593,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> delayacct_wpcopy_end();
> return 0;
> }
> - VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
> - old_page);
> + VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
> + PageAnonExclusive(old_page), old_page);
>
> /*
> * If the process that created a MAP_PRIVATE mapping is about to
> @@ -5604,10 +5606,10 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> * of the full address range.
> */
> if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
> - page_folio(old_page) != pagecache_folio)
> + old_folio != pagecache_folio)
> outside_reserve = 1;
>
> - get_page(old_page);
> + folio_get(old_folio);
>
> /*
> * Drop page table lock as buddy allocator may be called. It will
> @@ -5629,7 +5631,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> pgoff_t idx;
> u32 hash;
>
> - put_page(old_page);
> + folio_put(old_folio);
> /*
> * Drop hugetlb_fault_mutex and vma_lock before
> * unmapping. unmapping needs to hold vma_lock
> @@ -5674,7 +5676,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> goto out_release_all;
> }
>
> - if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) {
> + if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
> ret = VM_FAULT_HWPOISON_LARGE;
> goto out_release_all;
> }
> @@ -5703,7 +5705,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> set_huge_pte_at(mm, haddr, ptep, newpte);
> folio_set_hugetlb_migratable(new_folio);
> /* Make the old page be freed below */
> - new_folio = page_folio(old_page);
> + new_folio = old_folio;
> }
> spin_unlock(ptl);
> mmu_notifier_invalidate_range_end(&range);
> @@ -5712,11 +5714,11 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> * No restore in case of successful pagetable update (Break COW or
> * unshare)
> */
> - if (new_folio != page_folio(old_page))
> + if (new_folio != old_folio)
> restore_reserve_on_error(h, vma, haddr, new_folio);
> folio_put(new_folio);
> out_release_old:
> - put_page(old_page);
> + folio_put(old_folio);
>
> spin_lock(ptl); /* Caller expects lock to be held */
>
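
Not part of the patch, but a minimal userspace sketch of the path being
rewritten here: after fork(), a child's write to a MAP_PRIVATE hugetlb
mapping takes the CoW branch in hugetlb_wp(). This assumes an x86-64
2 MiB huge page and a preallocated pool (vm.nr_hugepages > 0):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;	/* one 2 MiB huge page */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 1;		/* fault the huge page in */

	if (fork() == 0) {
		p[0] = 2;	/* child write fault: CoW via hugetlb_wp() */
		_exit(0);
	}
	wait(NULL);
	printf("parent sees %d\n", p[0]);	/* 1: the child got a copy */
	munmap(p, len);
	return 0;
}
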
Reviewed-by: Sidhartha Kumar <[email protected]>