2023-10-20 18:34:06

by Vishal Moola

Subject: [PATCH v3 0/5] Some khugepaged folio conversions

This patchset converts a number of functions to use folios. This cleans
up some khugepaged code and removes a large number of hidden
compound_head() calls.
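
To make "hidden" concrete: most page-based helpers resolve
compound_head() internally on every call, while their folio
counterparts start from the head page by construction. A minimal
illustrative sketch of the difference (not code from this series;
do_something*() stand in for arbitrary work):

	/* Page API: each PageX() test hides a compound_head(page) call. */
	if (PageLocked(page) && PageLRU(page))
		do_something(page);

	/* Folio API: one explicit conversion, then no hidden lookups. */
	struct folio *folio = page_folio(page);	/* the one compound_head() */
	if (folio_test_locked(folio) && folio_test_lru(folio))
		do_something_else(folio);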

---
v3:
Address filemap_lock_folio() returning ERR_PTR
Fix uninitialized variable warning
Some minor rewording

Vishal Moola (Oracle) (5):
mm/khugepaged: Convert __collapse_huge_page_isolate() to use folios
mm/khugepaged: Convert hpage_collapse_scan_pmd() to use folios
mm/khugepaged: Convert is_refcount_suitable() to use folios
mm/khugepaged: Convert alloc_charge_hpage() to use folios
mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios

 include/linux/memcontrol.h |  14 ----
 mm/khugepaged.c            | 137 ++++++++++++++++++-------------------
 2 files changed, 68 insertions(+), 83 deletions(-)

--
2.40.1


2023-10-20 18:34:08

by Vishal Moola

Subject: [PATCH v3 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios

Also remove count_memcg_page_event() now that its last caller no longer
uses it, and rename hpage_collapse_alloc_page() to
hpage_collapse_alloc_folio().

This removes 1 call to compound_head() and helps convert khugepaged to
use folios throughout.
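
The calling convention of alloc_charge_hpage() is unchanged; only its
internals now work on folios. A sketch of what a caller sees after this
patch (distilled from the hunks below, not a verbatim caller):

	struct page *hpage;
	int result = alloc_charge_hpage(&hpage, mm, cc);

	if (result != SCAN_SUCCEED)
		return result;	/* *hpage is NULL on either failure path */
	/* on success, hpage points at the head page of the charged folio */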

Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
 include/linux/memcontrol.h | 14 --------------
 mm/khugepaged.c            | 17 ++++++++++-------
 2 files changed, 10 insertions(+), 21 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..3126bde982e8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1080,15 +1080,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
 	local_irq_restore(flags);
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  enum vm_event_item idx)
-{
-	struct mem_cgroup *memcg = page_memcg(page);
-
-	if (memcg)
-		count_memcg_events(memcg, idx, 1);
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
@@ -1565,11 +1556,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 {
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  int idx)
-{
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9efd8ff68f06..6a7184cd291b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
 				      nodemask_t *nmask)
 {
-	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
-	if (unlikely(!*hpage)) {
+	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+	if (unlikely(!*folio)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return false;
 	}
 
-	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
@@ -1064,17 +1064,20 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
-	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
+		*hpage = NULL;
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
+	}
 
-	folio = page_folio(*hpage);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
 		*hpage = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
-	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
 
+	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+
+	*hpage = folio_page(folio, 0);
 	return SCAN_SUCCEED;
 }

--
2.40.1

2023-10-20 18:34:25

by Vishal Moola

Subject: [PATCH v3 5/5] mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios

This removes 2 calls to compound_head() and helps convert khugepaged to
use folios throughout.

Previously, if the address passed to collapse_pte_mapped_thp()
corresponded to a tail page, the scan would fail immediately. Using
filemap_lock_folio() we get the corresponding folio back and try to
operate on the folio instead.
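
Note the differing miss conventions: find_lock_page() returns NULL when
nothing is in the page cache at that index, whereas filemap_lock_folio()
returns an ERR_PTR() (hence the v3 changelog entry). A minimal sketch of
the new lookup pattern, using the same expressions as the hunk below:

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
				   linear_page_index(vma, haddr));
	if (IS_ERR(folio))	/* e.g. ERR_PTR(-ENOENT) on a cache miss */
		return SCAN_PAGE_NULL;
	/* on success the folio is locked and referenced; both are
	   released at drop_folio via folio_unlock() + folio_put() */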

Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
 mm/khugepaged.c | 45 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 25 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6a7184cd291b..bc2d8ff269c7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1477,7 +1477,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	bool notified = false;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	struct vm_area_struct *vma = vma_lookup(mm, haddr);
-	struct page *hpage;
+	struct folio *folio;
 	pte_t *start_pte, *pte;
 	pmd_t *pmd, pgt_pmd;
 	spinlock_t *pml = NULL, *ptl;
@@ -1510,19 +1510,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	if (userfaultfd_wp(vma))
 		return SCAN_PTE_UFFD_WP;
 
-	hpage = find_lock_page(vma->vm_file->f_mapping,
+	folio = filemap_lock_folio(vma->vm_file->f_mapping,
 			       linear_page_index(vma, haddr));
-	if (!hpage)
+	if (IS_ERR(folio))
 		return SCAN_PAGE_NULL;
 
-	if (!PageHead(hpage)) {
-		result = SCAN_FAIL;
-		goto drop_hpage;
-	}
-
-	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
+	if (folio_order(folio) != HPAGE_PMD_ORDER) {
 		result = SCAN_PAGE_COMPOUND;
-		goto drop_hpage;
+		goto drop_folio;
 	}
 
 	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
@@ -1536,13 +1531,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		 */
 		goto maybe_install_pmd;
 	default:
-		goto drop_hpage;
+		goto drop_folio;
 	}
 
 	result = SCAN_FAIL;
 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
 	if (!start_pte)		/* mmap_lock + page lock should prevent this */
-		goto drop_hpage;
+		goto drop_folio;
 
 	/* step 1: check all mapped PTEs are to the right huge page */
 	for (i = 0, addr = haddr, pte = start_pte;
@@ -1567,7 +1562,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
 		 * page table, but the new page will not be a subpage of hpage.
 		 */
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
 			goto abort;
 	}
 
@@ -1582,7 +1577,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
 	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
 	 * inserts a valid as-if-COWed PTE without even looking up page cache.
-	 * So page lock of hpage does not protect from it, so we must not drop
+	 * So page lock of folio does not protect from it, so we must not drop
 	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
 	 */
 	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
@@ -1606,7 +1601,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 			continue;
 		/*
 		 * We dropped ptl after the first scan, to do the mmu_notifier:
-		 * page lock stops more PTEs of the hpage being faulted in, but
+		 * page lock stops more PTEs of the folio being faulted in, but
 		 * does not stop write faults COWing anon copies from existing
 		 * PTEs; and does not stop those being swapped out or migrated.
 		 */
@@ -1615,7 +1610,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 			goto abort;
 		}
 		page = vm_normal_page(vma, addr, ptent);
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
 			goto abort;
 
 		/*
@@ -1634,8 +1629,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 
 	/* step 3: set proper refcount and mm_counters. */
 	if (nr_ptes) {
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
 	}
 
 	/* step 4: remove empty page table */
@@ -1659,14 +1654,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 maybe_install_pmd:
 	/* step 5: install pmd entry */
 	result = install_pmd
-			? set_huge_pmd(vma, haddr, pmd, hpage)
+			? set_huge_pmd(vma, haddr, pmd, &folio->page)
 			: SCAN_SUCCEED;
-	goto drop_hpage;
+	goto drop_folio;
 abort:
 	if (nr_ptes) {
 		flush_tlb_mm(mm);
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
 	}
 	if (start_pte)
 		pte_unmap_unlock(start_pte, ptl);
@@ -1674,9 +1669,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		spin_unlock(pml);
 	if (notified)
 		mmu_notifier_invalidate_range_end(&range);
-drop_hpage:
-	unlock_page(hpage);
-	put_page(hpage);
+drop_folio:
+	folio_unlock(folio);
+	folio_put(folio);
 	return result;
 }

--
2.40.1

2023-10-24 01:53:56

by Rik van Riel

Subject: Re: [PATCH v3 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios

On Fri, 2023-10-20 at 11:33 -0700, Vishal Moola (Oracle) wrote:
> Also remove count_memcg_page_event() now that its last caller no longer
> uses it, and rename hpage_collapse_alloc_page() to
> hpage_collapse_alloc_folio().
>
> This removes 1 call to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>
>
Reviewed-by: Rik van Riel <[email protected]>

--
All Rights Reversed.

2023-10-24 02:43:29

by Rik van Riel

Subject: Re: [PATCH v3 5/5] mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios

On Fri, 2023-10-20 at 11:33 -0700, Vishal Moola (Oracle) wrote:
> This removes 2 calls to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Previously, if the address passed to collapse_pte_mapped_thp()
> corresponded to a tail page, the scan would fail immediately. Using
> filemap_lock_folio() we get the corresponding folio back and try to
> operate on the folio instead.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>

Reviewed-by: Rik van Riel <[email protected]>

--
All Rights Reversed.

2023-10-24 17:40:12

by Yang Shi

Subject: Re: [PATCH v3 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios

On Fri, Oct 20, 2023 at 11:34 AM Vishal Moola (Oracle)
<[email protected]> wrote:
>
> Also remove count_memcg_page_event() now that its last caller no longer
> uses it, and rename hpage_collapse_alloc_page() to
> hpage_collapse_alloc_folio().
>
> This removes 1 call to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>

Reviewed-by: Yang Shi <[email protected]>


2023-10-24 17:40:44

by Yang Shi

Subject: Re: [PATCH v3 5/5] mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios

On Fri, Oct 20, 2023 at 11:34 AM Vishal Moola (Oracle)
<[email protected]> wrote:
>
> This removes 2 calls to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Previously, if the address passed to collapse_pte_mapped_thp()
> corresponded to a tail page, the scan would fail immediately. Using
> filemap_lock_folio() we get the corresponding folio back and try to
> operate on the folio instead.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>

Reviewed-by: Yang Shi <[email protected]>
