This patchset converts a number of functions to use folios. This cleans
up some khugepaged code and removes a large number of hidden
compound_head() calls.
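To illustrate what "hidden" means here (not part of the series, just
for context): page-based helpers such as PageSwapCache() and
page_count() first resolve the head page internally, roughly

	PageSwapCache(page)  ->  folio_test_swapcache(page_folio(page))
	page_count(page)     ->  folio_ref_count(page_folio(page))

where page_folio() is a compound_head() lookup. The folio variants
start from a folio, which is never a tail page, so that lookup
disappears at every call site.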
---
v2:
Remove last caller of count_memcg_page_event()
Stop using the folio_estimated_sharers() heuristic for tail page
sharer estimation
Vishal Moola (Oracle) (5):
mm/khugepaged: Convert __collapse_huge_page_isolate() to use folios
mm/khugepaged: Convert hpage_collapse_scan_pmd() to use folios
mm/khugepaged: Convert is_refcount_suitable() to use folios
mm/khugepaged: Convert alloc_charge_hpage() to use folios
mm/khugepaged: Convert collapse_pte_mapped_thp() to use folios
include/linux/memcontrol.h | 14 ----
mm/khugepaged.c | 135 ++++++++++++++++++-------------------
2 files changed, 66 insertions(+), 83 deletions(-)
--
2.40.1
Both callers of is_refcount_suitable() have been converted to use
folios, so convert it to take in a folio. Both callers only operate on
head pages of folios, so the mapcount/refcount conversions here are
trivial.
Removes 3 calls to compound_head(), and removes 315 bytes of kernel text.
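As a worked example (illustrative, not taken from the patch): an
order-0 anonymous page mapped by exactly one process and sitting in the
swap cache has

	folio_mapcount(folio) == 1    (one PTE maps it)
	folio_nr_pages(folio) == 1    (the swap cache holds one
	                               reference per page)

so the expected refcount is 2. If folio_ref_count(folio) is higher,
something else (e.g. a GUP pin) holds a reference, the refcount is not
suitable, and the caller fails the collapse with SCAN_PAGE_COUNT.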
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
mm/khugepaged.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6c4b5af43371..9efd8ff68f06 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -524,15 +524,15 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
}
}
-static bool is_refcount_suitable(struct page *page)
+static bool is_refcount_suitable(struct folio *folio)
{
int expected_refcount;
- expected_refcount = total_mapcount(page);
- if (PageSwapCache(page))
- expected_refcount += compound_nr(page);
+ expected_refcount = folio_mapcount(folio);
+ if (folio_test_swapcache(folio))
+ expected_refcount += folio_nr_pages(folio);
- return page_count(page) == expected_refcount;
+ return folio_ref_count(folio) == expected_refcount;
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
@@ -625,7 +625,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
* but not from this process. The other process cannot write to
* the page, only trigger CoW.
*/
- if (!is_refcount_suitable(&folio->page)) {
+ if (!is_refcount_suitable(folio)) {
folio_unlock(folio);
result = SCAN_PAGE_COUNT;
goto out;
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
* has excessive GUP pins (i.e. 512). Anyway the same check
* will be done again later the risk seems low.
*/
- if (!is_refcount_suitable(&folio->page)) {
+ if (!is_refcount_suitable(folio)) {
result = SCAN_PAGE_COUNT;
goto out_unmap;
}
--
2.40.1
Also remove count_memcg_page_event() now that its last caller no longer
uses it, and convert hpage_collapse_alloc_page() to
hpage_collapse_alloc_folio(). This removes 1 call to compound_head()
and helps convert khugepaged to use folios throughout.
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
include/linux/memcontrol.h | 14 --------------
mm/khugepaged.c | 15 ++++++++-------
2 files changed, 8 insertions(+), 21 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..3126bde982e8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1080,15 +1080,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-static inline void count_memcg_page_event(struct page *page,
- enum vm_event_item idx)
-{
- struct mem_cgroup *memcg = page_memcg(page);
-
- if (memcg)
- count_memcg_events(memcg, idx, 1);
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
@@ -1565,11 +1556,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
-{
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9efd8ff68f06..d49aa22d99c9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
}
#endif
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
nodemask_t *nmask)
{
- *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
- if (unlikely(!*hpage)) {
+ *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+ if (unlikely(!*folio)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
return false;
}
- folio_prep_large_rmappable((struct folio *)*hpage);
count_vm_event(THP_COLLAPSE_ALLOC);
return true;
}
@@ -1064,17 +1064,18 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
int node = hpage_collapse_find_target_node(cc);
struct folio *folio;
- if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+ if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
return SCAN_ALLOC_HUGE_PAGE_FAIL;
- folio = page_folio(*hpage);
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
folio_put(folio);
*hpage = NULL;
return SCAN_CGROUP_CHARGE_FAIL;
}
- count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+ count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+
+ *hpage = folio_page(folio, 0);
return SCAN_SUCCEED;
}
--
2.40.1
Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel
text.
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
mm/khugepaged.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 500756604488..6c4b5af43371 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1248,6 +1248,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
int result = SCAN_FAIL, referenced = 0;
int none_or_zero = 0, shared = 0;
struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long _address;
spinlock_t *ptl;
int node = NUMA_NO_NODE, unmapped = 0;
@@ -1334,29 +1335,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
}
}
- page = compound_head(page);
-
+ folio = page_folio(page);
/*
* Record which node the original page is from and save this
* information to cc->node_load[].
* Khugepaged will allocate hugepage from the node has the max
* hit record.
*/
- node = page_to_nid(page);
+ node = folio_nid(folio);
if (hpage_collapse_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
goto out_unmap;
}
cc->node_load[node]++;
- if (!PageLRU(page)) {
+ if (!folio_test_lru(folio)) {
result = SCAN_PAGE_LRU;
goto out_unmap;
}
- if (PageLocked(page)) {
+ if (folio_test_locked(folio)) {
result = SCAN_PAGE_LOCK;
goto out_unmap;
}
- if (!PageAnon(page)) {
+ if (!folio_test_anon(folio)) {
result = SCAN_PAGE_ANON;
goto out_unmap;
}
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
* has excessive GUP pins (i.e. 512). Anyway the same check
* will be done again later the risk seems low.
*/
- if (!is_refcount_suitable(page)) {
+ if (!is_refcount_suitable(&folio->page)) {
result = SCAN_PAGE_COUNT;
goto out_unmap;
}
@@ -1381,8 +1381,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
* enough young pte to justify collapsing the page
*/
if (cc->is_khugepaged &&
- (pte_young(pteval) || page_is_young(page) ||
- PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+ (pte_young(pteval) || folio_test_young(folio) ||
+ folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
address)))
referenced++;
}
@@ -1404,7 +1404,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
*mmap_locked = false;
}
out:
- trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+ trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
none_or_zero, result, unmapped);
return result;
}
--
2.40.1
This removes 2 calls to compound_head() and helps convert khugepaged to
use folios throughout.
Previously, if the address passed to collapse_pte_mapped_thp()
corresponded to a tail page, the scan would fail immediately. Using
filemap_lock_folio(), we can get back the folio containing that address
and try to operate on the folio instead.
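To sketch the difference (identifiers as in the function below): the
old lookup

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));

returns the exact page at that index, which for a pte-mapped THP may be
a tail page, and the old !PageHead(hpage) check then bailed out with
SCAN_FAIL. The new

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
				   linear_page_index(vma, haddr));

resolves the same index to the folio covering it, so the order check
and the rest of the collapse can proceed.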
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
mm/khugepaged.c | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d49aa22d99c9..94c1dd09a8a6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1475,7 +1475,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
bool notified = false;
unsigned long haddr = addr & HPAGE_PMD_MASK;
struct vm_area_struct *vma = vma_lookup(mm, haddr);
- struct page *hpage;
+ struct folio *folio;
pte_t *start_pte, *pte;
pmd_t *pmd, pgt_pmd;
spinlock_t *pml = NULL, *ptl;
@@ -1508,19 +1508,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
if (userfaultfd_wp(vma))
return SCAN_PTE_UFFD_WP;
- hpage = find_lock_page(vma->vm_file->f_mapping,
+ folio = filemap_lock_folio(vma->vm_file->f_mapping,
linear_page_index(vma, haddr));
- if (!hpage)
+ if (!folio)
return SCAN_PAGE_NULL;
- if (!PageHead(hpage)) {
- result = SCAN_FAIL;
- goto drop_hpage;
- }
-
- if (compound_order(hpage) != HPAGE_PMD_ORDER) {
+ if (folio_order(folio) != HPAGE_PMD_ORDER) {
result = SCAN_PAGE_COMPOUND;
- goto drop_hpage;
+ goto drop_folio;
}
result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
@@ -1534,13 +1529,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
*/
goto maybe_install_pmd;
default:
- goto drop_hpage;
+ goto drop_folio;
}
result = SCAN_FAIL;
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
if (!start_pte) /* mmap_lock + page lock should prevent this */
- goto drop_hpage;
+ goto drop_folio;
/* step 1: check all mapped PTEs are to the right huge page */
for (i = 0, addr = haddr, pte = start_pte;
@@ -1565,7 +1560,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* Note that uprobe, debugger, or MAP_PRIVATE may change the
* page table, but the new page will not be a subpage of hpage.
*/
- if (hpage + i != page)
+ if (folio_page(folio, i) != page)
goto abort;
}
@@ -1580,7 +1575,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* page_table_lock) ptl nests inside pml. The less time we hold pml,
* the better; but userfaultfd's mfill_atomic_pte() on a private VMA
* inserts a valid as-if-COWed PTE without even looking up page cache.
- * So page lock of hpage does not protect from it, so we must not drop
+ * So page lock of folio does not protect from it, so we must not drop
* ptl before pgt_pmd is removed, so uffd private needs pml taken now.
*/
if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
@@ -1604,7 +1599,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
continue;
/*
* We dropped ptl after the first scan, to do the mmu_notifier:
- * page lock stops more PTEs of the hpage being faulted in, but
+ * page lock stops more PTEs of the folio being faulted in, but
* does not stop write faults COWing anon copies from existing
* PTEs; and does not stop those being swapped out or migrated.
*/
@@ -1613,7 +1608,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
goto abort;
}
page = vm_normal_page(vma, addr, ptent);
- if (hpage + i != page)
+ if (folio_page(folio, i) != page)
goto abort;
/*
@@ -1632,8 +1627,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
/* step 3: set proper refcount and mm_counters. */
if (nr_ptes) {
- page_ref_sub(hpage, nr_ptes);
- add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+ folio_ref_sub(folio, nr_ptes);
+ add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
}
/* step 4: remove empty page table */
@@ -1657,14 +1652,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
maybe_install_pmd:
/* step 5: install pmd entry */
result = install_pmd
- ? set_huge_pmd(vma, haddr, pmd, hpage)
+ ? set_huge_pmd(vma, haddr, pmd, &folio->page)
: SCAN_SUCCEED;
- goto drop_hpage;
+ goto drop_folio;
abort:
if (nr_ptes) {
flush_tlb_mm(mm);
- page_ref_sub(hpage, nr_ptes);
- add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+ folio_ref_sub(folio, nr_ptes);
+ add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
}
if (start_pte)
pte_unmap_unlock(start_pte, ptl);
@@ -1672,9 +1667,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
spin_unlock(pml);
if (notified)
mmu_notifier_invalidate_range_end(&range);
-drop_hpage:
- unlock_page(hpage);
- put_page(hpage);
+drop_folio:
+ folio_unlock(folio);
+ folio_put(folio);
return result;
}
--
2.40.1
On Wed, Oct 18, 2023 at 01:32:13PM -0700, Vishal Moola (Oracle) wrote:
> This removes 2 calls to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Previously, if the address passed to collapse_pte_mapped_thp()
> corresponded to a tail page, the scan would fail immediately. Using
> filemap_lock_folio() we can get the corresponding folio back and try to
> operate on the folio instead.
>
> Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Hi Andrew, I've attached a fix patch addressing the syzbot issue.
It can be squashed into this patch; syzbot has already tested it on v2
as well.
well.