2022-10-21 17:19:50

by James Houghton

[permalink] [raw]
Subject: [RFC PATCH v2 28/47] rmap: in try_to_{migrate,unmap}_one, check head page for page flags

The main complication here is that HugeTLB pages have their poison
status stored in the head page as the HWPoison page flag. Because
HugeTLB high-granularity mapping can create PTEs that point to
subpages rather than to the head of a hugepage, we need to check the
compound head page for page flags.

Signed-off-by: James Houghton <[email protected]>
---
mm/rmap.c | 34 ++++++++++++++++++++++++++--------
1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index a8359584467e..d5e1eb6b8ce5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1474,10 +1474,11 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
pte_t pteval;
- struct page *subpage;
+ struct page *subpage, *page_flags_page;
bool anon_exclusive, ret = true;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ bool page_poisoned;

/*
* When racing against e.g. zap_pte_range() on another cpu,
@@ -1530,9 +1531,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,

subpage = folio_page(folio,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
+ /*
+ * We check the page flags of HugeTLB pages by checking the
+ * head page.
+ */
+ page_flags_page = folio_test_hugetlb(folio)
+ ? &folio->page
+ : subpage;
+ page_poisoned = PageHWPoison(page_flags_page);
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(subpage);
+ PageAnonExclusive(page_flags_page);

if (folio_test_hugetlb(folio)) {
bool anon = folio_test_anon(folio);
@@ -1541,7 +1550,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* The try_to_unmap() is only passed a hugetlb page
* in the case where the hugetlb page is poisoned.
*/
- VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
+ VM_BUG_ON_FOLIO(!page_poisoned, folio);
/*
* huge_pmd_unshare may unmap an entire PMD page.
* There is no way of knowing exactly which PMDs may
@@ -1630,7 +1639,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);

- if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
+ if (page_poisoned && !(flags & TTU_IGNORE_HWPOISON)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(1UL << pvmw.pte_order, mm);
@@ -1656,7 +1665,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
} else if (folio_test_anon(folio)) {
- swp_entry_t entry = { .val = page_private(subpage) };
+ swp_entry_t entry = {
+ .val = page_private(page_flags_page)
+ };
pte_t swp_pte;
/*
* Store the swap location in the pte.
@@ -1855,7 +1866,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
pte_t pteval;
- struct page *subpage;
+ struct page *subpage, *page_flags_page;
bool anon_exclusive, ret = true;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
@@ -1935,9 +1946,16 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
subpage = folio_page(folio,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
}
+ /*
+ * We check the page flags of HugeTLB pages by checking the
+ * head page.
+ */
+ page_flags_page = folio_test_hugetlb(folio)
+ ? &folio->page
+ : subpage;
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(subpage);
+ PageAnonExclusive(page_flags_page);

if (folio_test_hugetlb(folio)) {
bool anon = folio_test_anon(folio);
@@ -2048,7 +2066,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* No need to invalidate here it will synchronize on
* against the special swap migration pte.
*/
- } else if (PageHWPoison(subpage)) {
+ } else if (PageHWPoison(page_flags_page)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(1L << pvmw.pte_order, mm);
--
2.38.0.135.g90850a2211-goog