2022-04-28 12:40:09

by David Hildenbrand

Subject: Re: [PATCH v4 00/17] mm: COW fixes part 2: reliable GUP pins of anonymous pages

On 28.04.22 10:34, David Hildenbrand wrote:
> This is roughly what we have in -mm and -next; however, it includes one
> additional patch and some minor differences, especially minor fixes in the
> patch descriptions.
>
> v4 is located at:
> https://github.com/davidhildenbrand/linux/tree/cow_fixes_part_2_v4
>
> Please refer to the v3 cover letter:
> https://lkml.kernel.org/r/[email protected]
>
>

Essential diff to v3:


diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 06280fc1c99b..8b6e4cd1fab8 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -299,7 +299,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
 struct page_vma_mapped_walk;
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page);
 
 extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
@@ -332,7 +332,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
 	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
 }
 #else
-static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page)
 {
 	BUILD_BUG();
diff --git a/mm/gup.c b/mm/gup.c
index 5c17d4816441..46ffd8c51c6e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -564,8 +564,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		goto out;
 	}
 
-	VM_BUG_ON((flags & FOLL_PIN) && PageAnon(page) &&
-		  !PageAnonExclusive(page));
+	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+		       !PageAnonExclusive(page), page);
 
 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
 	if (unlikely(!try_grab_page(page, flags))) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c7ac1b462543..a2f44d8d3d47 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1392,8 +1392,8 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
 		return ERR_PTR(-EMLINK);
 
-	VM_BUG_ON((flags & FOLL_PIN) && PageAnon(page) &&
-		  !PageAnonExclusive(page));
+	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+		       !PageAnonExclusive(page), page);
 
 	if (!try_grab_page(page, flags))
 		return ERR_PTR(-ENOMEM);
@@ -3080,7 +3080,7 @@ late_initcall(split_huge_pages_debugfs);
 #endif
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page)
 {
 	struct vm_area_struct *vma = pvmw->vma;
@@ -3092,7 +3092,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	pmd_t pmdswp;
 
 	if (!(pvmw->pmd && !pvmw->pte))
-		return;
+		return 0;
 
 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
@@ -3100,7 +3100,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
-		return;
+		return -EBUSY;
 	}
 
 	if (pmd_dirty(pmdval))
@@ -3118,6 +3118,8 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	page_remove_rmap(page, vma, true);
 	put_page(page);
 	trace_set_migration_pmd(address, pmd_val(pmdswp));
+
+	return 0;
 }
 
 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ee0542f77130..534747d661dd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6100,8 +6100,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
 		page = pte_page(huge_ptep_get(pte));
 
-		VM_BUG_ON((flags & FOLL_PIN) && PageAnon(page) &&
-			  !PageAnonExclusive(page));
+		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+			       !PageAnonExclusive(page), page);
 
 		/*
 		 * If subpage information not requested, update counters
diff --git a/mm/memory.c b/mm/memory.c
index 2046de391da2..1a25d28ee5d9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3132,7 +3132,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			free_swap_cache(old_page);
 		put_page(old_page);
 	}
-	return page_copied && !unshare ? VM_FAULT_WRITE : 0;
+	return (page_copied && !unshare) ? VM_FAULT_WRITE : 0;
 oom_free_new:
 	put_page(new_page);
 oom:
@@ -4557,7 +4557,7 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 
 	if (vma_is_anonymous(vmf->vma)) {
-		if (unlikely(unshare) &&
+		if (likely(!unshare) &&
 		    userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
 			return handle_userfault(vmf, VM_UFFD_WP);
 		return do_huge_pmd_wp_page(vmf);
diff --git a/mm/rmap.c b/mm/rmap.c
index 00418faaf4ce..12f54fbdb920 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1217,8 +1217,6 @@ void page_add_new_anon_rmap(struct page *page,

 		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
 	} else {
-		/* Anon THP always mapped first with PMD */
-		VM_BUG_ON_PAGE(PageTransCompound(page), page);
 		/* increment count (starts at -1) */
 		atomic_set(&page->_mapcount, 0);
 	}
@@ -1814,7 +1812,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
 					!folio_test_pmd_mappable(folio), folio);
 
-			set_pmd_migration_entry(&pvmw, subpage);
+			if (set_pmd_migration_entry(&pvmw, subpage)) {
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
+			}
 			continue;
 		}
 #endif
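
For readers tracking only the interface change: the following is a minimal,
self-contained userspace C sketch (not kernel code) of the error-propagation
pattern the diff introduces. set_pmd_migration_entry() now reports -EBUSY when
an exclusive anonymous page cannot be shared, and the caller fails the
migration instead of continuing. Only the function names mirror the kernel
ones; the parameters and bodies are stand-ins.

/*
 * Standalone illustration only: the real functions operate on struct page
 * and the page_vma_mapped_walk, not on booleans.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in: sharing fails (nonzero) when the page is GUP-pinned. */
static int page_try_share_anon_rmap(bool pinned)
{
	return pinned ? -EBUSY : 0;
}

/* Previously void; now 0 on success, -EBUSY if no migration entry was set. */
static int set_pmd_migration_entry(bool anon_exclusive, bool pinned)
{
	if (anon_exclusive && page_try_share_anon_rmap(pinned))
		return -EBUSY;	/* caller must leave the PMD mapped */
	/* ... install the migration entry ... */
	return 0;
}

/* Caller pattern from the mm/rmap.c hunk: abort the walk on failure. */
static bool try_to_migrate_one(bool anon_exclusive, bool pinned)
{
	if (set_pmd_migration_entry(anon_exclusive, pinned))
		return false;	/* i.e. ret = false; walk done; break; */
	return true;
}

int main(void)
{
	printf("unpinned exclusive page: %s\n",
	       try_to_migrate_one(true, false) ? "migrates" : "stays mapped");
	printf("pinned exclusive page:   %s\n",
	       try_to_migrate_one(true, true) ? "migrates" : "stays mapped");
	return 0;
}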


--
Thanks,

David / dhildenb