2021-06-09 17:05:53

by Hugh Dickins

Subject: [PATCH v2 01/10] mm/thp: fix __split_huge_pmd_locked() on shmem migration entry (fwd)



---------- Forwarded message ----------
Date: Tue, 8 Jun 2021 21:00:12 -0700 (PDT)
From: Hugh Dickins <[email protected]>
To: Andrew Morton <[email protected]>
Cc: Hugh Dickins <[email protected]>,
Kirill A. Shutemov <[email protected]>,
Yang Shi <[email protected]>, Wang Yugui <[email protected]>,
Matthew Wilcox <[email protected]>,
Naoya Horiguchi <[email protected]>,
Alistair Popple <[email protected]>, Ralph Campbell <[email protected]>,
Zi Yan <[email protected]>, Miaohe Lin <[email protected]>,
Minchan Kim <[email protected]>, Jue Wang <[email protected]>,
Peter Xu <[email protected]>, Jan Kara <[email protected]>,
Shakeel Butt <[email protected]>, Oscar Salvador <[email protected]>
Subject: [PATCH v2 01/10] mm/thp: fix __split_huge_pmd_locked() on shmem
migration entry

Stressing huge tmpfs page migration racing hole punch often crashed on the
VM_BUG_ON(!pmd_present) in pmdp_huge_clear_flush(), with a DEBUG_VM=y
kernel; or shortly afterwards, on a bad dereference in
__split_huge_pmd_locked() when DEBUG_VM=n. Both paths forgot to allow for
pmd migration entries in the non-anonymous case.
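
[ Illustrative only, not the harness actually used: a minimal userspace
sketch of that stress, assuming a two-node NUMA box, shmem THP enabled
(e.g. a tmpfs mounted huge=always at the made-up path below), and root
privileges for MPOL_MF_MOVE_ALL; build with -lnuma -lpthread. ]

#define _GNU_SOURCE
#include <fcntl.h>              /* fallocate(), FALLOC_FL_* */
#include <numaif.h>             /* move_pages(), MPOL_MF_MOVE_ALL */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define PMD_SIZE  (2UL << 20)   /* assumes 2MB PMD-sized THPs */
#define NR_HUGE   64
#define FILESIZE  (NR_HUGE * PMD_SIZE)

static char *map;
static int fd;

static void *punch_loop(void *arg)
{
        for (;;) {
                off_t off = (rand() % NR_HUGE) * PMD_SIZE;
                /* Hole punch zaps the range, racing with migration. */
                fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          off, PMD_SIZE);
                memset(map + off, 1, PMD_SIZE); /* refault, ideally as THP */
        }
        return NULL;
}

static void *migrate_loop(void *arg)
{
        void *pages[NR_HUGE];
        int nodes[NR_HUGE], status[NR_HUGE];
        int target = 0;

        for (;;) {
                for (int i = 0; i < NR_HUGE; i++) {
                        pages[i] = map + i * PMD_SIZE;
                        nodes[i] = target;
                }
                /* Migration installs pmd migration entries for shmem THPs. */
                move_pages(0, NR_HUGE, pages, nodes, status,
                           MPOL_MF_MOVE_ALL);
                target ^= 1;    /* bounce between nodes 0 and 1 */
        }
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        fd = open("/mnt/huge-tmpfs/testfile", O_RDWR | O_CREAT, 0600);
        if (fd < 0 || ftruncate(fd, FILESIZE))
                return 1;
        map = mmap(NULL, FILESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;
        madvise(map, FILESIZE, MADV_HUGEPAGE);  /* if shmem_enabled=advise */
        memset(map, 1, FILESIZE);               /* populate with huge pages */

        pthread_create(&t1, NULL, punch_loop, NULL);
        pthread_create(&t2, NULL, migrate_loop, NULL);
        pause();
        return 0;
}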

Full disclosure: those particular experiments were on a kernel with more
relaxed mmap_lock and i_mmap_rwsem locking, and were not repeated on the
vanilla kernel: it is conceivable that stricter locking happens to avoid
those cases, or makes them less likely; but __split_huge_pmd_locked()
already allowed for pmd migration entries when handling anonymous THPs,
so this commit brings the shmem and file THP handling into line.

And while there: use old_pmd rather than _pmd, as in the following blocks;
and make it clearer to the eye that the !vma_is_anonymous() block is
self-contained, making an early return after accounting for unmapping.

Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
Signed-off-by: Hugh Dickins <[email protected]>
Cc: <[email protected]>
---
v2: omit is_huge_zero_pmd() mods (done differently in next), per Kirill

 mm/huge_memory.c     | 27 ++++++++++++++++++---------
 mm/pgtable-generic.c |  5 ++---
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 63ed6b25deaa..42cfefc6e66e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2044,7 +2044,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	count_vm_event(THP_SPLIT_PMD);
 
 	if (!vma_is_anonymous(vma)) {
-		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+		old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
 		/*
 		 * We are going to unmap this huge page. So
 		 * just go ahead and zap it
@@ -2053,16 +2053,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		zap_deposited_table(mm, pmd);
 		if (vma_is_special_huge(vma))
 			return;
-		page = pmd_page(_pmd);
-		if (!PageDirty(page) && pmd_dirty(_pmd))
-			set_page_dirty(page);
-		if (!PageReferenced(page) && pmd_young(_pmd))
-			SetPageReferenced(page);
-		page_remove_rmap(page, true);
-		put_page(page);
+		if (unlikely(is_pmd_migration_entry(old_pmd))) {
+			swp_entry_t entry;
+
+			entry = pmd_to_swp_entry(old_pmd);
+			page = migration_entry_to_page(entry);
+		} else {
+			page = pmd_page(old_pmd);
+			if (!PageDirty(page) && pmd_dirty(old_pmd))
+				set_page_dirty(page);
+			if (!PageReferenced(page) && pmd_young(old_pmd))
+				SetPageReferenced(page);
+			page_remove_rmap(page, true);
+			put_page(page);
+		}
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
-	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+	}
+
+	if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
 		/*
 		 * FIXME: Do we want to invalidate secondary mmu by calling
 		 * mmu_notifier_invalidate_range() see comments below inside
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c2210e1cdb51..4e640baf9794 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
 	pmd_t pmd;
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	VM_BUG_ON(!pmd_present(*pmdp));
-	/* Below assumes pmd_present() is true */
-	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+		  !pmd_devmap(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
--
2.26.2
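
[ Aside, illustrative only: the pmdp_huge_clear_flush() change is easiest
to read as a pair of predicates over what the function may now see. A
plain C restatement, not kernel code; the helpers below are made up for
illustration. ]

#include <stdbool.h>

/* Old checks: any non-present pmd (such as a pmd migration entry) fired
 * VM_BUG_ON(!pmd_present()) before the entry was examined any further. */
static bool old_assert_ok(bool present, bool trans_huge, bool devmap)
{
        return present && (trans_huge || devmap);
}

/* New check: a present pmd must still be trans-huge or devmap, but a
 * non-present pmd migration entry is now allowed through, so it too is
 * cleared and its TLB range flushed. */
static bool new_assert_ok(bool present, bool trans_huge, bool devmap)
{
        return !present || trans_huge || devmap;
}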


2021-06-09 17:16:14

by Kirill A. Shutemov

Subject: Re: [PATCH v2 01/10] mm/thp: fix __split_huge_pmd_locked() on shmem migration entry (fwd)

On Tue, Jun 08, 2021 at 09:05:21PM -0700, Hugh Dickins wrote:
>
> [...]
>
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <[email protected]>
> Cc: <[email protected]>

Acked-by: Kirill A. Shutemov <[email protected]>


--
Kirill A. Shutemov

2021-06-09 18:12:29

by Yang Shi

Subject: Re: [PATCH v2 01/10] mm/thp: fix __split_huge_pmd_locked() on shmem migration entry (fwd)

On Tue, Jun 8, 2021 at 9:05 PM Hugh Dickins <[email protected]> wrote:
>
> [...]
>
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <[email protected]>
> Cc: <[email protected]>

Reviewed-by: Yang Shi <[email protected]>
