From: Zi Yan <zi.yan@sent.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kirill.shutemov@linux.intel.com, akpm@linux-foundation.org,
    minchan@kernel.org, vbabka@suse.cz, mgorman@techsingularity.net,
    mhocko@kernel.org, n-horiguchi@ah.jp.nec.com,
    khandual@linux.vnet.ibm.com, zi.yan@cs.rutgers.edu, dnellans@nvidia.com
Subject: [PATCH v4 07/11] mm: soft-dirty: keep soft-dirty bits over thp migration
Date: Mon, 13 Mar 2017 11:45:03 -0400
Message-Id: <20170313154507.3647-8-zi.yan@sent.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20170313154507.3647-1-zi.yan@sent.com>
References: <20170313154507.3647-1-zi.yan@sent.com>

From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>

The soft-dirty bit is designed to be preserved across page migration.
This patch makes it work in the same manner for THP migration too.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
ChangeLog v1 -> v2:
- separate diff moving _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
- clear_soft_dirty_pmd can handle migration entry
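Some background on the user-visible behavior being preserved: userspace
arms soft-dirty tracking by writing "4" to /proc/<pid>/clear_refs and
later reads bit 55 of the corresponding /proc/<pid>/pagemap entries to
learn which pages have been written since (Documentation/vm/soft-dirty.txt).
A minimal sketch of that flow, with error handling stripped for brevity;
the point of this patch is that a THP migrated between the clear and the
read still reports its soft-dirty state correctly:

/* Sketch only: check the soft-dirty bit of a freshly-written page. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int page_soft_dirty(void *addr)
{
	uint64_t ent;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	/* one u64 per page; bit 55 is the soft-dirty flag */
	pread(fd, &ent, sizeof(ent),
	      (uintptr_t)addr / sysconf(_SC_PAGESIZE) * sizeof(ent));
	close(fd);
	return (ent >> 55) & 1;
}

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *buf = aligned_alloc(psize, psize);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	buf[0] = 1;			/* fault the page in */
	write(fd, "4", 1);		/* "4" clears soft-dirty bits */
	close(fd);
	printf("before write: %d\n", page_soft_dirty(buf));
	buf[0] = 2;			/* dirty the page again */
	printf("after write:  %d\n", page_soft_dirty(buf));
	free(buf);
	return 0;
}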
---
 arch/x86/include/asm/pgtable.h | 17 +++++++++++++++++
 fs/proc/task_mmu.c             | 27 ++++++++++++++++-----------
 include/asm-generic/pgtable.h  | 34 +++++++++++++++++++++++++++++++++-
 include/linux/swapops.h        |  2 ++
 mm/huge_memory.c               | 22 +++++++++++++++++++++-
 5 files changed, 89 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..e57abf8e926c 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1088,6 +1088,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
 #endif
 
 #define PKRU_AD_BIT 0x1
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f2b0f3ba25ac..6ea9546ea8c1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -908,17 +908,22 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 {
 	pmd_t pmd = *pmdp;
 
-	/* See comment in change_huge_pmd() */
-	pmdp_invalidate(vma, addr, pmdp);
-	if (pmd_dirty(*pmdp))
-		pmd = pmd_mkdirty(pmd);
-	if (pmd_young(*pmdp))
-		pmd = pmd_mkyoung(pmd);
-
-	pmd = pmd_wrprotect(pmd);
-	pmd = pmd_clear_soft_dirty(pmd);
-
-	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	if (pmd_present(pmd)) {
+		/* See comment in change_huge_pmd() */
+		pmdp_invalidate(vma, addr, pmdp);
+		if (pmd_dirty(*pmdp))
+			pmd = pmd_mkdirty(pmd);
+		if (pmd_young(*pmdp))
+			pmd = pmd_mkyoung(pmd);
+
+		pmd = pmd_wrprotect(pmd);
+		pmd = pmd_clear_soft_dirty(pmd);
+
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+		pmd = pmd_swp_clear_soft_dirty(pmd);
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	}
 }
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f98a028100b6..7c781aef0911 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -574,7 +574,24 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_start_context_switch(prev)	do {} while (0)
 #endif
 
-#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
 static inline int pte_soft_dirty(pte_t pte)
 {
 	return 0;
@@ -619,6 +636,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte;
 }
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6625bea13869..b52674bd4173 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -179,6 +179,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 {
 	swp_entry_t arch_entry;
 
+	if (pmd_swp_soft_dirty(pmd))
+		pmd = pmd_swp_clear_soft_dirty(pmd);
 	arch_entry = __pmd_to_swp_entry(pmd);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
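One detail worth spelling out: pmd_to_swp_entry() above masks the
soft-dirty flag before handing the value to __pmd_to_swp_entry(), because
the flag is carried inside the non-present pmd itself and, depending on
where an architecture parks it, could otherwise be misread as part of the
type/offset fields. A toy stand-alone model (the bit layout is invented
for illustration, not x86's real encoding):

/* Toy model of why the soft-dirty flag is stripped before decoding.
 * Invented layout: type in bits 0-4, offset in bits 5+, and a
 * soft-dirty flag parked at bit 7 -- i.e. *inside* the offset field.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TYPE_BITS	5
#define TOY_SOFT_DIRTY	(1ULL << 7)	/* overlaps the offset field */

static uint64_t toy_mkentry(uint64_t type, uint64_t offset)
{
	return (offset << TOY_TYPE_BITS) | type;
}

static uint64_t toy_type(uint64_t ent)   { return ent & ((1ULL << TOY_TYPE_BITS) - 1); }
static uint64_t toy_offset(uint64_t ent) { return ent >> TOY_TYPE_BITS; }

int main(void)
{
	uint64_t ent = toy_mkentry(3, 1024) | TOY_SOFT_DIRTY;

	/* Decoding without masking misreads the offset (1028, not 1024)... */
	printf("naive offset:  %llu\n", (unsigned long long)toy_offset(ent));

	/* ...so, as in pmd_to_swp_entry(), clear the flag first. */
	uint64_t clean = ent & ~TOY_SOFT_DIRTY;
	assert(toy_type(clean) == 3 && toy_offset(clean) == 1024);
	printf("masked offset: %llu\n", (unsigned long long)toy_offset(clean));
	return 0;
}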
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3f18452f3eb1..f8a11d634a7e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -905,6 +905,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (is_write_migration_entry(entry)) {
 			make_migration_entry_read(&entry);
 			pmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(pmd))
+				pmd = pmd_swp_mksoft_dirty(pmd);
 			set_pmd_at(src_mm, addr, src_pmd, pmd);
 		}
 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1707,6 +1709,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 }
 #endif
 
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (unlikely(is_pmd_migration_entry(pmd)))
+		pmd = pmd_swp_mksoft_dirty(pmd);
+	else if (pmd_present(pmd))
+		pmd = pmd_mksoft_dirty(pmd);
+#endif
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1749,7 +1762,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
-		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+		pmd = move_soft_dirty_pmd(pmd);
+		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
 		if (force_flush)
@@ -2751,6 +2765,8 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 			set_page_dirty(page);
 		entry = make_migration_entry(page, pmd_write(pmdval));
 		pmdswp = swp_entry_to_pmd(entry);
+		if (pmd_soft_dirty(pmdval))
+			pmdswp = pmd_swp_mksoft_dirty(pmdswp);
 		set_pmd_at(mm, address, pvmw->pmd, pmdswp);
 		page_remove_rmap(page, true);
 		put_page(page);
@@ -2767,6 +2783,8 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 			set_page_dirty(subpage);
 		entry = make_migration_entry(subpage, pte_write(pteval));
 		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
 		set_pte_at(mm, address, pvmw->pte, swp_pte);
 		page_remove_rmap(subpage, false);
 		put_page(subpage);
@@ -2790,6 +2808,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	entry = pmd_to_swp_entry(*pvmw->pmd);
 	get_page(new);
 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+	if (pmd_swp_soft_dirty(*pvmw->pmd))
+		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
 		pmde = maybe_pmd_mkwrite(pmde, vma);
-- 
2.11.0
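Taken together, the mm/huge_memory.c hunks are a save/restore pair:
set_pmd_migration_entry() transfers pmd_soft_dirty() into the swap-side
flag, and remove_migration_pmd() transfers it back into the rebuilt pmd.
A compressed stand-alone model of that round trip (pmd_t and the helpers
below are stand-ins mirroring the patch's structure, not kernel code):

/* Stand-alone model of the soft-dirty round trip over THP migration. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pmd_t;

#define PAGE_PRESENT	(1ULL << 0)
#define PAGE_SOFT_DIRTY	(1ULL << 7)	/* flag in a present pmd */
#define SWP_SOFT_DIRTY	(1ULL << 1)	/* flag in a migration entry */

static int   pmd_soft_dirty(pmd_t p)       { return !!(p.val & PAGE_SOFT_DIRTY); }
static pmd_t pmd_mksoft_dirty(pmd_t p)     { p.val |= PAGE_SOFT_DIRTY; return p; }
static int   pmd_swp_soft_dirty(pmd_t p)   { return !!(p.val & SWP_SOFT_DIRTY); }
static pmd_t pmd_swp_mksoft_dirty(pmd_t p) { p.val |= SWP_SOFT_DIRTY; return p; }

/* mirrors set_pmd_migration_entry(): present pmd -> migration entry */
static pmd_t to_migration_entry(pmd_t pmdval)
{
	pmd_t pmdswp = { 0 };		/* non-present migration entry */

	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	return pmdswp;
}

/* mirrors remove_migration_pmd(): migration entry -> present pmd */
static pmd_t from_migration_entry(pmd_t swp)
{
	pmd_t pmde = { PAGE_PRESENT };

	if (pmd_swp_soft_dirty(swp))
		pmde = pmd_mksoft_dirty(pmde);
	return pmde;
}

int main(void)
{
	pmd_t orig = pmd_mksoft_dirty((pmd_t){ PAGE_PRESENT });
	pmd_t back = from_migration_entry(to_migration_entry(orig));

	assert(pmd_soft_dirty(back));	/* bit survived the round trip */
	printf("soft-dirty preserved: %d\n", pmd_soft_dirty(back));
	return 0;
}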