Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S932402AbbLXQU6 (ORCPT );
	Thu, 24 Dec 2015 11:20:58 -0500
Received: from mga01.intel.com ([192.55.52.88]:24313 "EHLO mga01.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1751540AbbLXQUo (ORCPT );
	Thu, 24 Dec 2015 11:20:44 -0500
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.20,474,1444719600"; d="scan'208";a="878122147"
From: Matthew Wilcox
Cc: Matthew Wilcox , linux-mm@kvack.org, linux-nvdimm@ml01.01.org,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	x86@kernel.org
Subject: [PATCH 3/8] procfs: Add support for PUDs to smaps, clear_refs and pagemap
Date: Thu, 24 Dec 2015 11:20:32 -0500
Message-Id: <1450974037-24775-4-git-send-email-matthew.r.wilcox@intel.com>
X-Mailer: git-send-email 2.6.2
In-Reply-To: <1450974037-24775-1-git-send-email-matthew.r.wilcox@intel.com>
References: <1450974037-24775-1-git-send-email-matthew.r.wilcox@intel.com>
To: unlisted-recipients:; (no To-header on input)
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 5445
Lines: 181

From: Matthew Wilcox

Because there's no 'struct page' for DAX THPs, a lot of this code is
simpler than the PMD code it mimics.  Extra code would need to be added
to support PUDs of anonymous or page-cache THPs.
Signed-off-by: Matthew Wilcox
---
(Reviewer note: line breaks in this archived copy were collapsed; the
patch structure below is restored to match the declared 109 insertions
and per-hunk line counts. Fetch the Message-Id above for exact bytes.)

 fs/proc/task_mmu.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 67aaaad..6a9dad7 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -596,6 +596,33 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 }
 #endif
 
+static int smaps_pud_range(pud_t *pud, unsigned long addr, unsigned long end,
+		struct mm_walk *walk)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct vm_area_struct *vma = walk->vma;
+	struct mem_size_stats *mss = walk->private;
+
+	if (is_huge_zero_pud(*pud))
+		return 0;
+
+	mss->resident += HPAGE_PUD_SIZE;
+	if (vma->vm_flags & VM_SHARED) {
+		if (pud_dirty(*pud))
+			mss->shared_dirty += HPAGE_PUD_SIZE;
+		else
+			mss->shared_clean += HPAGE_PUD_SIZE;
+	} else {
+		if (pud_dirty(*pud))
+			mss->private_dirty += HPAGE_PUD_SIZE;
+		else
+			mss->private_clean += HPAGE_PUD_SIZE;
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+	return 0;
+}
+
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			struct mm_walk *walk)
 {
@@ -716,6 +743,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 	struct vm_area_struct *vma = v;
 	struct mem_size_stats mss;
 	struct mm_walk smaps_walk = {
+		.pud_entry = smaps_pud_range,
 		.pmd_entry = smaps_pte_range,
 #ifdef CONFIG_HUGETLB_PAGE
 		.hugetlb_entry = smaps_hugetlb_range,
@@ -901,13 +929,50 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 }
 
+static inline void clear_soft_dirty_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t *pudp)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	pud_t pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
+
+	pud = pud_wrprotect(pud);
+	pud = pud_clear_soft_dirty(pud);
+
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		vma->vm_flags &= ~VM_SOFTDIRTY;
+
+	set_pud_at(vma->vm_mm, addr, pudp, pud);
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+}
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
 }
+static inline void clear_soft_dirty_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t *pudp)
+{
+}
 #endif
 
+static int clear_refs_pud_range(pud_t *pud, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+
+	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+		clear_soft_dirty_pud(vma, addr, pud);
+	} else {
+		/* Clear accessed and referenced bits. */
+		pudp_test_and_clear_young(vma, addr, pud);
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+	return 0;
+}
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -1017,6 +1082,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		.type = type,
 	};
 	struct mm_walk clear_refs_walk = {
+		.pud_entry = clear_refs_pud_range,
 		.pmd_entry = clear_refs_pte_range,
 		.test_walk = clear_refs_test_walk,
 		.mm = mm,
@@ -1181,6 +1247,48 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 	return make_pme(frame, flags);
 }
 
+static int pagemap_pud_range(pud_t *pudp, unsigned long addr, unsigned long end,
+		struct mm_walk *walk)
+{
+	int err = 0;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct vm_area_struct *vma = walk->vma;
+	struct pagemapread *pm = walk->private;
+	u64 flags = 0, frame = 0;
+	pud_t pud = *pudp;
+
+	if ((vma->vm_flags & VM_SOFTDIRTY) || pud_soft_dirty(pud))
+		flags |= PM_SOFT_DIRTY;
+
+	/*
+	 * Currently pud for thp is always present because thp
+	 * cannot be swapped-out, migrated, or HWPOISONed
+	 * (split in such cases instead).
+	 * This if-check is just to prepare for future implementation.
+	 */
+	if (pud_present(pud)) {
+		flags |= PM_PRESENT;
+		if (!(vma->vm_flags & VM_SHARED))
+			flags |= PM_MMAP_EXCLUSIVE;
+
+		if (pm->show_pfn)
+			frame = pud_pfn(pud) +
+				((addr & ~PUD_MASK) >> PAGE_SHIFT);
+
+		for (; addr != end; addr += PAGE_SIZE) {
+			pagemap_entry_t pme = make_pme(frame, flags);
+
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				break;
+			if (pm->show_pfn && (flags & PM_PRESENT))
+				frame++;
+		}
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+	return err;
+}
+
 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			struct mm_walk *walk)
 {
@@ -1359,6 +1467,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!pm.buffer)
 		goto out_mm;
 
+	pagemap_walk.pud_entry = pagemap_pud_range;
 	pagemap_walk.pmd_entry = pagemap_pmd_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
 #ifdef CONFIG_HUGETLB_PAGE
-- 
2.6.2

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/