From: Stephen Wilson
To: Andrew Morton
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, KOSAKI Motohiro,
    Stephen Wilson, Hugh Dickins, David Rientjes, Lee Schermerhorn,
    Alexey Dobriyan, Christoph Lameter
Subject: [PATCH v2 2/9] mm: use walk_page_range() instead of custom page table walking code
Date: Sun, 15 May 2011 18:20:22 -0400
Message-Id: <1305498029-11677-3-git-send-email-wilsons@start.ca>
X-Mailer: git-send-email 1.7.4.4
In-Reply-To: <1305498029-11677-1-git-send-email-wilsons@start.ca>
References: <1305498029-11677-1-git-send-email-wilsons@start.ca>

Converting show_numa_map() to use the generic routine decouples the
function from mempolicy.c, allowing it to be moved out of the mm
subsystem and into fs/proc.

Also, include KSM pages in /proc/pid/numa_maps statistics.  The pagewalk
logic implemented by check_pte_range() failed to account for such pages
as they were not applicable to the page migration case.

Signed-off-by: Stephen Wilson
Reviewed-by: KOSAKI Motohiro
Cc: Hugh Dickins
Cc: David Rientjes
Cc: Lee Schermerhorn
Cc: Alexey Dobriyan
Cc: Christoph Lameter
---
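[ Note for reviewers: below is a minimal sketch of the pagewalk pattern
  this patch adopts -- illustration only, not part of the diff.  The
  example_* names are hypothetical; the callback signatures follow the
  struct mm_walk definition used by the code further down. ]

#include <linux/mm.h>

/* Called once per pmd in the walked range; walk->private carries
 * caller state, just as struct numa_maps is threaded through
 * gather_pte_stats() in this patch. */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	return 0;
}

static void example_walk_vma(struct vm_area_struct *vma)
{
	struct mm_walk walk = {
		.pmd_entry	= example_pmd_entry,
		.mm		= vma->vm_mm,
		.private	= NULL,		/* caller-owned cookie */
	};

	/* walk_page_range() drives the traversal and dispatches to the
	 * registered callbacks; hugetlb VMAs are routed to the optional
	 * ->hugetlb_entry callback instead of ->pmd_entry, which is why
	 * show_numa_map() below registers both. */
	walk_page_range(vma->vm_start, vma->vm_end, &walk);
}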
 mm/mempolicy.c |   75 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6cc997d..c894671 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2547,6 +2547,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
 }

 struct numa_maps {
+	struct vm_area_struct *vma;
 	unsigned long pages;
 	unsigned long anon;
 	unsigned long active;
@@ -2584,6 +2585,41 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 	md->node[page_to_nid(page)]++;
 }

+static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+		unsigned long end, struct mm_walk *walk)
+{
+	struct numa_maps *md;
+	spinlock_t *ptl;
+	pte_t *orig_pte;
+	pte_t *pte;
+
+	md = walk->private;
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	do {
+		struct page *page;
+		int nid;
+
+		if (!pte_present(*pte))
+			continue;
+
+		page = vm_normal_page(md->vma, addr, *pte);
+		if (!page)
+			continue;
+
+		if (PageReserved(page))
+			continue;
+
+		nid = page_to_nid(page);
+		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+			continue;
+
+		gather_stats(page, md, pte_dirty(*pte));
+
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(orig_pte, ptl);
+	return 0;
+}
+
 #ifdef CONFIG_HUGETLB_PAGE
 static void check_huge_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end,
@@ -2613,12 +2649,35 @@ static void check_huge_range(struct vm_area_struct *vma,
 		gather_stats(page, md, pte_dirty(*ptep));
 	}
 }
+
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	struct page *page;
+
+	if (pte_none(*pte))
+		return 0;
+
+	page = pte_page(*pte);
+	if (!page)
+		return 0;
+
+	gather_stats(page, walk->private, pte_dirty(*pte));
+	return 0;
+}
+
 #else
 static inline void check_huge_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end,
 		struct numa_maps *md)
 {
 }
+
+static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
+		unsigned long addr, unsigned long end, struct mm_walk *walk)
+{
+	return 0;
+}
 #endif

 /*
@@ -2631,6 +2690,7 @@ int show_numa_map(struct seq_file *m, void *v)
 	struct numa_maps *md;
 	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
+	struct mm_walk walk = {};
 	struct mempolicy *pol;
 	int n;
 	char buffer[50];
@@ -2642,6 +2702,13 @@ int show_numa_map(struct seq_file *m, void *v)
 	if (!md)
 		return 0;

+	md->vma = vma;
+
+	walk.hugetlb_entry = gather_hugetbl_stats;
+	walk.pmd_entry = gather_pte_stats;
+	walk.private = md;
+	walk.mm = mm;
+
 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
 	mpol_cond_put(pol);
@@ -2658,13 +2725,7 @@ int show_numa_map(struct seq_file *m, void *v)
 		seq_printf(m, " stack");
 	}

-	if (is_vm_hugetlb_page(vma)) {
-		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
-		seq_printf(m, " huge");
-	} else {
-		check_pgd_range(vma, vma->vm_start, vma->vm_end,
-			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
-	}
+	walk_page_range(vma->vm_start, vma->vm_end, &walk);

 	if (!md->pages)
 		goto out;
--
1.7.4.4