Date: Tue, 10 Nov 2009 22:00:43 +0000 (GMT)
From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
To: Andrew Morton
cc: Izik Eidus, Andrea Arcangeli,
    linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 4/6] mm: pass address down to rmap ones

KSM swapping will know where page_referenced_one() and try_to_unmap_one()
should look.  It could hack page->index to get them to do what it wants,
but it seems cleaner now to pass the address down to them.

Make the same change to page_mkclean_one(), since it follows the same
pattern; but there's no real need in its case.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
---

 mm/rmap.c |   53 +++++++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 26 deletions(-)

--- mm3/mm/rmap.c	2009-11-04 10:52:58.000000000 +0000
+++ mm4/mm/rmap.c	2009-11-04 10:53:05.000000000 +0000
@@ -336,21 +336,15 @@ int page_mapped_in_vma(struct page *page
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
-static int page_referenced_one(struct page *page,
-			       struct vm_area_struct *vma,
-			       unsigned int *mapcount,
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			       unsigned long address, unsigned int *mapcount,
 			       unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int referenced = 0;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -408,6 +402,9 @@ static int page_referenced_anon(struct p
 
 	mapcount = page_mapcount(page);
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -415,7 +412,7 @@ static int page_referenced_anon(struct p
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma,
+		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
@@ -473,6 +470,9 @@ static int page_referenced_file(struct p
 	mapcount = page_mapcount(page);
 
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -480,7 +480,7 @@ static int page_referenced_file(struct p
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma,
+		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
@@ -534,18 +534,14 @@ int page_referenced(struct page *page,
 	return referenced;
 }
 
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+			    unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 1);
 	if (!pte)
 		goto out;
@@ -577,8 +573,12 @@ static int page_mkclean_file(struct addr
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (vma->vm_flags & VM_SHARED)
-			ret += page_mkclean_one(page, vma);
+		if (vma->vm_flags & VM_SHARED) {
+			unsigned long address = vma_address(page, vma);
+			if (address == -EFAULT)
+				continue;
+			ret += page_mkclean_one(page, vma, address);
+		}
 	}
 	spin_unlock(&mapping->i_mmap_lock);
 	return ret;
@@ -760,19 +760,14 @@ void page_remove_rmap(struct page *page)
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-				enum ttu_flags flags)
+				unsigned long address, enum ttu_flags flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -1017,7 +1012,10 @@ static int try_to_unmap_anon(struct page
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		ret = try_to_unmap_one(page, vma, flags);
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
 	}
@@ -1055,7 +1053,10 @@ static int try_to_unmap_file(struct page
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		ret = try_to_unmap_one(page, vma, flags);
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
 	}
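
For context, not part of the patch: the -EFAULT convention that the new
per-vma checks rely on comes from vma_address() in mm/rmap.c, which works
out where the page is (or would be) mapped in a given vma from its pgoff,
and returns -EFAULT when the page does not fall inside that vma.  A rough
sketch of that 2.6.32-era helper, approximate and shown only to illustrate
why callers can simply skip a vma on -EFAULT:

	/* Rough sketch of the existing mm/rmap.c helper; for illustration only. */
	static inline unsigned long
	vma_address(struct page *page, struct vm_area_struct *vma)
	{
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		unsigned long address;

		/* Linear mapping: derive the address from the file offset. */
		address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
			/* Page is not mapped in this vma: caller should skip it. */
			return -EFAULT;
		}
		return address;
	}

With the address passed down, page_referenced_one(), page_mkclean_one() and
try_to_unmap_one() no longer repeat this lookup themselves, and a later
caller (KSM swapping) can supply an address it already knows.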