From: Minchan Kim
To: Andrew Morton
Cc: linux-mm, LKML, Christoph Lameter, Johannes Weiner, KAMEZAWA Hiroyuki,
    Minchan Kim, KOSAKI Motohiro, Mel Gorman, Rik van Riel, Andrea Arcangeli
Subject: [RFC 7/8] migration: make in-order-putback aware
Date: Wed, 27 Apr 2011 01:25:24 +0900
Message-Id: <1f162d17040ab50ffea1ef53d4cd16348d3e7c2d.1303833418.git.minchan.kim@gmail.com>
X-Mailer: git-send-email 1.7.1
X-Mailing-List: linux-kernel@vger.kernel.org

This patch makes migrate_pages() aware of in-order putback.
It should not change the old behavior; the new keep_lru path
will be used by the next patch.

Cc: KOSAKI Motohiro
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Andrea Arcangeli
Signed-off-by: Minchan Kim
---
 include/linux/migrate.h |    4 +-
 mm/compaction.c         |    2 +-
 mm/memory-failure.c     |    2 +-
 mm/memory_hotplug.c     |    2 +-
 mm/mempolicy.c          |    4 +-
 mm/migrate.c            |   95 +++++++++++++++++++++++++++++++++++------------
 6 files changed, 78 insertions(+), 31 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3aa5ab6..f842fc8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -15,7 +15,7 @@ extern int migrate_page(struct address_space *,
                         struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
                         unsigned long private, bool offlining,
-                        bool sync);
+                        bool sync, bool keep_lru);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
                         unsigned long private, bool offlining,
                         bool sync);
@@ -38,7 +38,7 @@ static inline void putback_pages_lru(struct list_head *l) {}
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
                 unsigned long private, bool offlining,
-                bool sync) { return -ENOSYS; }
+                bool sync, bool keep_lru) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
                 unsigned long private, bool offlining,
                 bool sync) { return -ENOSYS; }
diff --git a/mm/compaction.c b/mm/compaction.c
index c453000..a2f6e96 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -529,7 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                 nr_migrate = cc->nr_migratepages;
                 err = migrate_pages(&cc->migratepages, compaction_alloc,
                                 (unsigned long)cc, false,
-                                cc->sync);
+                                cc->sync, false);
                 update_nr_listpages(cc);
                 nr_remaining = cc->nr_migratepages;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2b9a5ee..395a99e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1466,7 +1466,7 @@ int soft_offline_page(struct page *page, int flags)

                 list_add(&page->lru, &pagelist);
                 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-                                                0, true);
+                                                0, true, false);
                 if (ret) {
                         putback_lru_pages(&pagelist);
                         pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 59ac18f..75dd241 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -741,7 +741,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                 }
                 /* this function returns # of failed pages */
                 ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
-                                                true, true);
+                                                true, true, false);
                 if (ret)
                         putback_lru_pages(&source);
         }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8e57a72..9fe702a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -938,7 +938,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,

         if (!list_empty(&pagelist)) {
                 err = migrate_pages(&pagelist, new_node_page, dest,
-                                                false, true);
+                                                false, true, false);
                 if (err)
                         putback_lru_pages(&pagelist);
         }
@@ -1159,7 +1159,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                 if (!list_empty(&pagelist)) {
                         nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                 (unsigned long)vma,
-                                                false, true);
+                                                false, true, false);
                         if (nr_failed)
                                 putback_lru_pages(&pagelist);
                 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 9cfb63b..871e6ee 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -662,7 +662,8 @@ static int move_to_new_page(struct page *newpage, struct page *page,
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                        struct page *page, int force, bool offlining, bool sync)
+                        struct page *page, int force, bool offlining,
+                        bool sync, struct pages_lru *pages_lru)
 {
         int rc = 0;
         int *result = NULL;
@@ -671,6 +672,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         int charge = 0;
         struct mem_cgroup *mem;
         struct anon_vma *anon_vma = NULL;
+        bool del_pages_lru = false;

         if (!newpage)
                 return -ENOMEM;
@@ -834,7 +836,13 @@ move_newpage:
          * migrated will have kepts its references and be
          * restored.
          */
-        list_del(&page->lru);
+        if (pages_lru) {
+                list_del(&pages_lru->lru);
+                del_pages_lru = true;
+        }
+        else
+                list_del(&page->lru);
+
         dec_zone_page_state(page, NR_ISOLATED_ANON +
                         page_is_file_cache(page));
         putback_lru_page(page);
@@ -844,7 +852,21 @@
          * Move the new page to the LRU. If migration was not successful
          * then this will free the page.
          */
-        putback_lru_page(newpage);
+        if (pages_lru) {
+                struct zone *zone = page_zone(page);
+                spin_lock_irq(&zone->lru_lock);
+                if (keep_lru_order(pages_lru)) {
+                        putback_page_to_lru(newpage, &pages_lru->prev_page->lru);
+                        spin_unlock_irq(&zone->lru_lock);
+                }
+                else {
+                        spin_unlock_irq(&zone->lru_lock);
+                        putback_lru_page(newpage);
+                }
+
+                if (del_pages_lru)
+                        kfree(pages_lru);
+        }

         if (result) {
                 if (rc)
@@ -947,13 +969,13 @@ out:
  */
 int migrate_pages(struct list_head *from,
                 new_page_t get_new_page, unsigned long private, bool offlining,
-                bool sync)
+                bool sync, bool keep_lru)
 {
         int retry = 1;
         int nr_failed = 0;
         int pass = 0;
-        struct page *page;
-        struct page *page2;
+        struct page *page, *page2;
+        struct pages_lru *pages_lru, *pages_lru2;
         int swapwrite = current->flags & PF_SWAPWRITE;
         int rc;

@@ -962,26 +984,51 @@ int migrate_pages(struct list_head *from,
         for(pass = 0; pass < 10 && retry; pass++) {
                 retry = 0;
+                if (!keep_lru) {
+                        list_for_each_entry_safe(page, page2, from, lru) {
+                                cond_resched();

-                list_for_each_entry_safe(page, page2, from, lru) {
-                        cond_resched();
-
-                        rc = unmap_and_move(get_new_page, private,
+                                rc = unmap_and_move(get_new_page, private,
                                         page, pass > 2, offlining,
-                                        sync);
-
-                        switch(rc) {
-                        case -ENOMEM:
-                                goto out;
-                        case -EAGAIN:
-                                retry++;
-                                break;
-                        case 0:
-                                break;
-                        default:
-                                /* Permanent failure */
-                                nr_failed++;
-                                break;
+                                        sync, NULL);
+
+                                switch(rc) {
+                                case -ENOMEM:
+                                        goto out;
+                                case -EAGAIN:
+                                        retry++;
+                                        break;
+                                case 0:
+                                        break;
+                                default:
+                                        /* Permanent failure */
+                                        nr_failed++;
+                                        break;
+                                }
+                        }
+                }
+                else {
+
+                        list_for_each_entry_safe(pages_lru, pages_lru2, from, lru) {
+                                cond_resched();
+
+                                rc = unmap_and_move(get_new_page, private,
+                                        pages_lru->page, pass > 2, offlining,
+                                        sync, pages_lru);
+
+                                switch(rc) {
+                                case -ENOMEM:
+                                        goto out;
+                                case -EAGAIN:
+                                        retry++;
+                                        break;
+                                case 0:
+                                        break;
+                                default:
+                                        /* Permanent failure */
+                                        nr_failed++;
+                                        break;
+                                }
                         }
                 }
         }
--
1.7.1
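
A note on context: the keep_lru == true path relies on struct pages_lru,
keep_lru_order() and putback_page_to_lru(), which are introduced by the
earlier patches in this series and are not shown in this mail. The sketch
below only illustrates that contract as this patch uses it (unmap_and_move()
dereferences pages_lru->page, pages_lru->prev_page and pages_lru->lru); the
struct layout and the caller function shown here are illustrative
assumptions, not the definitions from the series.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/swap.h>

/*
 * Illustrative only: assumed shape of the bookkeeping entry defined by the
 * earlier patches, limited to the fields this patch actually dereferences.
 */
struct pages_lru {
	struct page *page;	/* page to migrate */
	struct page *prev_page;	/* old LRU neighbour; newpage is put back after it */
	struct list_head lru;	/* links the entries handed to migrate_pages() */
};

/*
 * Hypothetical caller of the new in-order path (the real caller is added by
 * the next patch): isolate the page, remember its old LRU neighbour, and
 * call migrate_pages() with keep_lru == true so the new page goes back to
 * the old page's position instead of the head of the LRU.
 */
static int migrate_page_in_order(struct page *page, struct page *prev,
				 new_page_t get_new_page, unsigned long private)
{
	LIST_HEAD(migrate_list);
	struct pages_lru *entry;
	int err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (isolate_lru_page(page)) {	/* returns 0 on success */
		kfree(entry);
		return -EBUSY;
	}

	entry->page = page;
	entry->prev_page = prev;	/* sampled by the caller under zone->lru_lock */
	list_add(&entry->lru, &migrate_list);

	/* offlining == false, sync == true, keep_lru == true */
	err = migrate_pages(&migrate_list, get_new_page, private,
			    false, true, true);
	if (err)
		putback_pages_lru(&migrate_list);

	return err;
}

With that contract, a caller that wants pages to return to their old LRU
position builds a list of pages_lru entries and passes keep_lru == true,
while every existing caller keeps passing false and takes the unchanged
list_for_each_entry_safe(page, ...) path, which is why this patch by itself
is not supposed to change behavior.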