Message-Id: <20121025124834.161540645@chello.nl>
User-Agent: quilt/0.48-1
Date: Thu, 25 Oct 2012 14:16:39 +0200
From: Peter Zijlstra
To: Rik van Riel, Andrea Arcangeli, Mel Gorman, Johannes Weiner,
    Thomas Gleixner, Linus Torvalds, Andrew Morton
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
    Peter Zijlstra, Ingo Molnar
Subject: [PATCH 22/31] sched, numa, mm: Implement THP migration
References: <20121025121617.617683848@chello.nl>
Content-Disposition: inline; filename=0022-sched-numa-mm-Implement-THP-migration.patch

Add THP migration for the NUMA working set scanning fault case.

It uses the page lock to serialize.  No migration pte dance is
necessary because the pte is already unmapped when we decide
to migrate.

Signed-off-by: Peter Zijlstra
Cc: Johannes Weiner
Cc: Mel Gorman
Cc: Andrea Arcangeli
Cc: Andrew Morton
Cc: Linus Torvalds
[ Significant fixes and changelog. ]
Signed-off-by: Ingo Molnar
---
 mm/huge_memory.c |  133 ++++++++++++++++++++++++++++++++++++++++++-------------
 mm/migrate.c     |    2 
 2 files changed, 104 insertions(+), 31 deletions(-)

Index: tip/mm/huge_memory.c
===================================================================
--- tip.orig/mm/huge_memory.c
+++ tip/mm/huge_memory.c
@@ -742,12 +742,13 @@ void do_huge_pmd_numa_page(struct mm_str
 			   unsigned int flags, pmd_t entry)
 {
 	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct page *new_page = NULL;
 	struct page *page = NULL;
-	int node;
+	int node, lru;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(*pmd, entry)))
-		goto out_unlock;
+		goto unlock;
 
 	if (unlikely(pmd_trans_splitting(entry))) {
 		spin_unlock(&mm->page_table_lock);
@@ -755,45 +756,117 @@ void do_huge_pmd_numa_page(struct mm_str
 		return;
 	}
 
-#ifdef CONFIG_NUMA
 	page = pmd_page(entry);
-	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	if (page) {
+		VM_BUG_ON(!PageCompound(page) || !PageHead(page));
 
-	get_page(page);
+		get_page(page);
+		node = mpol_misplaced(page, vma, haddr);
+		if (node != -1)
+			goto migrate;
+	}
+
+fixup:
+	/* change back to regular protection */
+	entry = pmd_modify(entry, vma->vm_page_prot);
+	set_pmd_at(mm, haddr, pmd, entry);
+	update_mmu_cache_pmd(vma, address, entry);
+
+unlock:
 	spin_unlock(&mm->page_table_lock);
+	if (page)
+		put_page(page);
 
-	/*
-	 * XXX should we serialize against split_huge_page ?
-	 */
-
-	node = mpol_misplaced(page, vma, haddr);
-	if (node == -1)
-		goto do_fixup;
-
-	/*
-	 * Due to lacking code to migrate thp pages, we'll split
-	 * (which preserves the special PROT_NONE) and re-take the
-	 * fault on the normal pages.
-	 */
-	split_huge_page(page);
-	put_page(page);
 	return;
 
-do_fixup:
+migrate:
+	spin_unlock(&mm->page_table_lock);
+
+	lock_page(page);
 	spin_lock(&mm->page_table_lock);
-	if (unlikely(!pmd_same(*pmd, entry)))
-		goto out_unlock;
-#endif
+	if (unlikely(!pmd_same(*pmd, entry))) {
+		spin_unlock(&mm->page_table_lock);
+		unlock_page(page);
+		put_page(page);
+		return;
+	}
+	spin_unlock(&mm->page_table_lock);
 
-	/* change back to regular protection */
-	entry = pmd_modify(entry, vma->vm_page_prot);
-	if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
-		update_mmu_cache_pmd(vma, address, entry);
+	new_page = alloc_pages_node(node,
+	    (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT,
+	    HPAGE_PMD_ORDER);
+
+	if (!new_page)
+		goto alloc_fail;
+
+	lru = PageLRU(page);
+
+	if (lru && isolate_lru_page(page)) /* does an implicit get_page() */
+		goto alloc_fail;
+
+	if (!trylock_page(new_page))
+		BUG();
+
+	/* anon mapping, we can simply copy page->mapping to the new page: */
+	new_page->mapping = page->mapping;
+	new_page->index = page->index;
 
-out_unlock:
+	migrate_page_copy(new_page, page);
+
+	WARN_ON(PageLRU(new_page));
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, entry))) {
+		spin_unlock(&mm->page_table_lock);
+		if (lru)
+			putback_lru_page(page);
+
+		unlock_page(new_page);
+		ClearPageActive(new_page);	/* Set by migrate_page_copy() */
+		new_page->mapping = NULL;
+		put_page(new_page);		/* Free it */
+
+		unlock_page(page);
+		put_page(page);			/* Drop the local reference */
+
+		return;
+	}
+
+	entry = mk_pmd(new_page, vma->vm_page_prot);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = pmd_mkhuge(entry);
+
+	page_add_new_anon_rmap(new_page, vma, haddr);
+
+	set_pmd_at(mm, haddr, pmd, entry);
+	update_mmu_cache_pmd(vma, address, entry);
+	page_remove_rmap(page);
 	spin_unlock(&mm->page_table_lock);
-	if (page)
+
+	put_page(page);			/* Drop the rmap reference */
+
+	if (lru)
+		put_page(page);		/* drop the LRU isolation reference */
+
+	unlock_page(new_page);
+	unlock_page(page);
+	put_page(page);			/* Drop the local reference */
+
+	return;
+
+alloc_fail:
+	if (new_page)
+		put_page(new_page);
+
+	unlock_page(page);
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, entry))) {
 		put_page(page);
+		page = NULL;
+		goto unlock;
+	}
+	goto fixup;
 }
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
Index: tip/mm/migrate.c
===================================================================
--- tip.orig/mm/migrate.c
+++ tip/mm/migrate.c
@@ -417,7 +417,7 @@ int migrate_huge_page_move_mapping(struc
  */
 void migrate_page_copy(struct page *newpage, struct page *page)
 {
-	if (PageHuge(page))
+	if (PageHuge(page) || PageTransHuge(page))
 		copy_huge_page(newpage, page);
 	else
 		copy_highpage(newpage, page);
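
[Editorial note] For readers who want the locking pattern in isolation: the
migrate path above snapshots the PMD, drops the page table lock to take the
page lock and to allocate and copy the new huge page, and only installs the
copy after re-checking pmd_same(); if the PMD changed in the meantime it
backs out and frees the copy.  What follows is a minimal userspace sketch of
that snapshot/lock/revalidate pattern, not kernel code: the names pmd_entry
and migrate_huge_page_model are invented for illustration, with pthread
mutexes standing in for page_table_lock and the page lock.

/*
 * Userspace model (illustration only) of the revalidation pattern used by
 * the patch: snapshot under page_table_lock, drop it, take the page lock,
 * allocate, then re-take page_table_lock and commit only if pmd_same()
 * still holds.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page_lock       = PTHREAD_MUTEX_INITIALIZER;
static uintptr_t pmd_entry = 0x1000;		/* stands in for *pmd */

static bool pmd_same(uintptr_t a, uintptr_t b)
{
	return a == b;
}

static bool migrate_huge_page_model(void)
{
	uintptr_t entry;
	void *new_page;

	/* Snapshot the entry under page_table_lock (the 'entry' argument). */
	pthread_mutex_lock(&page_table_lock);
	entry = pmd_entry;
	pthread_mutex_unlock(&page_table_lock);

	/* The page lock serializes against concurrent split/migration. */
	pthread_mutex_lock(&page_lock);

	/* Allocation and copy happen with no spinlock held, as in the patch. */
	new_page = malloc(2 * 1024 * 1024);
	if (!new_page) {
		pthread_mutex_unlock(&page_lock);
		return false;			/* the "alloc_fail" path */
	}

	/* Re-take the lock and revalidate before committing. */
	pthread_mutex_lock(&page_table_lock);
	if (!pmd_same(pmd_entry, entry)) {
		/* Someone changed the PMD meanwhile: back out, free the copy. */
		pthread_mutex_unlock(&page_table_lock);
		pthread_mutex_unlock(&page_lock);
		free(new_page);
		return false;
	}
	pmd_entry = (uintptr_t)new_page;	/* commit: point the PMD at the copy */
	pthread_mutex_unlock(&page_table_lock);

	pthread_mutex_unlock(&page_lock);
	return true;
}

int main(void)
{
	printf("migration %s\n",
	       migrate_huge_page_model() ? "committed" : "backed out");
	return 0;
}

The double-checked pmd_same() test is what makes it safe to drop the page
table lock for the allocation and the copy: any concurrent fault, split or
unmap changes the PMD, and the migrate path then releases its references
instead of installing a stale copy.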