Commit-ID: 5cc4a4cb0abc63699b6741d7737e07e49b502782
Gitweb: http://git.kernel.org/tip/5cc4a4cb0abc63699b6741d7737e07e49b502782
Author: Peter Zijlstra <[email protected]>
AuthorDate: Thu, 11 Oct 2012 17:42:06 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 15 Oct 2012 13:56:50 +0200
sched/numa/mm: Avoid pointless TLB invalidation from page-migration
When we do migrate-on-fault we have faulted on a PROT_NONE entry,
which is a !present entry. We currently replace it with a regular
(present) entry and only then proceed with page-migration.
Page-migration in turn replaces the now present entry with a
migration-PTE (which is again !present), and replacing a present
entry requires a TLB invalidate.
Instead, leave the PROT_NONE entry in place when we need to migrate,
such that the PROT_NONE -> migration-PTE transition is a !present ->
!present transition and doesn't require a TLB invalidate.
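
To make the saving concrete, here is a toy user-space model (not
kernel code; the single rule "overwriting a present entry costs a TLB
invalidate, overwriting a !present one does not" is a deliberate
simplification). It replays both orderings and counts flushes:

	#include <stdio.h>
	#include <stdbool.h>

	/*
	 * Toy rule: overwriting a *present* entry requires a TLB
	 * invalidate, since the hardware may have it cached;
	 * overwriting a !present entry does not.
	 */
	static int flushes;

	static bool set_pte(bool old_present, bool new_present)
	{
		if (old_present)
			flushes++;
		return new_present;
	}

	int main(void)
	{
		bool present;

		/* Old flow: fix up to a present entry, then migrate. */
		flushes = 0;
		present = false;                   /* PROT_NONE entry     */
		present = set_pte(present, true);  /* regular entry       */
		present = set_pte(present, false); /* migration-PTE: flush */
		printf("old flow: %d flush(es)\n", flushes);

		/* New flow: migrate straight from the PROT_NONE entry. */
		flushes = 0;
		present = false;                   /* PROT_NONE entry        */
		present = set_pte(present, false); /* migration-PTE: no flush */
		present = set_pte(present, true);  /* new mapping            */
		printf("new flow: %d flush(es)\n", flushes);

		return 0;
	}

The old ordering reports one flush, the new ordering none, which is
exactly the transition argument above.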
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
mm/memory.c | 63 +++++++++++++++++++++++++++++------------------------------
1 files changed, 31 insertions(+), 32 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9ada7ed..8b1ad86 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3442,38 +3442,12 @@ static bool pte_prot_none(struct vm_area_struct *vma, pte_t pte)
return pte_same(pte, pte_modify(pte, vma_prot_none(vma)));
}
-#ifdef CONFIG_NUMA
-
-
-static void do_prot_none_numa(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, struct page *page)
-{
- int node, page_nid = page_to_nid(page);
-
- task_numa_placement();
-
- /*
- * For NUMA systems we use the special PROT_NONE maps to drive
- * lazy page migration, see MPOL_MF_LAZY and related.
- */
- node = mpol_misplaced(page, vma, address);
- if (node != -1 && !migrate_misplaced_page(mm, page, node))
- page_nid = node;
-
- task_numa_fault(page_nid);
-}
-#else
-static void do_prot_none_numa(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, struct page *page)
-{
-}
-#endif /* CONFIG_NUMA */
-
static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pmd_t *pmd,
unsigned int flags, pte_t entry)
{
struct page *page = NULL;
+ int node, page_nid = -1;
spinlock_t *ptl;
ptl = pte_lockptr(mm, pmd);
@@ -3481,6 +3455,16 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!pte_same(*ptep, entry)))
goto unlock;
+ page = vm_normal_page(vma, address, entry);
+ if (page) {
+ get_page(page);
+ page_nid = page_to_nid(page);
+ node = mpol_misplaced(page, vma, address);
+ if (node != -1)
+ goto migrate;
+ }
+
+fixup:
flush_cache_page(vma, address, pte_pfn(entry));
ptep_modify_prot_start(mm, address, ptep);
@@ -3489,17 +3473,32 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
update_mmu_cache(vma, address, ptep);
- page = vm_normal_page(vma, address, entry);
- if (page)
- get_page(page);
-
unlock:
pte_unmap_unlock(ptep, ptl);
+out:
if (page) {
- do_prot_none_numa(mm, vma, address, page);
+ task_numa_fault(page_nid, 1);
put_page(page);
}
+
return 0;
+
+migrate:
+ pte_unmap_unlock(ptep, ptl);
+
+ if (!migrate_misplaced_page(mm, page, node)) {
+ page_nid = node;
+ goto out;
+ }
+
+ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (!pte_same(*ptep, entry)) {
+ put_page(page);
+ page = NULL;
+ goto unlock;
+ }
+
+ goto fixup;
}
/*
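
The migrate: path above drops the PTE lock before calling
migrate_misplaced_page() and, on failure, re-takes the lock and
re-checks pte_same() before falling back to fixup:. A sketch of that
unlock/revalidate idiom, as a user-space model only (a pthread mutex
stands in for the PTE lock; all names here are illustrative, not
kernel APIs):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
	static int entry;	/* stands in for the pte contents */

	/* Slow path, done without the lock held; may fail. */
	static bool slow_migrate(int snapshot)
	{
		(void)snapshot;
		return false;	/* pretend migration failed */
	}

	static void handle_fault(void)
	{
		pthread_mutex_lock(&ptl);
		int snapshot = entry;		/* like the saved 'entry' pte   */
		pthread_mutex_unlock(&ptl);	/* can't migrate under the lock */

		if (slow_migrate(snapshot))
			return;		/* migration installed the new entry */

		pthread_mutex_lock(&ptl);
		if (entry != snapshot) {	/* like !pte_same(*ptep, entry) */
			pthread_mutex_unlock(&ptl);
			return;		/* raced; someone else fixed it up */
		}
		entry = snapshot | 1;	/* 'fixup': make the entry present */
		pthread_mutex_unlock(&ptl);
	}

	int main(void)
	{
		handle_fault();
		return 0;
	}

The re-check is what makes it safe to operate on the saved entry
after having been unlocked for an arbitrarily long migration.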