2017-04-17 17:12:09

by Aneesh Kumar K.V

Subject: [PATCH 0/7] HugeTLB migration support for PPC64

This patch series adds support for hugetlb page migration on ppc64.

Aneesh Kumar K.V (6):
mm/hugetlb/migration: Use set_huge_pte_at instead of set_pte_at
mm/follow_page_mask: Split follow_page_mask to smaller functions.
mm/hugetlb: export hugetlb_entry_migration helper
mm/follow_page_mask: Add support for hugepage directory entry
powerpc/hugetlb: Add follow_huge_pd implementation for ppc64.
powerpc/hugetlb: Enable hugetlb migration for ppc64

Anshuman Khandual (1):
mm/follow_page_mask: Add support for hugetlb pgd entries.

arch/powerpc/mm/hugetlbpage.c | 43 ++++++++
arch/powerpc/platforms/Kconfig.cputype | 5 +
include/linux/hugetlb.h | 9 ++
mm/gup.c | 186 +++++++++++++++++++++++----------
mm/hugetlb.c | 25 ++++-
mm/migrate.c | 21 ++--
6 files changed, 219 insertions(+), 70 deletions(-)

--
2.7.4


2017-04-17 17:12:14

by Aneesh Kumar K.V

Subject: [PATCH 1/7] mm/hugetlb/migration: Use set_huge_pte_at instead of set_pte_at

The right interface for setting a hugetlb pte entry is set_huge_pte_at(). Use
that instead of set_pte_at().

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
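For context: on architectures with no special hugetlb requirements,
set_huge_pte_at() simply reduces to set_pte_at(), along the lines of the
asm-generic fallback sketched below; ppc64 overrides it to do extra work
when installing a hugetlb pte, which is why the migration path must go
through the huge variant.

/* A sketch of the generic fallback (cf. include/asm-generic/hugetlb.h) */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
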
mm/migrate.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0897a14d37..4c272ac6fe53 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -224,25 +224,26 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (is_write_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);

+ flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, vma, new, 0);
- }
-#endif
- flush_dcache_page(new);
- set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
-
- if (PageHuge(new)) {
+ set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
else
page_dup_rmap(new, true);
- } else if (PageAnon(new))
- page_add_anon_rmap(new, vma, pvmw.address, false);
- else
- page_add_file_rmap(new, false);
+ } else
+#endif
+ {
+ set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, pvmw.address, false);
+ else
+ page_add_file_rmap(new, false);
+ }
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);

--
2.7.4

2017-04-17 17:12:33

by Aneesh Kumar K.V

Subject: [PATCH 4/7] mm/follow_page_mask: Add support for hugetlb pgd entries.

From: Anshuman Khandual <[email protected]>

ppc64 supports hugetlb entries at the pgd level. Add code to handle hugetlb
pgd entries in follow_page_mask() so that ppc64 can switch to the generic code
for handling its hugetlb entries.

Signed-off-by: Anshuman Khandual <[email protected]>
Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
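For illustration, the new helper mirrors follow_huge_pud() one level up;
the offset math works out as:

/*
 * For a hugepage mapped at the pgd level, the target struct page is the
 * gigantic page's head page plus the offset of 'address' within the
 * PGDIR-sized region:
 *
 *	page = pte_page(*(pte_t *)pgd)
 *		+ ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
 */
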
include/linux/hugetlb.h | 4 ++++
mm/gup.c | 7 +++++++
mm/hugetlb.c | 9 +++++++++
3 files changed, 20 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index fddf6cf403d5..edab98f0a7b8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -121,6 +121,9 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+ pgd_t *pgd, int flags);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -150,6 +153,7 @@ static inline void hugetlb_show_meminfo(void)
}
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
+#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
diff --git a/mm/gup.c b/mm/gup.c
index 73d46f9f7b81..65255389620a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -357,6 +357,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return no_page_table(vma, flags);

+ if (pgd_huge(*pgd)) {
+ page = follow_huge_pgd(mm, address, pgd, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+
return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9b630e2195d5..83f39cf5162a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4694,6 +4694,15 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

+struct page * __weak
+follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
+{
+ if (flags & FOLL_GET)
+ return NULL;
+
+ return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+}
+
#ifdef CONFIG_MEMORY_FAILURE

/*
--
2.7.4

2017-04-17 17:12:40

by Aneesh Kumar K.V

Subject: [PATCH 6/7] powerpc/hugetlb: Add follow_huge_pd implementation for ppc64.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
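Two things worth noting in the implementation below: the lookup runs under
the mm-wide page_table_lock rather than a split page table lock, and a
migration entry makes the walker drop the lock, wait for the migration to
finish via __migration_entry_wait(), and retry. Condensed control flow:

retry:
	ptl = &mm->page_table_lock;
	spin_lock(ptl);
	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		/* compute the subpage; take a reference if FOLL_GET */
	} else if (is_hugetlb_entry_migration(*ptep)) {
		/* __migration_entry_wait() retakes and releases ptl */
		spin_unlock(ptl);
		__migration_entry_wait(mm, ptep, ptl);
		goto retry;
	}
	spin_unlock(ptl);
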
arch/powerpc/mm/hugetlbpage.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 80f6d2ed551a..5c829a83a4cc 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -17,6 +17,8 @@
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -618,6 +620,46 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
}

/*
+ * 64 bit book3s use generic follow_page_mask
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift)
+{
+ pte_t *ptep;
+ spinlock_t *ptl;
+ struct page *page = NULL;
+ unsigned long mask;
+ int shift = hugepd_shift(hpd);
+ struct mm_struct *mm = vma->vm_mm;
+
+retry:
+ ptl = &mm->page_table_lock;
+ spin_lock(ptl);
+
+ ptep = hugepte_offset(hpd, address, pdshift);
+ if (pte_present(*ptep)) {
+ mask = (1UL << shift) - 1;
+ page = pte_page(*ptep);
+ page += ((address & mask) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+ if (is_hugetlb_entry_migration(*ptep)) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, ptep, ptl);
+ goto retry;
+ }
+ }
+ spin_unlock(ptl);
+ return page;
+}
+
+#else /* !CONFIG_PPC_BOOK3S_64 */
+
+/*
* We are holding mmap_sem, so a parallel huge page collapse cannot run.
* To prevent hugepage split, disable irq.
*/
@@ -672,6 +714,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
BUG();
return NULL;
}
+#endif

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
--
2.7.4

2017-04-17 17:12:19

by Aneesh Kumar K.V

Subject: [PATCH 2/7] mm/follow_page_mask: Split follow_page_mask to smaller functions.

This makes the code easier to read. No functional changes in this patch. A
followup patch will update follow_page_mask() to handle the hugetlb hugepd
format so that archs like ppc64 can switch to the generic version; this split
makes that change cleaner.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
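The resulting structure is one helper per page table level:

/*
 * follow_page_mask()            pgd level
 *   follow_p4d_mask()           p4d level
 *     follow_pud_mask()         pud level
 *       follow_pmd_mask()       pmd level
 *         follow_page_pte()     pte level
 *
 * Each helper either resolves a page at its own level (huge or devmap
 * entries) or descends to the next level down.
 */
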
mm/gup.c | 148 +++++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 91 insertions(+), 57 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 04aa405350dc..73d46f9f7b81 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -208,68 +208,16 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
return no_page_table(vma, flags);
}

-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask)
+static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ unsigned int flags, unsigned int *page_mask)
{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;

- *page_mask = 0;
-
- page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
- if (!IS_ERR(page)) {
- BUG_ON(flags & FOLL_GET);
- return page;
- }
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return no_page_table(vma, flags);
- p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d))
- return no_page_table(vma, flags);
- BUILD_BUG_ON(p4d_huge(*p4d));
- if (unlikely(p4d_bad(*p4d)))
- return no_page_table(vma, flags);
- pud = pud_offset(p4d, address);
- if (pud_none(*pud))
- return no_page_table(vma, flags);
- if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pud(mm, address, pud, flags);
- if (page)
- return page;
- return no_page_table(vma, flags);
- }
- if (pud_devmap(*pud)) {
- ptl = pud_lock(mm, pud);
- page = follow_devmap_pud(vma, address, pud, flags);
- spin_unlock(ptl);
- if (page)
- return page;
- }
- if (unlikely(pud_bad(*pud)))
- return no_page_table(vma, flags);
-
- pmd = pmd_offset(pud, address);
+ pmd = pmd_offset(pudp, address);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
@@ -319,13 +267,99 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags);
}
-
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
*page_mask = HPAGE_PMD_NR - 1;
return page;
}

+
+static struct page *follow_pud_mask(struct vm_area_struct *vma,
+ unsigned long address, p4d_t *p4dp,
+ unsigned int flags, unsigned int *page_mask)
+{
+ pud_t *pud;
+ spinlock_t *ptl;
+ struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+
+ pud = pud_offset(p4dp, address);
+ if (pud_none(*pud))
+ return no_page_table(vma, flags);
+ if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+ page = follow_huge_pud(mm, address, pud, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if (pud_devmap(*pud)) {
+ ptl = pud_lock(mm, pud);
+ page = follow_devmap_pud(vma, address, pud, flags);
+ spin_unlock(ptl);
+ if (page)
+ return page;
+ }
+ if (unlikely(pud_bad(*pud)))
+ return no_page_table(vma, flags);
+
+ return follow_pmd_mask(vma, address, pud, flags, page_mask);
+}
+
+
+static struct page *follow_p4d_mask(struct vm_area_struct *vma,
+ unsigned long address, pgd_t *pgdp,
+ unsigned int flags, unsigned int *page_mask)
+{
+ p4d_t *p4d;
+
+ p4d = p4d_offset(pgdp, address);
+ if (p4d_none(*p4d))
+ return no_page_table(vma, flags);
+ BUILD_BUG_ON(p4d_huge(*p4d));
+ if (unlikely(p4d_bad(*p4d)))
+ return no_page_table(vma, flags);
+
+ return follow_pud_mask(vma, address, p4d, flags, page_mask);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
+{
+ pgd_t *pgd;
+ struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+
+ *page_mask = 0;
+
+ /* make this handle hugepd */
+ page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+ if (!IS_ERR(page)) {
+ BUG_ON(flags & FOLL_GET);
+ return page;
+ }
+
+ pgd = pgd_offset(mm, address);
+
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ return no_page_table(vma, flags);
+
+ return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+}
+
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
--
2.7.4

2017-04-17 17:12:29

by Aneesh Kumar K.V

Subject: [PATCH 3/7] mm/hugetlb: export hugetlb_entry_migration helper

We will use this later from the ppc64 code. Also change the return type to bool.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
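The intended use from ppc64 (see follow_huge_pd() later in the series) is as
a predicate on a non-present hugetlb pte, roughly:

	if (is_hugetlb_entry_migration(*ptep)) {
		/* drop the lock and wait for migration to complete */
		spin_unlock(ptl);
		__migration_entry_wait(mm, ptep, ptl);
		goto retry;
	}
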
include/linux/hugetlb.h | 1 +
mm/hugetlb.c | 8 ++++----
2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b857fc8cc2ec..fddf6cf403d5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -126,6 +126,7 @@ int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);

+bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2c090189f314..9b630e2195d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3189,17 +3189,17 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
update_mmu_cache(vma, address, ptep);
}

-static int is_hugetlb_entry_migration(pte_t pte)
+bool is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;

if (huge_pte_none(pte) || pte_present(pte))
- return 0;
+ return false;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp))
- return 1;
+ return true;
else
- return 0;
+ return false;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
--
2.7.4

2017-04-17 17:12:44

by Aneesh Kumar K.V

Subject: [PATCH 5/7] mm/follow_page_mask: Add support for hugepage directory entry

Architectures like ppc64 support hugepage sizes that are not mapped to any of
the page table levels. Instead, they add an alternate page table entry format
called a hugepage directory (hugepd). A hugepd indicates that the page table
entry maps to a set of hugetlb pages. Add support for this in the generic
follow_page_mask() code. We already support this format in the generic gup code.

The default implementation prints a warning and returns NULL. We will add
ppc64 support in later patches.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
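A note on the two shifts involved once a hugepd entry is found (both appear
in the follow_huge_pd() signature):

/*
 * pdshift:           size of the table slot the hugepd entry was found
 *                    in (PGDIR_SHIFT, P4D_SHIFT, PUD_SHIFT or PMD_SHIFT)
 * hugepd_shift(hpd): the actual hugepage size encoded in the entry
 *
 * An arch implementation is expected to use both to locate the pte, e.g.
 *
 *	ptep = hugepte_offset(hpd, address, pdshift);
 */
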
include/linux/hugetlb.h | 4 ++++
mm/gup.c | 33 +++++++++++++++++++++++++++++++++
mm/hugetlb.c | 8 ++++++++
3 files changed, 45 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edab98f0a7b8..7a5917d190f2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -117,6 +117,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
@@ -151,6 +154,7 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
+#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
diff --git a/mm/gup.c b/mm/gup.c
index 65255389620a..a7f5b82e15f3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,14 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pmd_val(*pmd)), flags,
+ PMD_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
if (pmd_devmap(*pmd)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -292,6 +300,14 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pud_val(*pud)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pud_val(*pud)), flags,
+ PUD_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags);
@@ -311,6 +327,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
unsigned int flags, unsigned int *page_mask)
{
p4d_t *p4d;
+ struct page *page;

p4d = p4d_offset(pgdp, address);
if (p4d_none(*p4d))
@@ -319,6 +336,14 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
if (unlikely(p4d_bad(*p4d)))
return no_page_table(vma, flags);

+ if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(p4d_val(*p4d)), flags,
+ P4D_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
return follow_pud_mask(vma, address, p4d, flags, page_mask);
}

@@ -363,6 +388,14 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pgd_val(*pgd)), flags,
+ PGDIR_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }

return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83f39cf5162a..64ad00d97094 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4650,6 +4650,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address,
}

struct page * __weak
+follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd, int flags, int pdshift)
+{
+ WARN(1, "hugepd follow called with no support for hugepage directory format\n");
+ return NULL;
+}
+
+struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags)
{
--
2.7.4

2017-04-17 17:12:49

by Aneesh Kumar K.V

Subject: [PATCH 7/7] powerpc/hugetlb: Enable hugetlb migration for ppc64

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
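For reference, this option is what the generic hugepage_migration_supported()
check keys off; in a v4.11-era tree the helper looks roughly like the sketch
below (an approximation quoted from memory, not part of this patch):

/* include/linux/hugetlb.h, approximate v4.11-era definition */
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return false;
#endif
}
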
arch/powerpc/platforms/Kconfig.cputype | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f4ba4bf0d762..9fb075745c7f 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -350,6 +350,11 @@ config PPC_RADIX_MMU
is only implemented by IBM Power9 CPUs, if you don't have one of them
you can probably disable this.

+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
+
+
config PPC_MMU_NOHASH
def_bool y
depends on !PPC_STD_MMU
--
2.7.4

2017-04-17 17:57:55

by kernel test robot

Subject: Re: [PATCH 5/7] mm/follow_page_mask: Add support for hugepage directory entry

Hi Aneesh,

[auto build test ERROR on linus/master]
[also build test ERROR on v4.11-rc7 next-20170413]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/HugeTLB-migration-support-for-PPC64/20170418-011540
config: i386-randconfig-x016-201716 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=i386

All errors (new ones prefixed by >>):

In file included from fs/proc/task_mmu.c:3:0:
>> include/linux/hugetlb.h:121:31: error: unknown type name 'hugepd_t'
unsigned long address, hugepd_t hpd,
^~~~~~~~
--
In file included from mm/gup.c:15:0:
>> include/linux/hugetlb.h:121:31: error: unknown type name 'hugepd_t'
unsigned long address, hugepd_t hpd,
^~~~~~~~
mm/gup.c: In function 'follow_pmd_mask':
>> mm/gup.c:230:10: error: implicit declaration of function 'follow_huge_pd' [-Werror=implicit-function-declaration]
page = follow_huge_pd(vma, address,
^~~~~~~~~~~~~~
mm/gup.c:230:8: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
page = follow_huge_pd(vma, address,
^
mm/gup.c: In function 'follow_pud_mask':
mm/gup.c:304:8: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
page = follow_huge_pd(vma, address,
^
mm/gup.c: In function 'follow_p4d_mask':
mm/gup.c:340:8: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
page = follow_huge_pd(vma, address,
^
mm/gup.c: In function 'follow_page_mask':
mm/gup.c:392:8: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
page = follow_huge_pd(vma, address,
^
cc1: some warnings being treated as errors

vim +/hugepd_t +121 include/linux/hugetlb.h

115 unsigned long addr, unsigned long sz);
116 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
117 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
118 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
119 int write);
120 struct page *follow_huge_pd(struct vm_area_struct *vma,
> 121 unsigned long address, hugepd_t hpd,
122 int flags, int pdshift);
123 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
124 pmd_t *pmd, int flags);

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation



2017-04-17 18:30:59

by kernel test robot

Subject: Re: [PATCH 5/7] mm/follow_page_mask: Add support for hugepage directory entry

Hi Aneesh,

[auto build test ERROR on linus/master]
[also build test ERROR on v4.11-rc7 next-20170413]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/HugeTLB-migration-support-for-PPC64/20170418-011540
config: x86_64-randconfig-a0-04180109 (attached as .config)
compiler: gcc-4.4 (Debian 4.4.7-8) 4.4.7
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64

All errors (new ones prefixed by >>):

In file included from mm//swap.c:35:
>> include/linux/hugetlb.h:121: error: expected declaration specifiers or '...' before 'hugepd_t'

vim +121 include/linux/hugetlb.h

115 unsigned long addr, unsigned long sz);
116 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
117 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
118 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
119 int write);
120 struct page *follow_huge_pd(struct vm_area_struct *vma,
> 121 unsigned long address, hugepd_t hpd,
122 int flags, int pdshift);
123 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
124 pmd_t *pmd, int flags);

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation



2017-04-18 03:20:22

by Aneesh Kumar K.V

Subject: Re: [PATCH 5/7] mm/follow_page_mask: Add support for hugepage directory entry

kbuild test robot <[email protected]> writes:

> Hi Aneesh,
>
> [auto build test ERROR on linus/master]
> [also build test ERROR on v4.11-rc7 next-20170413]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url: https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/HugeTLB-migration-support-for-PPC64/20170418-011540
> config: x86_64-randconfig-a0-04180109 (attached as .config)
> compiler: gcc-4.4 (Debian 4.4.7-8) 4.4.7
> reproduce:
> # save the attached .config to linux build tree
> make ARCH=x86_64
>
> All errors (new ones prefixed by >>):
>
> In file included from mm//swap.c:35:
>>> include/linux/hugetlb.h:121: error: expected declaration specifiers or '...' before 'hugepd_t'
>
> vim +121 include/linux/hugetlb.h
>
> 115 unsigned long addr, unsigned long sz);
> 116 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
> 117 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
> 118 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
> 119 int write);
> 120 struct page *follow_huge_pd(struct vm_area_struct *vma,
> > 121 unsigned long address, hugepd_t hpd,
> 122 int flags, int pdshift);
> 123 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
> 124 pmd_t *pmd, int flags);
>

Thanks for the report. How about the following?

From 64033ea38962f69271169129277bbb0482299c31 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <[email protected]>
Date: Tue, 18 Apr 2017 08:39:09 +0530
Subject: [PATCH] mm/hugetlb: Move default definition of hugepd_t earlier in
the header

This enables the hugepd_t type to be used earlier in the header. No functional
change in this patch.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
---
include/linux/hugetlb.h | 47 ++++++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edab98f0a7b8..f66c1d4e0d1f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,6 +14,30 @@ struct ctl_table;
struct user_struct;
struct mmu_gather;

+#ifndef is_hugepd
+/*
+ * Some architectures requires a hugepage directory format that is
+ * required to support multiple hugepage sizes. For example
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced the same on powerpc. This allows for a more flexible hugepage
+ * pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
+
+
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
@@ -222,29 +246,6 @@ static inline int pud_write(pud_t pud)
}
#endif

-#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
--
2.7.4



2017-04-27 09:40:48

by Naoya Horiguchi

Subject: Re: [PATCH 1/7] mm/hugetlb/migration: Use set_huge_pte_at instead of set_pte_at

On Mon, Apr 17, 2017 at 10:41:40PM +0530, Aneesh Kumar K.V wrote:
> The right interface for setting a hugetlb pte entry is set_huge_pte_at(). Use
> that instead of set_pte_at().
>
> Signed-off-by: Aneesh Kumar K.V <[email protected]>

Reviewed-by: Naoya Horiguchi <[email protected]>

2017-04-27 09:42:16

by Naoya Horiguchi

Subject: Re: [PATCH 3/7] mm/hugetlb: export hugetlb_entry_migration helper

On Mon, Apr 17, 2017 at 10:41:42PM +0530, Aneesh Kumar K.V wrote:
> We will use this later from the ppc64 code. Also change the return type to bool.
>
> Signed-off-by: Aneesh Kumar K.V <[email protected]>

Reviewed-by: Naoya Horiguchi <[email protected]>

2017-04-27 09:41:00

by Naoya Horiguchi

Subject: Re: [PATCH 2/7] mm/follow_page_mask: Split follow_page_mask to smaller functions.

On Mon, Apr 17, 2017 at 10:41:41PM +0530, Aneesh Kumar K.V wrote:
> This makes the code easier to read. No functional changes in this patch. A
> followup patch will update follow_page_mask() to handle the hugetlb hugepd
> format so that archs like ppc64 can switch to the generic version; this split
> makes that change cleaner.
>
> Signed-off-by: Aneesh Kumar K.V <[email protected]>

Reviewed-by: Naoya Horiguchi <[email protected]>