2019-06-25 14:38:26

by Christoph Hellwig

Subject: switch the remaining architectures to use generic GUP v4

Hi Linus and maintainers,

below is a series to switch mips, sh and sparc64 to use the generic
GUP code so that we only have one codebase to touch for further
improvements to this code. I don't have hardware for any of these
architectures, and generally no clue about their page table
management, so handle with care.

Changes since v3:
- improve a few commit messages
- clean up gup_fast_permitted a bit more
- split the code reordering in gup.c into a separate patch
- drop the patch to pass argument in a structure for now

Changes since v2:
- rebase to mainline to pick up the untagged_addr definition
- fix the gup range check to be start <= end to catch the 0 length case
- use pfn based version for the missing pud_page/pgd_page definitions
- fix a wrong check in the sparc64 version of pte_access_permitted

Changes since v1:
- fix various issues found by the build bot
- cherry-pick and use the untagged_addr helper from Andrey
- add various refactoring patches to share more code over architectures
- move the powerpc hugepd code to mm/gup.c and sync it with the generic
  gup semantics


2019-06-25 14:38:27

by Christoph Hellwig

Subject: [PATCH 05/16] sh: add the missing pud_page definition

sh only had pud_page_vaddr, but not pud_page.
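
For context, the generic fast GUP code dereferences a huge PUD through
pud_page(); a simplified sketch of gup_huge_pud() in mm/gup.c
(illustrative only, not part of this patch):

    page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);

so the pud_page() definition added below is needed for sh to build with
the generic code.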

Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/sh/include/asm/pgtable-3level.h | 1 +
1 file changed, 1 insertion(+)

diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 7d8587eb65ff..3c7ff20f3f94 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -37,6 +37,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
{
return pud_val(pud);
}
+#define pud_page(pud) pfn_to_page(pud_pfn(pud))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
--
2.20.1

2019-06-25 14:38:39

by Christoph Hellwig

Subject: [PATCH 10/16] mm: rename CONFIG_HAVE_GENERIC_GUP to CONFIG_HAVE_FAST_GUP

We only support the generic GUP code now, so rename the config option
to be clearer, always use the mm/Kconfig definition of the symbol, and
select it from the arch Kconfigs.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Khalid Aziz <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
---
arch/arm/Kconfig | 5 +----
arch/arm64/Kconfig | 4 +---
arch/mips/Kconfig | 2 +-
arch/powerpc/Kconfig | 2 +-
arch/s390/Kconfig | 2 +-
arch/sh/Kconfig | 2 +-
arch/sparc/Kconfig | 2 +-
arch/x86/Kconfig | 4 +---
mm/Kconfig | 2 +-
mm/gup.c | 4 ++--
10 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8869742a85df..3879a3e2c511 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -73,6 +73,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
@@ -1596,10 +1597,6 @@ config ARCH_SELECT_MEMORY_MODEL
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM

-config HAVE_GENERIC_GUP
- def_bool y
- depends on ARM_LPAE
-
config HIGHMEM
bool "High Memory Support"
depends on MMU
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 697ea0510729..4a6ee3e92757 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -140,6 +140,7 @@ config ARM64
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
@@ -262,9 +263,6 @@ config GENERIC_CALIBRATE_DELAY
config ZONE_DMA32
def_bool y

-config HAVE_GENERIC_GUP
- def_bool y
-
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 64108a2a16d4..b1e42f0e4ed0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -54,10 +54,10 @@ config MIPS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select HAVE_GENERIC_GUP
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8c1c636308c8..992a04796e56 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -185,12 +185,12 @@ config PPC
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_EBPF_JIT if PPC64
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
- select HAVE_GENERIC_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 109243fdb6ec..aaff0376bf53 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -137,6 +137,7 @@ config S390
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_FAST_GUP
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
@@ -144,7 +145,6 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_GUP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6fddfc3c9710..f6b15ecc37a3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,7 +14,7 @@ config SUPERH
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
- select HAVE_GENERIC_GUP
+ select HAVE_FAST_GUP if MMU
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 22435471f942..659232b760e1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,7 +28,7 @@ config SPARC
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
- select HAVE_GENERIC_GUP if SPARC64
+ select HAVE_FAST_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7cd53cc59f0f..44500e0ed630 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -157,6 +157,7 @@ config X86
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
@@ -2874,9 +2875,6 @@ config HAVE_ATOMIC_IOMAP
config X86_DEV_DMA_OPS
bool

-config HAVE_GENERIC_GUP
- def_bool y
-
source "drivers/firmware/Kconfig"

source "arch/x86/kvm/Kconfig"
diff --git a/mm/Kconfig b/mm/Kconfig
index fe51f104a9e0..98dffb0f2447 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -132,7 +132,7 @@ config HAVE_MEMBLOCK_NODE_MAP
config HAVE_MEMBLOCK_PHYS_MAP
bool

-config HAVE_GENERIC_GUP
+config HAVE_FAST_GUP
bool

config ARCH_KEEP_MEMBLOCK
diff --git a/mm/gup.c b/mm/gup.c
index 9b72f2ea3471..7328890ad8d3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1651,7 +1651,7 @@ struct page *get_dump_page(unsigned long addr)
#endif /* CONFIG_ELF_CORE */

/*
- * Generic Fast GUP
+ * Fast GUP
*
* get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be
@@ -1683,7 +1683,7 @@ struct page *get_dump_page(unsigned long addr)
*
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
-#ifdef CONFIG_HAVE_GENERIC_GUP
+#ifdef CONFIG_HAVE_FAST_GUP
#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
* WARNING: only to be used in the get_user_pages_fast() implementation.
--
2.20.1

2019-06-25 14:38:43

by Christoph Hellwig

Subject: [PATCH 11/16] mm: reorder code blocks in gup.c

This moves the exported functions towards the end of the file, and
reorders some functions into more logical blocks, in preparation for
moving various stubs inline into the main functionality using
IS_ENABLED().
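
As a minimal sketch of the IS_ENABLED() pattern this prepares for
(illustrative only; the actual conversion happens in the next patch):

    if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
        gup_fast_permitted(start, end)) {
            /* lockless fast path, compiled out when the option is off */
    }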

Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/gup.c | 410 +++++++++++++++++++++++++++----------------------------
1 file changed, 205 insertions(+), 205 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 7328890ad8d3..b29249581672 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1100,86 +1100,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
return pages_done;
}

-/*
- * We can leverage the VM_FAULT_RETRY functionality in the page fault
- * paths better by using either get_user_pages_locked() or
- * get_user_pages_unlocked().
- *
- * get_user_pages_locked() is suitable to replace the form:
- *
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
- *
- * to:
- *
- * int locked = 1;
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages_locked(tsk, mm, ..., pages, &locked);
- * if (locked)
- * up_read(&mm->mmap_sem);
- */
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- /*
- * FIXME: Current FOLL_LONGTERM behavior is incompatible with
- * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
- * vmas. As there are no users of this flag in this call we simply
- * disallow this option for now.
- */
- if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
- return -EINVAL;
-
- return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, NULL, locked,
- gup_flags | FOLL_TOUCH);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-/*
- * get_user_pages_unlocked() is suitable to replace the form:
- *
- * down_read(&mm->mmap_sem);
- * get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
- *
- * with:
- *
- * get_user_pages_unlocked(tsk, mm, ..., pages);
- *
- * It is functionally equivalent to get_user_pages_fast so
- * get_user_pages_fast should be used instead if specific gup_flags
- * (e.g. FOLL_FORCE) are not required.
- */
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- struct mm_struct *mm = current->mm;
- int locked = 1;
- long ret;
-
- /*
- * FIXME: Current FOLL_LONGTERM behavior is incompatible with
- * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
- * vmas. As there are no users of this flag in this call we simply
- * disallow this option for now.
- */
- if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
- return -EINVAL;
-
- down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
- &locked, gup_flags | FOLL_TOUCH);
- if (locked)
- up_read(&mm->mmap_sem);
- return ret;
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
/*
* get_user_pages_remote() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
@@ -1256,6 +1176,153 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages_remote);

+/**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must held for read only and may be
+ * released. If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *nonblocking)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long nr_pages = (end - start) / PAGE_SIZE;
+ int gup_flags;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON_VMA(start < vma->vm_start, vma);
+ VM_BUG_ON_VMA(end > vma->vm_end, vma);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+ gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
+ if (vma->vm_flags & VM_LOCKONFAULT)
+ gup_flags &= ~FOLL_POPULATE;
+ /*
+ * We want to touch writable mappings with a write fault in order
+ * to break COW, except for shared mappings because these don't COW
+ * and we would not want to dirty them for nothing.
+ */
+ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+ /*
+ * We want mlock to succeed for regions that have any permissions
+ * other than PROT_NONE.
+ */
+ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+ gup_flags |= FOLL_FORCE;
+
+ /*
+ * We made sure addr is within a VMA, so the following will
+ * not result in a stack expansion that recurses back here.
+ */
+ return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+ NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long end, nstart, nend;
+ struct vm_area_struct *vma = NULL;
+ int locked = 0;
+ long ret = 0;
+
+ end = start + len;
+
+ for (nstart = start; nstart < end; nstart = nend) {
+ /*
+ * We want to fault in pages for [nstart; end) address range.
+ * Find first corresponding VMA.
+ */
+ if (!locked) {
+ locked = 1;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, nstart);
+ } else if (nstart >= vma->vm_end)
+ vma = vma->vm_next;
+ if (!vma || vma->vm_start >= end)
+ break;
+ /*
+ * Set [nstart; nend) to intersection of desired address
+ * range with the first VMA. Also, skip undesirable VMA types.
+ */
+ nend = min(end, vma->vm_end);
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ continue;
+ if (nstart < vma->vm_start)
+ nstart = vma->vm_start;
+ /*
+ * Now fault in a range of pages. populate_vma_page_range()
+ * double checks the vma flags, so that it won't mlock pages
+ * if the vma was already munlocked.
+ */
+ ret = populate_vma_page_range(vma, nstart, nend, &locked);
+ if (ret < 0) {
+ if (ignore_errors) {
+ ret = 0;
+ continue; /* continue at next VMA */
+ }
+ break;
+ }
+ nend = nstart + ret * PAGE_SIZE;
+ ret = 0;
+ }
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret; /* 0 or negative error code */
+}
+
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ if (__get_user_pages(current, current->mm, addr, 1,
+ FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+ NULL) < 1)
+ return NULL;
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ return page;
+}
+#endif /* CONFIG_ELF_CORE */
+
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
@@ -1503,152 +1570,85 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);

-/**
- * populate_vma_page_range() - populate a range of pages in the vma.
- * @vma: target vma
- * @start: start address
- * @end: end address
- * @nonblocking:
- *
- * This takes care of mlocking the pages too if VM_LOCKED is set.
+/*
+ * We can leverage the VM_FAULT_RETRY functionality in the page fault
+ * paths better by using either get_user_pages_locked() or
+ * get_user_pages_unlocked().
*
- * return 0 on success, negative error code on error.
+ * get_user_pages_locked() is suitable to replace the form:
*
- * vma->vm_mm->mmap_sem must be held.
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
*
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
+ * to:
*
- * If @nonblocking is non-NULL, it must held for read only and may be
- * released. If it's released, *@nonblocking will be set to 0.
+ * int locked = 1;
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ * if (locked)
+ * up_read(&mm->mmap_sem);
*/
-long populate_vma_page_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, int *nonblocking)
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked)
{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long nr_pages = (end - start) / PAGE_SIZE;
- int gup_flags;
-
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(end & ~PAGE_MASK);
- VM_BUG_ON_VMA(start < vma->vm_start, vma);
- VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
- gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
- if (vma->vm_flags & VM_LOCKONFAULT)
- gup_flags &= ~FOLL_POPULATE;
- /*
- * We want to touch writable mappings with a write fault in order
- * to break COW, except for shared mappings because these don't COW
- * and we would not want to dirty them for nothing.
- */
- if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
- gup_flags |= FOLL_WRITE;
-
/*
- * We want mlock to succeed for regions that have any permissions
- * other than PROT_NONE.
+ * FIXME: Current FOLL_LONGTERM behavior is incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. As there are no users of this flag in this call we simply
+ * disallow this option for now.
*/
- if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
- gup_flags |= FOLL_FORCE;
+ if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+ return -EINVAL;

- /*
- * We made sure addr is within a VMA, so the following will
- * not result in a stack expansion that recurses back here.
- */
- return __get_user_pages(current, mm, start, nr_pages, gup_flags,
- NULL, NULL, nonblocking);
+ return __get_user_pages_locked(current, current->mm, start, nr_pages,
+ pages, NULL, locked,
+ gup_flags | FOLL_TOUCH);
}
+EXPORT_SYMBOL(get_user_pages_locked);

/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
+ * get_user_pages_unlocked() is suitable to replace the form:
*
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
+ * down_read(&mm->mmap_sem);
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * with:
+ *
+ * get_user_pages_unlocked(tsk, mm, ..., pages);
+ *
+ * It is functionally equivalent to get_user_pages_fast so
+ * get_user_pages_fast should be used instead if specific gup_flags
+ * (e.g. FOLL_FORCE) are not required.
*/
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ struct page **pages, unsigned int gup_flags)
{
struct mm_struct *mm = current->mm;
- unsigned long end, nstart, nend;
- struct vm_area_struct *vma = NULL;
- int locked = 0;
- long ret = 0;
+ int locked = 1;
+ long ret;

- end = start + len;
+ /*
+ * FIXME: Current FOLL_LONGTERM behavior is incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. As there are no users of this flag in this call we simply
+ * disallow this option for now.
+ */
+ if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+ return -EINVAL;

- for (nstart = start; nstart < end; nstart = nend) {
- /*
- * We want to fault in pages for [nstart; end) address range.
- * Find first corresponding VMA.
- */
- if (!locked) {
- locked = 1;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, nstart);
- } else if (nstart >= vma->vm_end)
- vma = vma->vm_next;
- if (!vma || vma->vm_start >= end)
- break;
- /*
- * Set [nstart; nend) to intersection of desired address
- * range with the first VMA. Also, skip undesirable VMA types.
- */
- nend = min(end, vma->vm_end);
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- continue;
- if (nstart < vma->vm_start)
- nstart = vma->vm_start;
- /*
- * Now fault in a range of pages. populate_vma_page_range()
- * double checks the vma flags, so that it won't mlock pages
- * if the vma was already munlocked.
- */
- ret = populate_vma_page_range(vma, nstart, nend, &locked);
- if (ret < 0) {
- if (ignore_errors) {
- ret = 0;
- continue; /* continue at next VMA */
- }
- break;
- }
- nend = nstart + ret * PAGE_SIZE;
- ret = 0;
- }
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+ &locked, gup_flags | FOLL_TOUCH);
if (locked)
up_read(&mm->mmap_sem);
- return ret; /* 0 or negative error code */
-}
-
-/**
- * get_dump_page() - pin user page in memory while writing it to core dump
- * @addr: user address
- *
- * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by put_page().
- *
- * Returns NULL on any kind of failure - a hole must then be inserted into
- * the corefile, to preserve alignment with its headers; and also returns
- * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
- * allowing a hole to be left in the corefile to save diskspace.
- *
- * Called without mmap_sem, but after all other threads have been killed.
- */
-#ifdef CONFIG_ELF_CORE
-struct page *get_dump_page(unsigned long addr)
-{
- struct vm_area_struct *vma;
- struct page *page;
-
- if (__get_user_pages(current, current->mm, addr, 1,
- FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
- NULL) < 1)
- return NULL;
- flush_cache_page(vma, addr, page_to_pfn(page));
- return page;
+ return ret;
}
-#endif /* CONFIG_ELF_CORE */
+EXPORT_SYMBOL(get_user_pages_unlocked);

/*
* Fast GUP
--
2.20.1

2019-06-25 14:38:47

by Christoph Hellwig

Subject: [PATCH 12/16] mm: consolidate the get_user_pages* implementations

Always build mm/gup.c so that we don't have to provide separate nommu
stubs.  Also merge the get_user_pages_fast and __get_user_pages_fast
stubs used when HAVE_FAST_GUP is not set into the main implementations,
which never call the fast path in that case.

This also ensures the new put_user_pages* helpers are available for
nommu, where they are currently missing, which would become a problem
as soon as we actually grow users for them.
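
For example, a caller pattern like the following (hypothetical, for
illustration) now also works on nommu kernels:

    /* pin, use, then release user pages */
    npinned = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
    if (npinned > 0)
            put_user_pages(pages, npinned);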

Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/Kconfig | 1 +
mm/Makefile | 4 +--
mm/gup.c | 67 +++++++++++++++++++++++++++++++++++++---
mm/nommu.c | 88 -----------------------------------------------------
mm/util.c | 47 ----------------------------
5 files changed, 65 insertions(+), 142 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index 98dffb0f2447..5c41409557da 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -133,6 +133,7 @@ config HAVE_MEMBLOCK_PHYS_MAP
bool

config HAVE_FAST_GUP
+ depends on MMU
bool

config ARCH_KEEP_MEMBLOCK
diff --git a/mm/Makefile b/mm/Makefile
index ac5e5ba78874..dc0746ca1109 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -22,7 +22,7 @@ KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n

mmu-y := nommu.o
-mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
+mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
pgtable-generic.o rmap.o vmalloc.o
@@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ debug.o gup.o $(mmu-y)

# Give 'page_alloc' its own module-parameter namespace
page-alloc-y := page_alloc.o
diff --git a/mm/gup.c b/mm/gup.c
index b29249581672..0e83dba98dfd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -134,6 +134,7 @@ void put_user_pages(struct page **pages, unsigned long npages)
}
EXPORT_SYMBOL(put_user_pages);

+#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
{
@@ -1322,6 +1323,51 @@ struct page *get_dump_page(unsigned long addr)
return page;
}
#endif /* CONFIG_ELF_CORE */
+#else /* CONFIG_MMU */
+static long __get_user_pages_locked(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long nr_pages, struct page **pages,
+ struct vm_area_struct **vmas, int *locked,
+ unsigned int foll_flags)
+{
+ struct vm_area_struct *vma;
+ unsigned long vm_flags;
+ int i;
+
+ /* calculate required read or write permissions.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
+ */
+ vm_flags = (foll_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (foll_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+ for (i = 0; i < nr_pages; i++) {
+ vma = find_vma(mm, start);
+ if (!vma)
+ goto finish_or_fault;
+
+ /* protect what we can, including chardevs */
+ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
+ goto finish_or_fault;
+
+ if (pages) {
+ pages[i] = virt_to_page(start);
+ if (pages[i])
+ get_page(pages[i]);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ start = (start + PAGE_SIZE) & PAGE_MASK;
+ }
+
+ return i;
+
+finish_or_fault:
+ return i ? : -EFAULT;
+}
+#endif /* !CONFIG_MMU */

#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
@@ -1484,7 +1530,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
{
return nr_pages;
}
-#endif
+#endif /* CONFIG_CMA */

/*
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -2160,6 +2206,12 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
return;
} while (pgdp++, addr = next, addr != end);
}
+#else
+static inline void gup_pgd_range(unsigned long addr, unsigned long end,
+ unsigned int flags, struct page **pages, int *nr)
+{
+}
+#endif /* CONFIG_HAVE_FAST_GUP */

#ifndef gup_fast_permitted
/*
@@ -2177,6 +2229,9 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
+ *
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
@@ -2206,7 +2261,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting.
*/

- if (gup_fast_permitted(start, end)) {
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+ gup_fast_permitted(start, end)) {
local_irq_save(flags);
gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
local_irq_restore(flags);
@@ -2214,6 +2270,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,

return nr;
}
+EXPORT_SYMBOL_GPL(__get_user_pages_fast);

static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
@@ -2270,7 +2327,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;

- if (gup_fast_permitted(start, end)) {
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+ gup_fast_permitted(start, end)) {
local_irq_disable();
gup_pgd_range(addr, end, gup_flags, pages, &nr);
local_irq_enable();
@@ -2296,5 +2354,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages,

return ret;
}
-
-#endif /* CONFIG_HAVE_GENERIC_GUP */
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
diff --git a/mm/nommu.c b/mm/nommu.c
index d8c02fbe03b5..07165ad2e548 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -111,94 +111,6 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}

-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking)
-{
- struct vm_area_struct *vma;
- unsigned long vm_flags;
- int i;
-
- /* calculate required read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
- */
- vm_flags = (foll_flags & FOLL_WRITE) ?
- (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (foll_flags & FOLL_FORCE) ?
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
- for (i = 0; i < nr_pages; i++) {
- vma = find_vma(mm, start);
- if (!vma)
- goto finish_or_fault;
-
- /* protect what we can, including chardevs */
- if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- goto finish_or_fault;
-
- if (pages) {
- pages[i] = virt_to_page(start);
- if (pages[i])
- get_page(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- start = (start + PAGE_SIZE) & PAGE_MASK;
- }
-
- return i;
-
-finish_or_fault:
- return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- * slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas)
-{
- return __get_user_pages(current, current->mm, start, nr_pages,
- gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- down_read(&mm->mmap_sem);
- ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
- NULL, NULL);
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
diff --git a/mm/util.c b/mm/util.c
index 9834c4ab7d8e..68575a315dc5 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -300,53 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
}
#endif

-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- * If the architecture does not support this function, simply return with no
- * pages pinned.
- */
-int __weak __get_user_pages_fast(unsigned long start,
- int nr_pages, int write, struct page **pages)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm, with force=0 and vma=NULL. However
- * unlike get_user_pages, it must be called without mmap_sem held.
- *
- * get_user_pages_fast may take mmap_sem and page table locks, so no
- * assumptions can be made about lack of locking. get_user_pages_fast is to be
- * implemented in a way that is advantageous (vs get_user_pages()) when the
- * user memory area is already faulted in and present in ptes. However if the
- * pages have to be faulted in, it may turn out to be slightly slower so
- * callers need to carefully consider what to use. On many architectures,
- * get_user_pages_fast simply falls back to get_user_pages.
- *
- * Return: number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int __weak get_user_pages_fast(unsigned long start,
- int nr_pages, unsigned int gup_flags,
- struct page **pages)
-{
- return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
-}
-EXPORT_SYMBOL_GPL(get_user_pages_fast);
-
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
--
2.20.1

2019-06-25 14:39:04

by Christoph Hellwig

Subject: [PATCH 15/16] mm: switch gup_hugepte to use try_get_compound_head

This applies the overflow fixes from 8fde12ca79aff
("mm: prevent get_user_pages() from overflowing page refcount")
to the powerpc hugepd code and brings it back in sync with the
other GUP cases.
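
For reference, the helper introduced by that commit looks roughly like
this:

    static inline struct page *try_get_compound_head(struct page *page,
                    int refs)
    {
            struct page *head = compound_head(page);

            if (WARN_ON_ONCE(page_ref_count(head) < 0))
                    return NULL;
            if (unlikely(!page_cache_add_speculative(head, refs)))
                    return NULL;
            return head;
    }

i.e. it refuses to take references on a page whose refcount has already
overflowed into negative territory.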

Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/gup.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/gup.c b/mm/gup.c
index 7077549bc8ea..e06447cff635 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2006,7 +2006,8 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);

- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(head, refs);
+ if (!head) {
*nr -= refs;
return 0;
}
--
2.20.1

2019-06-25 14:39:08

by Christoph Hellwig

Subject: [PATCH 16/16] mm: mark the page referenced in gup_hugepte

All other get_user_pages_fast cases mark the page referenced, so do
this here as well.
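
For comparison, the pte-level fast path already does this; simplified
from gup_pte_range() in mm/gup.c:

    SetPageReferenced(page);
    pages[*nr] = page;
    (*nr)++;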

Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/gup.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/mm/gup.c b/mm/gup.c
index e06447cff635..d9d022d835ca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2020,6 +2020,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
return 0;
}

+ SetPageReferenced(head);
return 1;
}

--
2.20.1

2019-06-25 14:39:14

by Christoph Hellwig

Subject: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

The sparc64 code is mostly equivalent to the generic one, minus various
bugfixes and two arch overrides that this patch adds to pgtable.h.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Khalid Aziz <[email protected]>
---
arch/sparc/Kconfig | 1 +
arch/sparc/include/asm/pgtable_64.h | 18 ++
arch/sparc/mm/Makefile | 2 +-
arch/sparc/mm/gup.c | 340 ----------------------------
4 files changed, 20 insertions(+), 341 deletions(-)
delete mode 100644 arch/sparc/mm/gup.c

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 26ab6f5bbaaf..22435471f942 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,6 +28,7 @@ config SPARC
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select HAVE_GENERIC_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1904782dcd39..547ff96fb228 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1098,6 +1098,24 @@ static inline unsigned long untagged_addr(unsigned long start)
}
#define untagged_addr untagged_addr

+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot;
+
+ if (tlb_type == hypervisor) {
+ prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
+ if (write)
+ prot |= _PAGE_WRITE_4V;
+ } else {
+ prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
+ if (write)
+ prot |= _PAGE_WRITE_4U;
+ }
+
+ return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+#define pte_access_permitted pte_access_permitted
+
#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index d39075b1e3b7..b078205b70e0 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -5,7 +5,7 @@
asflags-y := -ansi
ccflags-y := -Werror

-obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
deleted file mode 100644
index 1e770a517d4a..000000000000
--- a/arch/sparc/mm/gup.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for sparc, cribbed from powerpc
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-#include <asm/adi.h>
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long mask, result;
- pte_t *ptep;
-
- if (tlb_type == hypervisor) {
- result = _PAGE_PRESENT_4V|_PAGE_P_4V;
- if (write)
- result |= _PAGE_WRITE_4V;
- } else {
- result = _PAGE_PRESENT_4U|_PAGE_P_4U;
- if (write)
- result |= _PAGE_WRITE_4U;
- }
- mask = result | _PAGE_SPECIAL;
-
- ptep = pte_offset_kernel(&pmd, addr);
- do {
- struct page *page, *head;
- pte_t pte = *ptep;
-
- if ((pte_val(pte) & mask) != result)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- /* The hugepage case is simplified on sparc64 because
- * we encode the sub-page pfn offsets into the
- * hugepage PTEs. We could optimize this in the future
- * use page_cache_add_speculative() for the hugepage case.
- */
- page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
- return 0;
- }
-
- pages[*nr] = page;
- (*nr)++;
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages,
- int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!(pmd_val(pmd) & _PAGE_VALID))
- return 0;
-
- if (write && !pmd_write(pmd))
- return 0;
-
- refs = 0;
- page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- head = compound_head(page);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages,
- int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!(pud_val(pud) & _PAGE_VALID))
- return 0;
-
- if (write && !pud_write(pud))
- return 0;
-
- refs = 0;
- page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- head = compound_head(page);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pud_val(pud) != pud_val(*pudp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_large(pmd))) {
- if (!gup_huge_pmd(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmd, addr, next, write,
- pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_large(pud))) {
- if (!gup_huge_pud(pudp, pud, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp;
- int nr = 0;
-
-#ifdef CONFIG_SPARC64
- if (adi_capable()) {
- long addr = start;
-
- /* If userspace has passed a versioned address, kernel
- * will not find it in the VMAs since it does not store
- * the version tags in the list of VMAs. Storing version
- * tags in list of VMAs is impractical since they can be
- * changed any time from userspace without dropping into
- * kernel. Any address search in VMAs will be done with
- * non-versioned addresses. Ensure the ADI version bits
- * are dropped here by sign extending the last bit before
- * ADI bits. IOMMU does not implement version tags.
- */
- addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
- start = addr;
- }
-#endif
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int nr = 0;
-
-#ifdef CONFIG_SPARC64
- if (adi_capable()) {
- long addr = start;
-
- /* If userspace has passed a versioned address, kernel
- * will not find it in the VMAs since it does not store
- * the version tags in the list of VMAs. Storing version
- * tags in list of VMAs is impractical since they can be
- * changed any time from userspace without dropping into
- * kernel. Any address search in VMAs will be done with
- * non-versioned addresses. Ensure the ADI version bits
- * are dropped here by sign extending the last bit before
- * ADI bits. IOMMU does not implements version tags,
- */
- addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
- start = addr;
- }
-#endif
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch size
- * will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables from being freed on sparc.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_disable();
-
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
-
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-
-slow:
- local_irq_enable();
-
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, pages,
- gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
--
2.20.1

2019-06-25 14:39:25

by Christoph Hellwig

Subject: [PATCH 07/16] sparc64: add the missing pgd_page definition

sparc64 only had pgd_page_vaddr, but not pgd_page.

Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/sparc/include/asm/pgtable_64.h | 1 +
1 file changed, 1 insertion(+)

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 22500c3be7a9..f0dcf991d27f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -861,6 +861,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
#define pgd_page_vaddr(pgd) \
((unsigned long) __va(pgd_val(pgd)))
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
#define pgd_present(pgd) (pgd_val(pgd) != 0U)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)

--
2.20.1

2019-06-25 14:39:26

by Christoph Hellwig

Subject: [PATCH 08/16] sparc64: define untagged_addr()

Add a helper to untag a user pointer. This is needed for ADI support
in get_user_pages_fast.
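
A hypothetical worked example, assuming 4-bit ADI version tags in the
top address bits (as on SPARC M7):

    long addr = 0x5123456789abcdefL;  /* version tag 0x5 */

    addr = (addr << 4) >> 4;          /* arithmetic shift on signed long */
    /* addr == 0x0123456789abcdef: tag dropped, bit 59 sign-extended */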

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Khalid Aziz <[email protected]>
---
arch/sparc/include/asm/pgtable_64.h | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f0dcf991d27f..1904782dcd39 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1076,6 +1076,28 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
}
#define io_remap_pfn_range io_remap_pfn_range

+static inline unsigned long untagged_addr(unsigned long start)
+{
+ if (adi_capable()) {
+ long addr = start;
+
+ /* If userspace has passed a versioned address, kernel
+ * will not find it in the VMAs since it does not store
+ * the version tags in the list of VMAs. Storing version
+ * tags in list of VMAs is impractical since they can be
+ * changed any time from userspace without dropping into
+ * kernel. Any address search in VMAs will be done with
+ * non-versioned addresses. Ensure the ADI version bits
+ * are dropped here by sign extending the last bit before
+ * ADI bits. IOMMU does not implement version tags.
+ */
+ return (addr << (long)adi_nbits()) >> (long)adi_nbits();
+ }
+
+ return start;
+}
+#define untagged_addr untagged_addr
+
#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

--
2.20.1

2019-06-25 14:39:44

by Christoph Hellwig

Subject: [PATCH 01/16] mm: use untagged_addr() for get_user_pages_fast addresses

This will allow sparc64, or any future architecture with memory
tagging, to override its tags for get_user_pages and
get_user_pages_fast.
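
Architectures that don't define an override get the generic no-op
fallback, which at the time looked roughly like:

    #ifndef untagged_addr
    #define untagged_addr(addr) (addr)
    #endif

so this change is a no-op everywhere except on tagging-capable
architectures.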

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Khalid Aziz <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
---
mm/gup.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index ddde097cf9e4..6bb521db67ec 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2146,7 +2146,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
unsigned long flags;
int nr = 0;

- start &= PAGE_MASK;
+ start = untagged_addr(start) & PAGE_MASK;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;

@@ -2219,7 +2219,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned long addr, len, end;
int nr = 0, ret = 0;

- start &= PAGE_MASK;
+ start = untagged_addr(start) & PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
--
2.20.1

2019-06-25 14:40:05

by Christoph Hellwig

Subject: [PATCH 13/16] mm: validate get_user_pages_fast flags

We can only deal with FOLL_WRITE and/or FOLL_LONGTERM in
get_user_pages_fast, so reject all other flags.
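
A hypothetical caller now fails fast instead of having the flag
silently ignored:

    /* FOLL_FORCE is not handled by the fast path */
    ret = get_user_pages_fast(start, 1, FOLL_FORCE, pages);
    /* now returns -EINVAL (with a one-time warning) */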

Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/gup.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index 0e83dba98dfd..37a2083b1ed8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2317,6 +2317,9 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned long addr, len, end;
int nr = 0, ret = 0;

+ if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
+ return -EINVAL;
+
start = untagged_addr(start) & PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
--
2.20.1

2019-06-25 14:40:07

by Christoph Hellwig

Subject: [PATCH 14/16] mm: move the powerpc hugepd code to mm/gup.c

While only powerpc supports the hugepd case, the code is pretty
generic and I'd like to keep all GUP internals in one place.
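
For context, the generic walker dispatches into this code roughly as
follows (a simplified sketch of gup_pmd_range(); the exact parameter
plumbing may differ):

    if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
            if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT,
                             next, flags & FOLL_WRITE, pages, nr))
                    return 0;
    }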

Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/mm/hugetlbpage.c | 72 ------------------------------
include/linux/hugetlb.h | 18 --------
mm/Kconfig | 10 +++++
mm/gup.c | 82 +++++++++++++++++++++++++++++++++++
5 files changed, 93 insertions(+), 90 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 992a04796e56..4f1b00979cde 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -125,6 +125,7 @@ config PPC
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
+ select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b5d92dc32844..51716c11d0fb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -511,13 +511,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
return page;
}

-static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
- unsigned long sz)
-{
- unsigned long __boundary = (addr + sz) & ~(sz-1);
- return (__boundary - 1 < end - 1) ? __boundary : end;
-}
-
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
@@ -665,68 +658,3 @@ void flush_dcache_icache_hugepage(struct page *page)
}
}
}
-
-static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long pte_end;
- struct page *head, *page;
- pte_t pte;
- int refs;
-
- pte_end = (addr + sz) & ~(sz-1);
- if (pte_end < end)
- end = pte_end;
-
- pte = READ_ONCE(*ptep);
-
- if (!pte_access_permitted(pte, write))
- return 0;
-
- /* hugepages are never "special" */
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
-
- page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- /* Could be optimized better */
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- pte_t *ptep;
- unsigned long sz = 1UL << hugepd_shift(hugepd);
- unsigned long next;
-
- ptep = hugepte_offset(hugepd, addr, pdshift);
- do {
- next = hugepte_addr_end(addr, end, sz);
- if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
- return 0;
- } while (ptep++, addr = next, addr != end);
-
- return 1;
-}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edf476c8cfb9..0f91761e2c53 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,29 +16,11 @@ struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
#endif

-
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
diff --git a/mm/Kconfig b/mm/Kconfig
index 5c41409557da..44be3f01a2b2 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -769,4 +769,14 @@ config GUP_GET_PTE_LOW_HIGH
config ARCH_HAS_PTE_SPECIAL
bool

+#
+# Some architectures require a special hugepage directory format that is
+# required to support multiple hugepage sizes. For example a4fe3ce76
+# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+# introduced it on powerpc. This allows for a more flexible hugepage
+# pagetable layouts.
+#
+config ARCH_HAS_HUGEPD
+ bool
+
endmenu
diff --git a/mm/gup.c b/mm/gup.c
index 37a2083b1ed8..7077549bc8ea 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1966,6 +1966,88 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
}
#endif

+#ifdef CONFIG_ARCH_HAS_HUGEPD
+static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ unsigned long sz)
+{
+ unsigned long __boundary = (addr + sz) & ~(sz-1);
+ return (__boundary - 1 < end - 1) ? __boundary : end;
+}
+
+static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long pte_end;
+ struct page *head, *page;
+ pte_t pte;
+ int refs;
+
+ pte_end = (addr + sz) & ~(sz-1);
+ if (pte_end < end)
+ end = pte_end;
+
+ pte = READ_ONCE(*ptep);
+
+ if (!pte_access_permitted(pte, write))
+ return 0;
+
+ /* hugepages are never "special" */
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+
+ page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ /* Could be optimized better */
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned int pdshift, unsigned long end, int write,
+ struct page **pages, int *nr)
+{
+ pte_t *ptep;
+ unsigned long sz = 1UL << hugepd_shift(hugepd);
+ unsigned long next;
+
+ ptep = hugepte_offset(hugepd, addr, pdshift);
+ do {
+ next = hugepte_addr_end(addr, end, sz);
+ if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ return 0;
+ } while (ptep++, addr = next, addr != end);
+
+ return 1;
+}
+#else
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end, int write,
+ struct page **pages, int *nr)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_HAS_HUGEPD */
+
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
--
2.20.1

2019-06-25 14:40:12

by Christoph Hellwig

[permalink] [raw]
Subject: [PATCH 06/16] sh: use the generic get_user_pages_fast code

The sh code is mostly equivalent to the generic one, minus various
bugfixes and two arch overrides that this patch adds to pgtable.h.
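
For context, the generic fast GUP path consumes the
pte_access_permitted() override added below from its PTE walker; a
condensed sketch of that call site (simplified, not the exact
mm/gup.c code):

	pte_t pte = gup_get_pte(ptep);

	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
		goto pte_unmap;		/* fall back to the slow path */

Each variant in the hunk below just encodes which PTE bits mean
"present and readable (or writable)" for that sh configuration.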

Signed-off-by: Christoph Hellwig <[email protected]>
---
arch/sh/Kconfig | 2 +
arch/sh/include/asm/pgtable.h | 37 +++++
arch/sh/mm/Makefile | 2 +-
arch/sh/mm/gup.c | 277 ----------------------------------
4 files changed, 40 insertions(+), 278 deletions(-)
delete mode 100644 arch/sh/mm/gup.c

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b77f512bb176..6fddfc3c9710 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,6 +14,7 @@ config SUPERH
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_GENERIC_GUP
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -63,6 +64,7 @@ config SUPERH
config SUPERH32
def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
+ select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_IOREMAP_PROT if MMU && !X2TLB
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 3587103afe59..9085d1142fa3 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -149,6 +149,43 @@ extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd);

+static inline bool __pte_access_permitted(pte_t pte, u64 prot)
+{
+ return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+
+#ifdef CONFIG_X2TLB
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT;
+
+ prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
+ if (write)
+ prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
+ return __pte_access_permitted(pte, prot);
+}
+#elif defined(CONFIG_SUPERH64)
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+
+ if (write)
+ prot |= _PAGE_WRITE;
+ return __pte_access_permitted(pte, prot);
+}
+#else
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT | _PAGE_USER;
+
+ if (write)
+ prot |= _PAGE_RW;
+ return __pte_access_permitted(pte, prot);
+}
+#endif
+
+#define pte_access_permitted pte_access_permitted
+
/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index fbe5e79751b3..5051b38fd5b6 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -17,7 +17,7 @@ cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)

mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
+mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o ioremap.o kmap.o \
pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o

obj-y += $(mmu-y)
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
deleted file mode 100644
index 277c882f7489..000000000000
--- a/arch/sh/mm/gup.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for SuperH
- *
- * Copyright (C) 2009 - 2010 Paul Mundt
- *
- * Cloned from the x86 and PowerPC versions, by:
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#ifndef CONFIG_X2TLB
- return READ_ONCE(*ptep);
-#else
- /*
- * With get_user_pages_fast, we walk down the pagetables without
- * taking any locks. For this we would like to load the pointers
- * atomically, but that is not possible with 64-bit PTEs. What
- * we do have is the guarantee that a pte will only either go
- * from not present to present, or present to not present or both
- * -- it will not switch to a completely different present page
- * without a TLB flush in between; something that we are blocking
- * by holding interrupts off.
- *
- * Setting ptes from not present to present goes:
- * ptep->pte_high = h;
- * smp_wmb();
- * ptep->pte_low = l;
- *
- * And present to not present goes:
- * ptep->pte_low = 0;
- * smp_wmb();
- * ptep->pte_high = 0;
- *
- * We must ensure here that the load of pte_low sees l iff pte_high
- * sees h. We load pte_high *after* loading pte_low, which ensures we
- * don't see an older value of pte_high. *Then* we recheck pte_low,
- * which ensures that we haven't picked up a changed pte high. We might
- * have got rubbish values from pte_low and pte_high, but we are
- * guaranteed that pte_low will not have the present bit set *unless*
- * it is 'l'. And get_user_pages_fast only operates on present ptes, so
- * we're safe.
- *
- * gup_get_pte should not be used or copied outside gup.c without being
- * very careful -- it does not atomically load the pte or anything that
- * is likely to be useful for you.
- */
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#endif
-}
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- u64 mask, result;
- pte_t *ptep;
-
-#ifdef CONFIG_X2TLB
- result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
- if (write)
- result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
-#elif defined(CONFIG_SUPERH64)
- result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
- if (write)
- result |= _PAGE_WRITE;
-#else
- result = _PAGE_PRESENT | _PAGE_USER;
- if (write)
- result |= _PAGE_RW;
-#endif
-
- mask = result | _PAGE_SPECIAL;
-
- ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- if ((pte_val(pte) & mask) != result) {
- pte_unmap(ptep);
- return 0;
- }
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- __flush_anon_page(page, addr);
- flush_dcache_page(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
- pte_unmap(ptep - 1);
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok((void __user *)start, len)))
- return 0;
-
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start)
- goto slow_irqon;
-
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, pages,
- gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
--
2.20.1

2019-06-25 14:40:21

by Christoph Hellwig

[permalink] [raw]
Subject: [PATCH 02/16] mm: simplify gup_fast_permitted

Pass in the already calculated end value instead of recomputing it, and
leave the end > start check in the callers instead of duplicating it
in the arch code.
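
Condensed, every caller now follows the same pattern (a sketch of the
mm/gup.c changes below, not new code):

	unsigned long len = (unsigned long) nr_pages << PAGE_SHIFT;
	unsigned long end = start + len;

	if (end <= start)	/* catches overflow and nr_pages <= 0 */
		return 0;
	...
	if (gup_fast_permitted(start, end)) {
		/* walk the page tables with IRQs disabled */
	}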

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
---
arch/s390/include/asm/pgtable.h | 8 +-------
arch/x86/include/asm/pgtable_64.h | 8 +-------
mm/gup.c | 17 +++++++----------
3 files changed, 9 insertions(+), 24 deletions(-)

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9f0195d5fa16..9b274fcaacb6 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1270,14 +1270,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

-static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (end < start)
- return false;
return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 0bb566315621..4990d26dfc73 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -259,14 +259,8 @@ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#define gup_fast_permitted gup_fast_permitted
-static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long)nr_pages << PAGE_SHIFT;
- end = start + len;
- if (end < start)
- return false;
if (end >> __VIRTUAL_MASK_SHIFT)
return false;
return true;
diff --git a/mm/gup.c b/mm/gup.c
index 6bb521db67ec..3237f33792e6 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2123,13 +2123,9 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
* Check if it's allowed to use __get_user_pages_fast() for the range, or
* we need to fall back to the slow version:
*/
-bool gup_fast_permitted(unsigned long start, int nr_pages)
+static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- return end >= start;
+ return true;
}
#endif

@@ -2150,6 +2146,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;

+ if (end <= start)
+ return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return 0;

@@ -2165,7 +2163,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting.
*/

- if (gup_fast_permitted(start, nr_pages)) {
+ if (gup_fast_permitted(start, end)) {
local_irq_save(flags);
gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
local_irq_restore(flags);
@@ -2224,13 +2222,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;

- if (nr_pages <= 0)
+ if (end <= start)
return 0;
-
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;

- if (gup_fast_permitted(start, nr_pages)) {
+ if (gup_fast_permitted(start, end)) {
local_irq_disable();
gup_pgd_range(addr, end, gup_flags, pages, &nr);
local_irq_enable();
--
2.20.1

2019-06-25 14:40:37

by Christoph Hellwig

[permalink] [raw]
Subject: [PATCH 04/16] MIPS: use the generic get_user_pages_fast code

The mips code is mostly equivalent to the generic one, minus various
bugfixes and an arch override for gup_fast_permitted.

Note that this defines ARCH_HAS_PTE_SPECIAL for mips as mips has
pte_special and pte_mkspecial implemented and used in the existing
gup code. They are no-op stubs, though, which makes me a little unsure
whether this is really the right thing to do.
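
(For reference, "no-op stubs" here means something like the following
-- a sketch inferred from the description above, not a quote of the
mips headers:)

	static inline int pte_special(pte_t pte)	{ return 0; }
	static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }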

Note that this also adds back a missing cpu_has_dc_aliases check for
__get_user_pages_fast, which the old code was only doing for
get_user_pages_fast. This clearly looks like an oversight, as any
condition that makes get_user_pages_fast unsafe also applies to
__get_user_pages_fast.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
---
arch/mips/Kconfig | 3 +
arch/mips/include/asm/pgtable.h | 3 +
arch/mips/mm/Makefile | 1 -
arch/mips/mm/gup.c | 303 --------------------------------
4 files changed, 6 insertions(+), 304 deletions(-)
delete mode 100644 arch/mips/mm/gup.c

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 70d3200476bf..64108a2a16d4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -6,6 +6,7 @@ config MIPS
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_UPROBES
@@ -34,6 +35,7 @@ config MIPS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_JUMP_LABEL
@@ -55,6 +57,7 @@ config MIPS
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
+ select HAVE_GENERIC_GUP
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4ccb465ef3f2..7d27194e3b45 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,7 @@
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
+#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;
@@ -626,6 +627,8 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

+#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
+
#include <asm-generic/pgtable.h>

/*
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index f34d7ff5eb60..1e8d335025d7 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -7,7 +7,6 @@ obj-y += cache.o
obj-y += context.o
obj-y += extable.o
obj-y += fault.o
-obj-y += gup.o
obj-y += init.o
obj-y += mmap.o
obj-y += page.o
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
deleted file mode 100644
index 4c2b4483683c..000000000000
--- a/arch/mips/mm/gup.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for MIPS
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- * Copyright (C) 2011 Ralf Baechle
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/hugetlb.h>
-
-#include <asm/cpu-features.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#else
- return READ_ONCE(*ptep);
-#endif
-}
-
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t *ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- if (!pte_present(pte) ||
- pte_special(pte) || (write && !pte_write(pte))) {
- pte_unmap(ptep);
- return 0;
- }
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- SetPageReferenced(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- pte_unmap(ptep - 1);
- return 1;
-}
-
-static inline void get_head_page_multiple(struct page *page, int nr)
-{
- VM_BUG_ON(page != compound_head(page));
- VM_BUG_ON(page_count(page) == 0);
- page_ref_add(page, nr);
- SetPageReferenced(page);
-}
-
-static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pmd;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_huge(pmd))) {
- if (!gup_huge_pmd(pmd, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pte_range(pmd, addr, next, write, pages,nr))
- return 0;
- }
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pud;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_huge(pud))) {
- if (!gup_huge_pud(pud, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pmd_range(pud, addr, next, write, pages,nr))
- return 0;
- }
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok((void __user *)start, len)))
- return 0;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch
- * size will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int ret, nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start || cpu_has_dc_aliases)
- goto slow_irqon;
-
- /* XXX: batch / limit 'nr' */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-slow:
- local_irq_enable();
-
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
- pages, gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
- return ret;
-}
--
2.20.1

2019-06-25 20:02:34

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH 14/16] mm: move the powerpc hugepd code to mm/gup.c

On Tue, 25 Jun 2019 16:37:13 +0200 Christoph Hellwig <[email protected]> wrote:

> +static int gup_huge_pd(hugepd_t hugepd

Naming nitlet: we have hugepd and we also have huge_pd. We have
hugepte and we also have huge_pte. It makes things a bit hard to
remember and it would be nice to make it all consistent sometime.

We're consistent with huge_pud and almost consistent with huge_pmd.

To be fully consistent I guess we should make all of them have the
underscore. Or not have it.

2019-06-26 05:52:07

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH 14/16] mm: move the powerpc hugepd code to mm/gup.c

On Tue, Jun 25, 2019 at 12:37:57PM -0700, Andrew Morton wrote:
> On Tue, 25 Jun 2019 16:37:13 +0200 Christoph Hellwig <[email protected]> wrote:
>
> > +static int gup_huge_pd(hugepd_t hugepd
>
> Naming nitlet: we have hugepd and we also have huge_pd. We have
> hugepte and we also have huge_pte. It makes things a bit hard to
> remember and it would be nice to make it all consistent sometime.
>
> We're consistent with huge_pud and almost consistent with huge_pmd.
>
> To be fully consistent I guess we should make all of them have the
> underscore. Or not have it.

Either way is fine with me. Feel free to fix up per your preference.

2019-06-29 14:38:21

by Guenter Roeck

[permalink] [raw]
Subject: Re: [PATCH 04/16] MIPS: use the generic get_user_pages_fast code

Hi,

On Tue, Jun 25, 2019 at 04:37:03PM +0200, Christoph Hellwig wrote:
> The mips code is mostly equivalent to the generic one, minus various
> bugfixes and an arch override for gup_fast_permitted.
>
> Note that this defines ARCH_HAS_PTE_SPECIAL for mips as mips has
> pte_special and pte_mkspecial implemented and used in the existing
> gup code. They are no-op stubs, though, which makes me a little unsure
> whether this is really the right thing to do.
>
> Note that this also adds back a missing cpu_has_dc_aliases check for
> __get_user_pages_fast, which the old code was only doing for
> get_user_pages_fast. This clearly looks like an oversight, as any
> condition that makes get_user_pages_fast unsafe also applies to
> __get_user_pages_fast.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Jason Gunthorpe <[email protected]>

This patch causes all mips images (mips, mips64, mipsel, mipsel64)
to crash when booting in qemu. Unfortunately the patch cannot be
reverted easily since there are context changes, causing build failures
after the revert, so I cannot verify whether this is the only problem.

Crash log (same for all variants):

...
Run /sbin/init as init process
BUG: Bad page map in process mount pte:00b70401 pmd:8e5dc000
page:80c24880 refcount:1 mapcount:-1 mapping:00000000 index:0x0
flags: 0x1000(reserved)
raw: 00001000 80c24884 80c24884 00000000 00000000 00000000 fffffffe 00000001
page dumped because: bad pte
addr:(ptrval) vm_flags:04044411 anon_vma:(ptrval) mapping:(ptrval) index:0
qemu-system-mips: terminating on signal 15 from pid 13034 (/bin/bash)

Guenter

---
bisect log:

# bad: [48568d8c7f479ec45b9c3d02b4b1895f3ef61a03] Add linux-next specific files for 20190628
# good: [4b972a01a7da614b4796475f933094751a295a2f] Linux 5.2-rc6
git bisect start 'HEAD' 'v5.2-rc6'
# good: [89a77c9176fe88f68c3bf7bd255cfea6797258d4] Merge remote-tracking branch 'crypto/master'
git bisect good 89a77c9176fe88f68c3bf7bd255cfea6797258d4
# good: [2cedca636ad73ed838bd636685b245404e490c73] Merge remote-tracking branch 'security/next-testing'
git bisect good 2cedca636ad73ed838bd636685b245404e490c73
# good: [ea260819fdc2f8a64e6c87f3ad80ecc5e4015921] Merge remote-tracking branch 'char-misc/char-misc-next'
git bisect good ea260819fdc2f8a64e6c87f3ad80ecc5e4015921
# good: [aca42ca2a32eacf804ac56a33526f049debc8ec0] Merge remote-tracking branch 'rpmsg/for-next'
git bisect good aca42ca2a32eacf804ac56a33526f049debc8ec0
# good: [f4cd0c7f3c07876f7173b5306e974644c6eec141] Merge remote-tracking branch 'pidfd/for-next'
git bisect good f4cd0c7f3c07876f7173b5306e974644c6eec141
# bad: [09c57a8ab1fc3474b4a620247a0f9e3ac61c4cfe] mm/sparsemem: support sub-section hotplug
git bisect bad 09c57a8ab1fc3474b4a620247a0f9e3ac61c4cfe
# good: [aaffcf10880c363870413c5cdee5dfb6a923e9ae] mm: memcontrol: dump memory.stat during cgroup OOM
git bisect good aaffcf10880c363870413c5cdee5dfb6a923e9ae
# bad: [81d90bb2d2784258ed7c0762ecf34d4665198bad] um: switch to generic version of pte allocation
git bisect bad 81d90bb2d2784258ed7c0762ecf34d4665198bad
# bad: [dadae650472841f004882a2409aa844e37809c60] sparc64-add-the-missing-pgd_page-definition-fix
git bisect bad dadae650472841f004882a2409aa844e37809c60
# good: [d1edd06c6ac8c8c49345ff34de1c72ee571f3f7b] mm: memcg/slab: stop setting page->mem_cgroup pointer for slab pages
git bisect good d1edd06c6ac8c8c49345ff34de1c72ee571f3f7b
# good: [b1ceaacca9e63794bd3f574c928e7e6aca01bce7] mm: simplify gup_fast_permitted
git bisect good b1ceaacca9e63794bd3f574c928e7e6aca01bce7
# bad: [59f238b3353caf43b118e1bb44010aa1abd56d7f] sh: add the missing pud_page definition
git bisect bad 59f238b3353caf43b118e1bb44010aa1abd56d7f
# bad: [93a184240a74cb0242b9b970f0bc018c4fdf24fd] MIPS: use the generic get_user_pages_fast code
git bisect bad 93a184240a74cb0242b9b970f0bc018c4fdf24fd
# good: [7c6a77cff73127e9495e345a0903d55b1b0fb323] mm: lift the x86_32 PAE version of gup_get_pte to common code
git bisect good 7c6a77cff73127e9495e345a0903d55b1b0fb323
# first bad commit: [93a184240a74cb0242b9b970f0bc018c4fdf24fd] MIPS: use the generic get_user_pages_fast code

2019-06-29 15:17:11

by Guenter Roeck

[permalink] [raw]
Subject: Re: [PATCH 06/16] sh: use the generic get_user_pages_fast code

On Tue, Jun 25, 2019 at 04:37:05PM +0200, Christoph Hellwig wrote:
> The sh code is mostly equivalent to the generic one, minus various
> bugfixes and two arch overrides that this patch adds to pgtable.h.
>
> Signed-off-by: Christoph Hellwig <[email protected]>

sh:defconfig no longer builds with this patch applied.

mm/gup.c: In function 'gup_huge_pud':
arch/sh/include/asm/pgtable-3level.h:40:36: error:
implicit declaration of function 'pud_pfn'; did you mean 'pte_pfn'?

Bisect log attached.

Guenter

---
# bad: [48568d8c7f479ec45b9c3d02b4b1895f3ef61a03] Add linux-next specific files for 20190628
# good: [4b972a01a7da614b4796475f933094751a295a2f] Linux 5.2-rc6
git bisect start 'HEAD' 'v5.2-rc6'
# good: [89a77c9176fe88f68c3bf7bd255cfea6797258d4] Merge remote-tracking branch 'crypto/master'
git bisect good 89a77c9176fe88f68c3bf7bd255cfea6797258d4
# good: [2cedca636ad73ed838bd636685b245404e490c73] Merge remote-tracking branch 'security/next-testing'
git bisect good 2cedca636ad73ed838bd636685b245404e490c73
# good: [ea260819fdc2f8a64e6c87f3ad80ecc5e4015921] Merge remote-tracking branch 'char-misc/char-misc-next'
git bisect good ea260819fdc2f8a64e6c87f3ad80ecc5e4015921
# good: [aca42ca2a32eacf804ac56a33526f049debc8ec0] Merge remote-tracking branch 'rpmsg/for-next'
git bisect good aca42ca2a32eacf804ac56a33526f049debc8ec0
# good: [f4cd0c7f3c07876f7173b5306e974644c6eec141] Merge remote-tracking branch 'pidfd/for-next'
git bisect good f4cd0c7f3c07876f7173b5306e974644c6eec141
# bad: [09c57a8ab1fc3474b4a620247a0f9e3ac61c4cfe] mm/sparsemem: support sub-section hotplug
git bisect bad 09c57a8ab1fc3474b4a620247a0f9e3ac61c4cfe
# good: [aaffcf10880c363870413c5cdee5dfb6a923e9ae] mm: memcontrol: dump memory.stat during cgroup OOM
git bisect good aaffcf10880c363870413c5cdee5dfb6a923e9ae
# bad: [81d90bb2d2784258ed7c0762ecf34d4665198bad] um: switch to generic version of pte allocation
git bisect bad 81d90bb2d2784258ed7c0762ecf34d4665198bad
# bad: [dadae650472841f004882a2409aa844e37809c60] sparc64-add-the-missing-pgd_page-definition-fix
git bisect bad dadae650472841f004882a2409aa844e37809c60
# good: [d1edd06c6ac8c8c49345ff34de1c72ee571f3f7b] mm: memcg/slab: stop setting page->mem_cgroup pointer for slab pages
git bisect good d1edd06c6ac8c8c49345ff34de1c72ee571f3f7b
# good: [b1ceaacca9e63794bd3f574c928e7e6aca01bce7] mm: simplify gup_fast_permitted
git bisect good b1ceaacca9e63794bd3f574c928e7e6aca01bce7
# good: [59f238b3353caf43b118e1bb44010aa1abd56d7f] sh: add the missing pud_page definition
git bisect good 59f238b3353caf43b118e1bb44010aa1abd56d7f
# bad: [51bbf54b3f26a85217db720f4e5b01a6c4d3f010] sparc64: add the missing pgd_page definition
git bisect bad 51bbf54b3f26a85217db720f4e5b01a6c4d3f010
# bad: [be748d6e72113580af7e37ad68a0047659e60189] sh: use the generic get_user_pages_fast code
git bisect bad be748d6e72113580af7e37ad68a0047659e60189
# first bad commit: [be748d6e72113580af7e37ad68a0047659e60189] sh: use the generic get_user_pages_fast code

2019-07-17 22:01:35

by Dmitry V. Levin

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

Hi,

On Tue, Jun 25, 2019 at 04:37:08PM +0200, Christoph Hellwig wrote:
> The sparc64 code is mostly equivalent to the generic one, minus various
> bugfixes and two arch overrides that this patch adds to pgtable.h.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Khalid Aziz <[email protected]>
> ---
> arch/sparc/Kconfig | 1 +
> arch/sparc/include/asm/pgtable_64.h | 18 ++
> arch/sparc/mm/Makefile | 2 +-
> arch/sparc/mm/gup.c | 340 ----------------------------
> 4 files changed, 20 insertions(+), 341 deletions(-)
> delete mode 100644 arch/sparc/mm/gup.c

So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
(thanks to Anatoly for bisecting) and introduced a regression:
futex.test from the strace test suite now causes an Oops on sparc64
in futex syscall.

Here is a heavily stripped down reproducer:

// SPDX-License-Identifier: GPL-2.0-or-later
#include <err.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <asm/unistd.h>
int main(void)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t alloc_size = 3 * page_size;
	void *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (MAP_FAILED == p)
		err(EXIT_FAILURE, "mmap(%zu)", alloc_size);

	void *hole = p + page_size;

	if (munmap(hole, page_size))
		err(EXIT_FAILURE, "munmap(%p, %zu)", hole, page_size);

	syscall(__NR_futex, (unsigned long) hole, 0L, 0L, 0L, 0L, 0L);
	return 0;
}
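
Building and running it natively on an affected sparc64 machine is
enough to trigger the oops; for example (the compiler flags and file
names here are arbitrary):

	gcc -O2 -o futex-repro futex-repro.c
	./futex-repro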

--
ldv



2019-07-17 22:06:35

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Wed, Jul 17, 2019 at 2:59 PM Dmitry V. Levin <[email protected]> wrote:
>
> So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> (thanks to Anatoly for bisecting) and introduced a regression:
> futex.test from the strace test suite now causes an Oops on sparc64
> in futex syscall.

Can you post the oops here in the same thread too? Maybe it's already
posted somewhere else, but I can't seem to find anything likely on
lkml at least..

On x86-64, it obviously just causes the (expected) EFAULT error from
the futex call.

Somebody with access to sparc64 probably needs to debug this, but
having the exact oops wouldn't hurt...

Linus

2019-07-17 23:31:12

by Dmitry V. Levin

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Wed, Jul 17, 2019 at 03:04:56PM -0700, Linus Torvalds wrote:
> On Wed, Jul 17, 2019 at 2:59 PM Dmitry V. Levin <[email protected]> wrote:
> >
> > So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> > (thanks to Anatoly for bisecting) and introduced a regression:
> > futex.test from the strace test suite now causes an Oops on sparc64
> > in futex syscall.
>
> Can you post the oops here in the same thread too? Maybe it's already
> posted somewhere else, but I can't seem to find anything likely on
> lkml at least..

Sure, here it is:

[ 514.137217] Unable to handle kernel paging request at virtual address 00060000541d0000
[ 514.137295] tsk->{mm,active_mm}->context = 00000000000005b2
[ 514.137343] tsk->{mm,active_mm}->pgd = fff80024955a2000
[ 514.137387] \|/ ____ \|/
"@'/ .. \`@"
/_| \__/ |_\
\__U_/
[ 514.137493] futex(1599): Oops [#1]
[ 514.137533] CPU: 26 PID: 1599 Comm: futex Not tainted 5.2.0-05721-gd3649f68b433 #1096
[ 514.137595] TSTATE: 0000000011001603 TPC: 000000000051adc4 TNPC: 000000000051adc8 Y: 00000000 Not tainted
[ 514.137678] TPC: <get_futex_key+0xe4/0x6a0>
[ 514.137712] g0: 0000000000000000 g1: 0000000000e75178 g2: 000000000009a21d g3: 0000000000000000
[ 514.137769] g4: fff8002474fbc0e0 g5: fff80024aa80c000 g6: fff8002495aec000 g7: 0000000000000200
[ 514.137825] o0: 0000000000000001 o1: 0000000000000001 o2: 0000000000000000 o3: fff8002495aefa28
[ 514.137882] o4: fff8000100030000 o5: fff800010002e000 sp: fff8002495aef161 ret_pc: 000000000051ada4
[ 514.137944] RPC: <get_futex_key+0xc4/0x6a0>
[ 514.137978] l0: 000000000051b144 l1: 0000000000000001 l2: 0000000000c01950 l3: fff80024626051c0
[ 514.138036] l4: 0000000000c01970 l5: 0000000000cf6800 l6: 00060000541d13f0 l7: 00000000014b3000
[ 514.138094] i0: 0000000000000001 i1: 000000000051af30 i2: fff8002495aefc28 i3: 0000000000000001
[ 514.138152] i4: 0000000000cf69b0 i5: fff800010002e000 i6: fff8002495aef231 i7: 000000000051b3a8
[ 514.138211] I7: <futex_wait_setup+0x28/0x120>
[ 514.138245] Call Trace:
[ 514.138271] [000000000051b3a8] futex_wait_setup+0x28/0x120
[ 514.138313] [000000000051b550] futex_wait+0xb0/0x200
[ 514.138352] [000000000051d734] do_futex+0xd4/0xc00
[ 514.138390] [000000000051e384] sys_futex+0x124/0x140
[ 514.138435] [0000000000406294] linux_sparc_syscall+0x34/0x44
[ 514.138478] Disabling lock debugging due to kernel taint
[ 514.138501] Caller[000000000051b3a8]: futex_wait_setup+0x28/0x120
[ 514.138524] Caller[000000000051b550]: futex_wait+0xb0/0x200
[ 514.138547] Caller[000000000051d734]: do_futex+0xd4/0xc00
[ 514.138568] Caller[000000000051e384]: sys_futex+0x124/0x140
[ 514.138590] Caller[0000000000406294]: linux_sparc_syscall+0x34/0x44
[ 514.138614] Caller[0000010000000e90]: 0x10000000e90
[ 514.138633] Instruction DUMP:
[ 514.138635] 0640016e
[ 514.138650] b13da000
[ 514.138663] ec5fa7f7
[ 514.138676] <c25da008>
[ 514.138689] ae100016
[ 514.138702] 84086001
[ 514.138714] 82007fff
[ 514.138727] af789401
[ 514.138740] f05de018


--
ldv



2019-07-18 00:18:17

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Wed, Jul 17, 2019 at 4:30 PM Dmitry V. Levin <[email protected]> wrote:
>
> Sure, here it is:

Hmm. I'm not seeing anything obviously wrong in the generic gup conversion.

From the oops, I assume that the problem is that get_user_pages_fast()
returned an invalid page, causing the bad access later in
get_futex_key(). But that's odd too, considering that
get_user_pages_fast() had already accessed the page (both for looking
up the head, and for then doing things like SetPageReferenced(page)).

The only half-way subtle thing is the pte_access_permitted() movement,
but it looks like it matches what gup_pte_range() did in the original
sparc64 code. And the address masking is done the same way too, as far
as I can tell.

So clearly there's something wrong there, but I'm not seeing it. Maybe
I'm incorrectly looking at that pte case, and the problem happened
earlier.

Anyway, I suspect some sparc64 person needs to delve into it.

I know this got reviewed by sparc64 people (the final commit message
only has a single Reviewed-by, but I see an Ack by Davem in my mail
that seems to have gotten lost by the time the patch made it in), but
maybe actually nobody ever _tested_ it until it hit my tree?

Linus

2019-07-18 01:22:34

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: Linus Torvalds <[email protected]>
Date: Wed, 17 Jul 2019 17:17:16 -0700

> Anyway, I suspect some sparc64 person needs to delve into it.

I'll take a look at it soon if someone doesn't figure it out before
me.

2019-07-18 21:14:47

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: "Dmitry V. Levin" <[email protected]>
Date: Thu, 18 Jul 2019 00:59:56 +0300

> So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> (thanks to Anatoly for bisecting) and introduced a regression:
> futex.test from the strace test suite now causes an Oops on sparc64
> in futex syscall.
>
> Here is a heavily stripped down reproducer:

Does not reproduce for me on a T4-2 machine.

So this problem might depend on the type of system you are on.
I suspect it's one of those "pre-Niagara vs. Niagara and later"
situations because that's the dividing line between two sets of
wildly different TLB and cache management methods.

What kind of machine are you on?

2019-07-18 21:44:20

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: Linus Torvalds <[email protected]>
Date: Wed, 17 Jul 2019 17:17:16 -0700

> From the oops, I assume that the problem is that get_user_pages_fast()
> returned an invalid page, causing the bad access later in
> get_futex_key().

That's correct. It's the first deref of page that oopses.


> But that's odd too, considering that get_user_pages_fast() had
> already accessed the page (both for looking up the head, and for
> then doing things like SetPageReferenced(page)).

Even the huge page cases all do that dereference as well, so it is
indeed a mystery how the pointer works inside of get_user_pages_fast()
but becomes garbage in the caller.

This page pointer sits on the stack, so maybe something stores garbage
there meanwhile. Maybe the issue is even compiler dependent.

I'll keep looking over the changes made here for clues.

2019-07-19 06:02:25

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Thu, Jul 18, 2019 at 02:14:05PM -0700, David Miller wrote:
> From: "Dmitry V. Levin" <[email protected]>
> Date: Thu, 18 Jul 2019 00:59:56 +0300
>
> > So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> > (thanks to Anatoly for bisecting) and introduced a regression:
> > futex.test from the strace test suite now causes an Oops on sparc64
> > in futex syscall.
> >
> > Here is a heavily stripped down reproducer:
>
> Does not reproduce for me on a T4-2 machine.
>
> So this problem might depend on the type of system you are on.
> I suspect it's one of those "pre-Niagara vs. Niagara and later"
> situations because that's the dividing line between two sets of
> wildly different TLB and cache management methods.
>
> What kind of machine are you on?

FYI, I'm pretty sure I tested this on Guenther's build-test qemu
setup in the end, which further speaks for this being a
machine-dependent issue.

2019-07-24 19:33:41

by Anatoly Pugachev

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Fri, Jul 19, 2019 at 12:14 AM David Miller <[email protected]> wrote:
> > So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> > (thanks to Anatoly for bisecting) and introduced a regression:
> > futex.test from the strace test suite now causes an Oops on sparc64
> > in futex syscall.
> >
> > Here is a heavily stripped down reproducer:
>
> Does not reproduce for me on a T4-2 machine.
>
> So this problem might depend on the type of system you are on.
> I suspect it's one of those "pre-Niagara vs. Niagara and later"
> situations because that's the dividing line between two sets of
> wildly different TLB and cache management methods.
>
> What kind of machine are you on?

David,

the first test where it was discovered was done on my test LDOM named
ttip; the hardware (hypervisor) is a T5-2 server running under the
Solaris 11.4 OS.
The ttip LDOM runs debian sparc64 unstable, so with almost all the
latest software (gcc 8.3.0, binutils 2.32.51.20190707-1, debian GLIBC
2.28-10, etc.).

For another test, I also installed an LDOM with Oracle sparc linux
(https://oss.oracle.com/projects/linux-sparc/), but I had to install a
fresher version of gcc on it first, since the system-installed gcc 4.4
is too old for a git kernel (linux-2.6/Documentation/Changes lists gcc
4.6 as the minimal version), so I chose to install gcc-7.4.0 to /opt/
(leaving the system-installed gcc 4.4 under /usr/bin). I compiled and
installed a git kernel, i.e. the last tag 5.3.0-rc1, and ran the test.
The kernel still produced the oops.

2019-07-24 20:16:15

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: Anatoly Pugachev <[email protected]>
Date: Wed, 24 Jul 2019 22:32:17 +0300

> the first test where it was discovered was done on my test LDOM named
> ttip; the hardware (hypervisor) is a T5-2 server running under the
> Solaris 11.4 OS.
> The ttip LDOM runs debian sparc64 unstable, so with almost all the
> latest software (gcc 8.3.0, binutils 2.32.51.20190707-1, debian GLIBC
> 2.28-10, etc.).
>
> For another test, I also installed an LDOM with Oracle sparc linux
> (https://oss.oracle.com/projects/linux-sparc/), but I had to install a
> fresher version of gcc on it first, since the system-installed gcc 4.4
> is too old for a git kernel (linux-2.6/Documentation/Changes lists gcc
> 4.6 as the minimal version), so I chose to install gcc-7.4.0 to /opt/
> (leaving the system-installed gcc 4.4 under /usr/bin). I compiled and
> installed a git kernel, i.e. the last tag 5.3.0-rc1, and ran the test.
> The kernel still produced the oops.

I suspect, therefore, that we have a miscompile.

Please put your unstripped vmlinux image somewhere so I can take a closer
look.

Thank you.

2019-07-25 18:35:03

by Anatoly Pugachev

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Wed, Jul 24, 2019 at 11:13 PM David Miller <[email protected]> wrote:
>
> From: Anatoly Pugachev <[email protected]>
> Date: Wed, 24 Jul 2019 22:32:17 +0300
>
> > the first test where it was discovered was done on my test LDOM named
> > ttip; the hardware (hypervisor) is a T5-2 server running under the
> > Solaris 11.4 OS.
> > The ttip LDOM runs debian sparc64 unstable, so with almost all the
> > latest software (gcc 8.3.0, binutils 2.32.51.20190707-1, debian GLIBC
> > 2.28-10, etc.).
> >
> > For another test, I also installed an LDOM with Oracle sparc linux
> > (https://oss.oracle.com/projects/linux-sparc/), but I had to install a
> > fresher version of gcc on it first, since the system-installed gcc 4.4
> > is too old for a git kernel (linux-2.6/Documentation/Changes lists gcc
> > 4.6 as the minimal version), so I chose to install gcc-7.4.0 to /opt/
> > (leaving the system-installed gcc 4.4 under /usr/bin). I compiled and
> > installed a git kernel, i.e. the last tag 5.3.0-rc1, and ran the test.
> > The kernel still produced the oops.
>
> I suspect, therefore, that we have a miscompile.
>
> Please put your unstripped vmlinux image somewhere so I can take a closer
> look.

David,

http://u164.east.ru/kernel/

there's vmlinuz-5.3.0-rc1 kernel and archive 5.3.0-rc1-modules.tar.gz
of /lib/modules/5.3.0-rc1/
this is from oracle sparclinux LDOM , compiled with 7.4.0 gcc

Thank you.

2019-07-25 22:54:53

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: Anatoly Pugachev <[email protected]>
Date: Thu, 25 Jul 2019 21:33:24 +0300

> http://u164.east.ru/kernel/
>
> there's vmlinuz-5.3.0-rc1 kernel and archive 5.3.0-rc1-modules.tar.gz
> of /lib/modules/5.3.0-rc1/
> this is from oracle sparclinux LDOM , compiled with 7.4.0 gcc

Thank you, I'll take a look.

2019-07-26 19:37:06

by Khalid Aziz

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On 7/17/19 3:59 PM, Dmitry V. Levin wrote:
> Hi,
>
> On Tue, Jun 25, 2019 at 04:37:08PM +0200, Christoph Hellwig wrote:
>> The sparc64 code is mostly equivalent to the generic one, minus various
>> bugfixes and two arch overrides that this patch adds to pgtable.h.
>>
>> Signed-off-by: Christoph Hellwig <[email protected]>
>> Reviewed-by: Khalid Aziz <[email protected]>
>> ---
>> arch/sparc/Kconfig | 1 +
>> arch/sparc/include/asm/pgtable_64.h | 18 ++
>> arch/sparc/mm/Makefile | 2 +-
>> arch/sparc/mm/gup.c | 340 ----------------------------
>> 4 files changed, 20 insertions(+), 341 deletions(-)
>> delete mode 100644 arch/sparc/mm/gup.c
>
> So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505
> (thanks to Anatoly for bisecting) and introduced a regression:
> futex.test from the strace test suite now causes an Oops on sparc64
> in futex syscall
>

I have been working on reproducing this problem but ran into a
different issue. I found that 5.1 and newer kernels no longer boot on
an S7 server or in an ldom on a T7 server (the kernel hangs after
"crc32c_sparc64: Using sparc64 crc32c opcode optimized CRC32C
implementation" on the console). A long git bisect session between 5.0
and 5.1 pointed to commit 73a66023c937 ("sparc64: fix sparc_ipc type
conversion") but that makes no sense. I will keep working on finding
the root cause. I wonder if Anatoly's git bisect result is also
suspect.

--
Khalid


2019-07-28 02:10:36

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

From: Anatoly Pugachev <[email protected]>
Date: Thu, 25 Jul 2019 21:33:24 +0300

> http://u164.east.ru/kernel/
>
> there's vmlinuz-5.3.0-rc1 kernel and archive 5.3.0-rc1-modules.tar.gz
> of /lib/modules/5.3.0-rc1/
> this is from oracle sparclinux LDOM , compiled with 7.4.0 gcc

Please, I really really need the unstripped kernel image with all the
symbols. This vmlinuz file is stripped already. The System.map does
not serve as a replacement.

Thank you.

2019-07-28 20:02:03

by Anatoly Pugachev

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Sun, Jul 28, 2019 at 5:09 AM David Miller <[email protected]> wrote:
> From: Anatoly Pugachev <[email protected]>
> Date: Thu, 25 Jul 2019 21:33:24 +0300
> > there's vmlinuz-5.3.0-rc1 kernel and archive 5.3.0-rc1-modules.tar.gz
> > of /lib/modules/5.3.0-rc1/
> > this is from oracle sparclinux LDOM , compiled with 7.4.0 gcc
>
> Please, I really really need the unstripped kernel image with all the
> symbols. This vmlinuz file is stripped already. The System.map does
> not serve as a replacement.

David,

http://u164.east.ru/kernel2/

I'm sorry, I missed the debug kernel at first. I have now enabled
CONFIG_DEBUG_INFO=y.

2019-08-09 20:01:30

by Anatoly Pugachev

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Thu, Jul 18, 2019 at 12:59 AM Dmitry V. Levin <[email protected]> wrote:
> On Tue, Jun 25, 2019 at 04:37:08PM +0200, Christoph Hellwig wrote:
> > The sparc64 code is mostly equivalent to the generic one, minus various
> > bugfixes and two arch overrides that this patch adds to pgtable.h.
> >
> > Signed-off-by: Christoph Hellwig <[email protected]>
> > Reviewed-by: Khalid Aziz <[email protected]>
> > ---
> > arch/sparc/Kconfig | 1 +
> > arch/sparc/include/asm/pgtable_64.h | 18 ++
> > arch/sparc/mm/Makefile | 2 +-
> > arch/sparc/mm/gup.c | 340 ----------------------------
> > 4 files changed, 20 insertions(+), 341 deletions(-)
> > delete mode 100644 arch/sparc/mm/gup.c
>
> So this ended up as commit 7b9afb86b6328f10dc2cad9223d7def12d60e505

I've tried to revert this commit on the current master branch, but I'm getting:

linux-2.6$ git show 7b9afb86b632 > /tmp/gup.patch
linux-2.6$ patch -p1 -R < /tmp/gup.patch
...
linux-2.6$ make -j && make -j modules
...
CALL scripts/atomic/check-atomics.sh
CALL scripts/checksyscalls.sh
<stdin>:1511:2: warning: #warning syscall clone3 not implemented [-Wcpp]
CHK include/generated/compile.h
CHK include/generated/autoksyms.h
GEN .version
CHK include/generated/compile.h
UPD include/generated/compile.h
CC init/version.o
AR init/built-in.a
LD vmlinux.o
ld: mm/gup.o: in function `__get_user_pages_fast':
gup.c:(.text+0x1bc0): multiple definition of `__get_user_pages_fast';
arch/sparc/mm/gup.o:gup.c:(.text+0x620): first defined here
ld: mm/gup.o: in function `get_user_pages_fast':
gup.c:(.text+0x1be0): multiple definition of `get_user_pages_fast';
arch/sparc/mm/gup.o:gup.c:(.text+0x740): first defined here
make: *** [Makefile:1060: vmlinux] Error 1

Can someone help me revert this commit? Is it even possible? It's not
only the futex strace calls that get killed and produce an OOPS; even
the util-linux.git 'make check' hangs the machine/LDOM with multiple
OOPSes in the logs, while the previous kernel (before this commit)
passes the tests OK (and without a kernel OOPS). I've already tried to
compile current master with the debian gcc-6, gcc-7 and gcc-8
versions, but all produce the same OOPS.

Thanks.

2019-08-10 07:17:52

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

There isn't really a way to use an arch-specific get_user_pages_fast
in mainline; you'd need to revert the whole series. As a relatively
quick workaround you can just remove the

select HAVE_FAST_GUP if SPARC64

line from arch/sparc/Kconfig
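
That is, something like this one-liner (a sketch; the hunk context and
line numbers are elided and may differ in your tree):

--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ ... @@ config SPARC
-	select HAVE_FAST_GUP if SPARC64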

2019-08-10 19:37:26

by Mikael Pettersson

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

For the record, the futex test case OOPSes a 5.3-rc3 kernel running on
a Sun Blade 2500 (2 x USIIIi). This system runs a custom distro with
a custom toolchain (gcc-8.3 based), so I doubt it's a distro problem.

On Sat, Aug 10, 2019 at 9:17 AM Christoph Hellwig <[email protected]> wrote:
>
> There isn't really a way to use an arch-specific get_user_pages_fast
> in mainline, you'd need to revert the whole series. As a relatively
> quick workaround you can just remove the
>
> select HAVE_FAST_GUP if SPARC64
>
> line from arch/sparc/Kconfig

2019-08-11 20:31:53

by Anatoly Pugachev

[permalink] [raw]
Subject: Re: [PATCH 09/16] sparc64: use the generic get_user_pages_fast code

On Sat, Aug 10, 2019 at 10:36 PM Mikael Pettersson <[email protected]> wrote:
> For the record the futex test case OOPSes a 5.3-rc3 kernel running on
> a Sun Blade 2500 (2 x USIIIi). This system runs a custom distro with
> a custom toolchain (gcc-8.3 based), so I doubt it's a distro problem.

Mikael, Khalid,

can you please test the util-linux source code with 'make check' on a
current git kernel and post the results?
https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git

Thanks.

As with my test machine/LDOM, util-linux 'make check' hangs the git
kernel with the OOPS at the end of this message.

PS: I was able to revert the patch so that the current kernel git
master branch works again: the futex strace test works as before (it
is not killed and does not produce a kernel OOPS), and util-linux
'make check' no longer kills the kernel. If anyone is interested I can
post the patch, but I'm not sure it's the right thing to do, given
that all other architectures were converted to use the generic GUP
code (mm/gup.c).


[ 47.600488] BUG: Bad rss-counter state mm:00000000ae46ef00 idx:0 val:-17
[ 47.600645] BUG: Bad rss-counter state mm:00000000ae46ef00 idx:1 val:102
[ 47.673090] fdisk[4270]: segfault at 20 ip fff8000100007ed8 (rpc
fff8000100007e30) sp 000007feffe79661 error 1 in
ld-2.28.so[fff8000100000000+22000]
[ 47.674415] BUG: Bad rss-counter state mm:00000000ca65883c idx:0 val:17
[ 47.674722] BUG: Bad rss-counter state mm:00000000ca65883c idx:1 val:1
[ 47.785453] ------------[ cut here ]------------
[ 47.785722] WARNING: CPU: 17 PID: 96 at mm/slab.h:410
kmem_cache_free+0xb4/0x300
[ 47.785880] virt_to_cache: Object is not a Slab page!
[ 47.786003] Modules linked in: tun ip_set_hash_net ip_set nf_tables
nfnetlink binfmt_misc camellia_sparc64 des_sparc64 des_generic
aes_sparc64 md5_sparc64 sha512_sparc64 sha256_sparc64 n2_rng rng_core
flash sha1_sparc64 ip_tables x_tables ipv6 nf_defrag_ipv6 autofs4 ext4
crc16 mbcache jbd2 raid10 raid456 async_raid6_recov async_memcpy
async_pq async_xor xor async_tx raid6_pq raid1 raid0 multipath linear
md_mod crc32c_sparc64
[ 47.787041] CPU: 17 PID: 96 Comm: ksoftirqd/17 Not tainted 5.3.0-rc3 #1143
[ 47.787181] Call Trace:
[ 47.787268] [0000000000464540] __warn+0xc0/0x100
[ 47.787384] [00000000004645b4] warn_slowpath_fmt+0x34/0x60
[ 47.787512] [00000000006758f4] kmem_cache_free+0xb4/0x300
[ 47.787648] [0000000000451b68] pgtable_free+0x28/0x40
[ 47.787779] [00000000006470fc] tlb_remove_table_rcu+0x3c/0x80
[ 47.787928] [00000000004fae94] rcu_core+0xbd4/0x1000
[ 47.788048] [00000000004fb7ac] rcu_core_si+0xc/0x20
[ 47.788177] [0000000000abc648] __do_softirq+0x288/0x500
[ 47.788296] [000000000046beb0] run_ksoftirqd+0x30/0x80
[ 47.788433] [0000000000493c64] smpboot_thread_fn+0x244/0x280
[ 47.788575] [000000000048ef50] kthread+0x110/0x140
[ 47.788707] [00000000004060e4] ret_from_fork+0x1c/0x2c
[ 47.788835] [0000000000000000] 0x0
[ 47.788927] irq event stamp: 19420
[ 47.789028] hardirqs last enabled at (19428): [<00000000004e2910>]
console_unlock+0x630/0x6c0
[ 47.789230] hardirqs last disabled at (19435): [<00000000004e23dc>]
console_unlock+0xfc/0x6c0
[ 47.789420] softirqs last enabled at (19254): [<0000000000abc854>]
__do_softirq+0x494/0x500
[ 47.789612] softirqs last disabled at (19259): [<000000000046beb0>]
run_ksoftirqd+0x30/0x80
[ 47.789795] ---[ end trace afb11a4826780c48 ]---
[ 47.925975] Unable to handle kernel paging request at virtual
address 0006120000000000
[ 47.926088] tsk->{mm,active_mm}->context = 0000000000001b68
[ 47.926150] tsk->{mm,active_mm}->pgd = fff8002438f90000
[ 47.926202] \|/ ____ \|/
[ 47.926202] "@'/ .. \`@"
[ 47.926202] /_| \__/ |_\
[ 47.926202] \__U_/
[ 47.926311] kworker/25:2(653): Oops [#1]
[ 47.926354] CPU: 25 PID: 653 Comm: kworker/25:2 Tainted: G W
5.3.0-rc3 #1143
[ 47.926433] Workqueue: xfs-conv/dm-0 xfs_end_io
[ 47.926479] TSTATE: 0000000080001605 TPC: 000000000067588c TNPC:
0000000000675890 Y: 00000000 Tainted: G W
[ 47.926570] TPC: <kmem_cache_free+0x4c/0x300>
[ 47.926611] g0: 0000000000675668 g1: 0006120000000000 g2:
0000004000000000 g3: 0006000000000000
[ 47.926682] g4: fff80024938c8e40 g5: fff80024a83bc000 g6:
fff8002490254000 g7: 0000000000000102
[ 47.926751] o0: 0000000000000000 o1: 0000000000d02c30 o2:
fff80024938c96b8 o3: 000000000000c000
[ 47.926821] o4: 00000000014c3000 o5: 0000000000000000 sp:
fff80024ad577121 ret_pc: 00000000004d5148
[ 47.926898] RPC: <lock_is_held_type+0x68/0xe0>
[ 47.926940] l0: 0000000000000000 l1: 0000000000000000 l2:
00000000f0000000 l3: 0000000000000080
[ 47.927010] l4: 0000000000d953c0 l5: 0000000000d953c0 l6:
0000000000000002 l7: 000000000000000b
[ 47.927080] i0: fff800003040b1e0 i1: 0000000000000000 i2:
fff80024938c96b8 i3: fff80024938c8e40
[ 47.927149] i4: 0000000000000004 i5: fff80024938c9730 i6:
fff80024ad5771d1 i7: 00000000006409f4
[ 47.927223] I7: <ptlock_free+0x14/0x40>
[ 47.927261] Call Trace:
[ 47.927291] [00000000006409f4] ptlock_free+0x14/0x40
[ 47.927342] [0000000000450a54] __pte_free+0x34/0x80
[ 47.927388] [0000000000451b54] pgtable_free+0x14/0x40
[ 47.927436] [00000000006470fc] tlb_remove_table_rcu+0x3c/0x80
[ 47.927497] [00000000004fae94] rcu_core+0xbd4/0x1000
[ 47.927543] [00000000004fb7ac] rcu_core_si+0xc/0x20
[ 47.927593] [0000000000abc648] __do_softirq+0x288/0x500
[ 47.927644] [000000000042d054] do_softirq_own_stack+0x34/0x60
[ 47.927697] [000000000046c1c8] irq_exit+0x68/0xe0
[ 47.927742] [0000000000abc1b8] timer_interrupt+0x98/0xc0
[ 47.927791] [0000000000427490] sys_call_table+0x780/0x970
[ 47.927845] [0000000000609ba8] test_clear_page_writeback+0x2c8/0x300
[ 47.927900] [00000000005f9d18] end_page_writeback+0x58/0xa0
[ 47.927951] [00000000007b83f8] xfs_destroy_ioend+0xf8/0x240
[ 47.928002] [00000000007b86a4] xfs_end_ioend+0x164/0x1e0
[ 47.928050] [00000000007b9550] xfs_end_io+0x90/0xc0
[ 47.928095] Disabling lock debugging due to kernel taint
[ 47.928118] Caller[00000000006409f4]: ptlock_free+0x14/0x40
[ 47.928140] Caller[0000000000450a54]: __pte_free+0x34/0x80
[ 47.928162] Caller[0000000000451b54]: pgtable_free+0x14/0x40
[ 47.928184] Caller[00000000006470fc]: tlb_remove_table_rcu+0x3c/0x80
[ 47.928208] Caller[00000000004fae94]: rcu_core+0xbd4/0x1000
[ 47.928230] Caller[00000000004fb7ac]: rcu_core_si+0xc/0x20
[ 47.928252] Caller[0000000000abc648]: __do_softirq+0x288/0x500
[ 47.928278] Caller[000000000042d054]: do_softirq_own_stack+0x34/0x60
[ 47.928306] Caller[000000000046c1c8]: irq_exit+0x68/0xe0
[ 47.928330] Caller[0000000000abc1b8]: timer_interrupt+0x98/0xc0
[ 47.928357] Caller[0000000000427490]: sys_call_table+0x780/0x970
[ 47.928384] Caller[0000000000609b9c]: test_clear_page_writeback+0x2bc/0x300
[ 47.928412] Caller[00000000005f9d18]: end_page_writeback+0x58/0xa0
[ 47.928736] Caller[00000000007b83f8]: xfs_destroy_ioend+0xf8/0x240
[ 47.928770] Caller[00000000007b86a4]: xfs_end_ioend+0x164/0x1e0
[ 47.928798] Caller[00000000007b9550]: xfs_end_io+0x90/0xc0
[ 47.928829] Caller[0000000000486ea4]: process_one_work+0x3e4/0x720
[ 47.928858] Caller[00000000004874b8]: worker_thread+0x2d8/0x5a0
[ 47.928888] Caller[000000000048ef50]: kthread+0x110/0x140
[ 47.928922] Caller[00000000004060e4]: ret_from_fork+0x1c/0x2c
[ 47.928952] Caller[0000000000000000]: 0x0
[ 47.928973] Instruction DUMP:
[ 47.928976] 82004002
[ 47.928995] 83287003
[ 47.929013] 82004003
[ 47.929031] <c4586008>
[ 47.929048] 8608a001
[ 47.929065] 8400bfff
[ 47.929082] 8578c401
[ 47.929098] 82100002
[ 47.929115] c458a008
[ 47.929132]
[ 47.929161] Kernel panic - not syncing: Aiee, killing interrupt handler!
[ 47.933949] Press Stop-A (L1-A) from sun keyboard or send break
[ 47.933949] twice on console to return to the boot prom
[ 47.933995] ---[ end Kernel panic - not syncing: Aiee, killing
interrupt handler! ]---