Futex uses GUP. Currently on ARM, the default __get_user_pages_fast()
is used, which always returns 0, leading to an infinite loop in get_futex_key() :(
Implementing get_user_pages_fast() for ARM solves this problem.
Tested on vexpress-A15 on QEMU.
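For reference, the path that spins is the THP tail-page handling in
get_futex_key(); it looks roughly like this (paraphrased from
kernel/futex.c, not verbatim):

	again:
		err = get_user_pages_fast(address, 1, 1, &page);
		...
		if (unlikely(PageTail(page))) {
			put_page(page);
			/* serialize against __split_huge_page_splitting() */
			local_irq_disable();
			if (__get_user_pages_fast(address, 1, 1, &page) == 1) {
				/* pinned the head page, carry on */
			} else {
				local_irq_enable();
				/*
				 * The weak default stub always returns 0,
				 * so on ARM we retry forever.
				 */
				goto again;
			}
		}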
8<---------------------------------------------------->8
Implement get_user_pages_fast without locking in the fastpath on ARM.
This work is derived from the x86 version and adapted to ARM.
Signed-off-by: Zi Shen Lim <[email protected]>
---
arch/arm/mm/Makefile | 4 +-
arch/arm/mm/gup.c | 330 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 332 insertions(+), 2 deletions(-)
create mode 100644 arch/arm/mm/gup.c
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 224a9cc..26cafd0 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,8 +2,8 @@
# Makefile for the linux arm-specific parts of the memory manager.
#
-obj-y := dma-mapping.o extable.o fault.o init.o \
- iomap.o
+obj-y := dma-mapping.o extable.o fault.o gup.o \
+ init.o iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o
diff --git a/arch/arm/mm/gup.c b/arch/arm/mm/gup.c
new file mode 100644
index 0000000..42dce08
--- /dev/null
+++ b/arch/arm/mm/gup.c
@@ -0,0 +1,330 @@
+/*
+ * Lockless get_user_pages_fast for ARM
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ * Copyright (C) 2013 Zi Shen Lim, Linaro Ltd <[email protected]>
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/hugetlb.h>
+
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+ return ACCESS_ONCE(*ptep);
+}
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ pte_t *ptep;
+
+ ptep = pte_offset_map(&pmd, addr);
+ do {
+ pte_t pte = gup_get_pte(ptep);
+ struct page *page;
+
+ if (!pte_present_user(pte) || (write && !pte_write(pte)) ||
+ pte_special(pte)) {
+ pte_unmap(ptep);
+ return 0;
+ }
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ get_page(page);
+ SetPageReferenced(page);
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(ptep - 1);
+
+ return 1;
+}
+
+static inline void get_head_page_multiple(struct page *page, int nr)
+{
+ VM_BUG_ON(page != compound_head(page));
+ VM_BUG_ON(page_count(page) == 0);
+ atomic_add(nr, &page->_count);
+ SetPageReferenced(page);
+}
+
+static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ pte_t pte = *(pte_t *)&pmd;
+ struct page *head, *page;
+ int refs;
+
+ if (!pte_present_user(pte) || (write && !pte_write(pte)))
+ return 0;
+ /* hugepages are never "special" */
+ VM_BUG_ON(pte_special(pte));
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+ get_head_page_multiple(head, refs);
+
+ return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp;
+
+ pmdp = pmd_offset(&pud, addr);
+ do {
+ pmd_t pmd = *pmdp;
+
+ next = pmd_addr_end(addr, end);
+ /*
+ * The pmd_trans_splitting() check below explains why
+ * pmdp_splitting_flush has to flush the tlb, to stop
+ * this gup-fast code from running while we set the
+ * splitting bit in the pmd. Returning zero will take
+ * the slow path that will call wait_split_huge_page()
+ * if the pmd is still in splitting state. gup-fast
+ * can't because it has irq disabled and
+ * wait_split_huge_page() would never return as the
+ * tlb flush IPI wouldn't run.
+ */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+ if (unlikely(pmd_huge(pmd))) {
+ if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
+ return 0;
+ } else {
+ if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ return 0;
+ }
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ pte_t pte = *(pte_t *)&pud;
+ struct page *head, *page;
+ int refs;
+
+ if (!pte_present_user(pte) || (write && !pte_write(pte)))
+ return 0;
+ /* hugepages are never "special" */
+ VM_BUG_ON(pte_special(pte));
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+ page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+ get_head_page_multiple(head, refs);
+
+ return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp;
+
+ pudp = pud_offset(&pgd, addr);
+ do {
+ pud_t pud = *pudp;
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (unlikely(pud_huge(pud))) {
+ if (!gup_huge_pud(pud, addr, next, write, pages, nr))
+ return 0;
+ } else {
+ if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+ return 0;
+ }
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ unsigned long flags;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
+ * important workloads (eg. DB2), and whether limiting the batch size
+ * will decrease performance.
+ *
+ * It seems like we're in the clear for the moment. Direct-IO is
+ * the main guy that batches up lots of get_user_pages, and even
+ * they are limited to 64-at-a-time which is not so many.
+ */
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables and pages from being freed.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
+ local_irq_save(flags);
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+
+ return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+
+ end = start + len;
+ if (end < start)
+ goto slow_irqon;
+
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
+ * important workloads (eg. DB2), and whether limiting the batch size
+ * will decrease performance.
+ *
+ * It seems like we're in the clear for the moment. Direct-IO is
+ * the main guy that batches up lots of get_user_pages, and even
+ * they are limited to 64-at-a-time which is not so many.
+ */
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables and pages from being freed.
+ *
+ * So long as we atomically load page table pointers versus teardown,
+ * we can follow the address down to the page and take a ref on it.
+ */
+ local_irq_disable();
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_enable();
+
+ VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+ return nr;
+
+ {
+ int ret;
+
+slow:
+ local_irq_enable();
+slow_irqon:
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+ if (ret < 0)
+ ret = nr;
+ else
+ ret += nr;
+ }
+
+ return ret;
+ }
+}
--
1.8.1.2
On Thu, Oct 03, 2013 at 06:15:15PM +0100, Zi Shen Lim wrote:
> Futex uses GUP. Currently on ARM, the default __get_user_pages_fast()
> is used, which always returns 0, leading to an infinite loop in get_futex_key() :(
>
> Implementing get_user_pages_fast() for ARM solves this problem.
>
> Tested on vexpress-A15 on QEMU.
> 8<---------------------------------------------------->8
>
> Implement get_user_pages_fast without locking in the fastpath on ARM.
> This work is derived from the x86 version and adapted to ARM.
This looks pretty much like an exact copy of the x86 version, which will
likely also result in another exact copy for arm64. Can none of this code be
made common? Furthermore, the fact that you've lifted the code and not
provided much of an explanation in the cover letter hints that you might not
be aware of all the subtleties involved here...
> +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
> + int write, struct page **pages, int *nr)
> +{
> + unsigned long next;
> + pmd_t *pmdp;
> +
> + pmdp = pmd_offset(&pud, addr);
> + do {
> + pmd_t pmd = *pmdp;
> +
> + next = pmd_addr_end(addr, end);
> + /*
> + * The pmd_trans_splitting() check below explains why
> + * pmdp_splitting_flush has to flush the tlb, to stop
> + * this gup-fast code from running while we set the
> + * splitting bit in the pmd. Returning zero will take
> + * the slow path that will call wait_split_huge_page()
> + * if the pmd is still in splitting state. gup-fast
> + * can't because it has irq disabled and
> + * wait_split_huge_page() would never return as the
> + * tlb flush IPI wouldn't run.
> + */
> + if (pmd_none(pmd) || pmd_trans_splitting(pmd))
> + return 0;
> + if (unlikely(pmd_huge(pmd))) {
> + if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
> + return 0;
> + } else {
> + if (!gup_pte_range(pmd, addr, next, write, pages, nr))
> + return 0;
> + }
> + } while (pmdp++, addr = next, addr != end);
...case in point: we don't (usually) require IPIs to shoot down TLB entries
in SMP systems, so this is racy under thp splitting.
Will
Thanks for your feedback, Will.
On Thu, Oct 3, 2013 at 10:27 AM, Will Deacon <[email protected]> wrote:
> On Thu, Oct 03, 2013 at 06:15:15PM +0100, Zi Shen Lim wrote:
>> Futex uses GUP. Currently on ARM, the default __get_user_pages_fast()
>> is used, which always returns 0, leading to an infinite loop in get_futex_key() :(
>>
>> Implementing get_user_pages_fast() for ARM solves this problem.
>>
>> Tested on vexpress-A15 on QEMU.
>> 8<---------------------------------------------------->8
>>
>> Implement get_user_pages_fast without locking in the fastpath on ARM.
>> This work is derived from the x86 version and adapted to ARM.
>
> This looks pretty much like an exact copy of the x86 version, which will
> likely also result in another exact copy for arm64. Can none of this code be
> made common? Furthermore, the fact that you've lifted the code and not
> provided much of an explanation in the cover letter hints that you might not
> be aware of all the subtleties involved here...
>
You are right. I was wondering the same too. Hopefully this RFC will
lead to the desired solution.
x86 does this:
--8<-----
unsigned long mask;
pte_t *ptep;
mask = _PAGE_PRESENT|_PAGE_USER;
if (write)
mask |= _PAGE_RW;
ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
}
-->8-----
The adaptation uses pte_* macros.
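For reference, the equivalent check (taken straight from gup_pte_range()
in the patch) becomes:
--8<-----
		if (!pte_present_user(pte) || (write && !pte_write(pte)) ||
		    pte_special(pte)) {
			pte_unmap(ptep);
			return 0;
		}
-->8-----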
x86 also uses its more optimized pmd_large() and pud_large() helpers,
instead of reusing pmd_huge() and pud_huge() as this patch does.
>> +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
>> + int write, struct page **pages, int *nr)
>> +{
>> + unsigned long next;
>> + pmd_t *pmdp;
>> +
>> + pmdp = pmd_offset(&pud, addr);
>> + do {
>> + pmd_t pmd = *pmdp;
>> +
>> + next = pmd_addr_end(addr, end);
>> + /*
>> + * The pmd_trans_splitting() check below explains why
>> + * pmdp_splitting_flush has to flush the tlb, to stop
>> + * this gup-fast code from running while we set the
>> + * splitting bit in the pmd. Returning zero will take
>> + * the slow path that will call wait_split_huge_page()
>> + * if the pmd is still in splitting state. gup-fast
>> + * can't because it has irq disabled and
>> + * wait_split_huge_page() would never return as the
>> + * tlb flush IPI wouldn't run.
>> + */
>> + if (pmd_none(pmd) || pmd_trans_splitting(pmd))
>> + return 0;
>> + if (unlikely(pmd_huge(pmd))) {
>> + if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
>> + return 0;
>> + } else {
>> + if (!gup_pte_range(pmd, addr, next, write, pages, nr))
>> + return 0;
>> + }
>> + } while (pmdp++, addr = next, addr != end);
>
> ...case in point: we don't (usually) require IPIs to shoot down TLB entries
> in SMP systems, so this is racy under thp splitting.
>
Ok. I learned something new :)
Suggestions on how to proceed?
Thanks for your patience.
> Will
On Thu, Oct 03, 2013 at 11:07:44AM -0700, Zi Shen Lim wrote:
> Thanks for your feedback, Will.
>
> On Thu, Oct 3, 2013 at 10:27 AM, Will Deacon <[email protected]> wrote:
> > On Thu, Oct 03, 2013 at 06:15:15PM +0100, Zi Shen Lim wrote:
> >> Futex uses GUP. Currently on ARM, the default __get_user_pages_fast()
> >> is used, which always returns 0, leading to an infinite loop in get_futex_key() :(
> >>
> >> Implementing get_user_pages_fast() for ARM solves this problem.
> >>
> >> Tested on vexpress-A15 on QEMU.
> >> 8<---------------------------------------------------->8
> >>
> >> Implement get_user_pages_fast without locking in the fastpath on ARM.
> >> This work is derived from the x86 version and adapted to ARM.
> >
> > This looks pretty much like an exact copy of the x86 version, which will
> > likely also result in another exact copy for arm64. Can none of this code be
> > made common? Furthermore, the fact that you've lifted the code and not
> > provided much of an explanation in the cover letter hints that you might not
> > be aware of all the subtleties involved here...
> >
>
> You are right. I was wondering the same too. Hopefully this RFC will
> lead to the desired solution.
>
> x86 does this:
> --8<-----
> unsigned long mask;
> pte_t *ptep;
>
> mask = _PAGE_PRESENT|_PAGE_USER;
> if (write)
> mask |= _PAGE_RW;
>
> ptep = pte_offset_map(&pmd, addr);
> do {
> pte_t pte = gup_get_pte(ptep);
> struct page *page;
>
> if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
> pte_unmap(ptep);
> return 0;
> }
> -->8-----
> The adaptation uses pte_* macros.
>
> x86 also uses its more optimized pmd_large() and pud_large() helpers,
> instead of reusing pmd_huge() and pud_huge() as this patch does.
>
> >> +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
> >> + int write, struct page **pages, int *nr)
> >> +{
> >> + unsigned long next;
> >> + pmd_t *pmdp;
> >> +
> >> + pmdp = pmd_offset(&pud, addr);
> >> + do {
> >> + pmd_t pmd = *pmdp;
> >> +
> >> + next = pmd_addr_end(addr, end);
> >> + /*
> >> + * The pmd_trans_splitting() check below explains why
> >> + * pmdp_splitting_flush has to flush the tlb, to stop
> >> + * this gup-fast code from running while we set the
> >> + * splitting bit in the pmd. Returning zero will take
> >> + * the slow path that will call wait_split_huge_page()
> >> + * if the pmd is still in splitting state. gup-fast
> >> + * can't because it has irq disabled and
> >> + * wait_split_huge_page() would never return as the
> >> + * tlb flush IPI wouldn't run.
> >> + */
> >> + if (pmd_none(pmd) || pmd_trans_splitting(pmd))
> >> + return 0;
> >> + if (unlikely(pmd_huge(pmd))) {
> >> + if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
> >> + return 0;
> >> + } else {
> >> + if (!gup_pte_range(pmd, addr, next, write, pages, nr))
> >> + return 0;
> >> + }
> >> + } while (pmdp++, addr = next, addr != end);
> >
> > ...case in point: we don't (usually) require IPIs to shoot down TLB entries
> > in SMP systems, so this is racy under thp splitting.
> >
>
> Ok. I learned something new :)
> Suggestions on how to proceed?
Hi Zi Shen,
I am actually looking at this now too :-)
(but from the perspective of optimising Direct-IO on ARM/ARM64).
I've been looking through Chanho Park's work at:
http://lists.infradead.org/pipermail/linux-arm-kernel/2013-April/162115.html
As Will pointed out, ARM does not usually require IPIs to shoot down TLB
entries, so local_irq_disable() will not necessarily prevent pagetables from
being freed while fast_gup is running.
When splitting a transparent huge page, we set the pmd splitting bit, then
perform a TLB invalidate, then proceed with the split. Thus a splitting THP
will not always be blocked by local_irq_disable() on ARM. This does not only
affect fast_gup; futexes are also affected. From my understanding of the futex
on THP tail case in kernel/futex.c, it looks like an assumption is also made
there that splitting pmds can be blocked by disabling local irqs.
PowerPC and SPARC, like ARM, do not necessarily require IPIs for TLB shootdown
either, so they make use of tlb_remove_table (CONFIG_HAVE_RCU_TABLE_FREE). This
identifies pages backing pagetables that have multiple users and batches them
up, and then performs a dummy IPI before freeing them en masse. This reduces
the performance impact from the IPIs (by doing considerably fewer of them), and
guarantees that pagetables cannot be freed from under the fast_gup.
Unfortunately this also means that the fast_gup has to be aware of ptes/pmds
changing from under it.
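For anyone following along, the dummy-IPI part of that scheme lives in
mm/memory.c under CONFIG_HAVE_RCU_TABLE_FREE; abridged, it boils down to:

--8<-----
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt, nothing else to do. */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * The synchronous IPI cannot complete while any CPU is walking
	 * pagetables with IRQs disabled, so once it returns no fast_gup
	 * walker can still be using 'table' and it is safe to free.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}
-->8-----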
I am currently working on a fast_gup that follows the PowerPC way of doing
things, and am convincing myself that the behaviour is valid on ARM (if
tlb_remove_table were to be used). I am also looking over the THP splitting
logic to see how it is handled on PowerPC and SPARC. It may be practical to
do an IPI for THPs that have multiple users and are splitting. (On my x86
desktop, with an uptime of 63 days running all manner of craziness, I have
67982 THP splits recorded in total.)
There's also the possibility of blocking without an IPI, but it's not obvious
to me how to do that (that would probably necessitate a change to
kernel/futex.c). I've just picked this up recently and am still trying to
understand it fully.
Cheers,
--
Steve
>
> Thanks for your patience.
>
> > Will
Hi Steve,
[adding linux-mm, since this has turned into a discussion about THP
splitting]
On Fri, Oct 04, 2013 at 11:31:42AM +0100, Steve Capper wrote:
> On Thu, Oct 03, 2013 at 11:07:44AM -0700, Zi Shen Lim wrote:
> > On Thu, Oct 3, 2013 at 10:27 AM, Will Deacon <[email protected]> wrote:
> > > On Thu, Oct 03, 2013 at 06:15:15PM +0100, Zi Shen Lim wrote:
> > >> Futex uses GUP. Currently on ARM, the default __get_user_pages_fast()
> > >> is used, which always returns 0, leading to an infinite loop in get_futex_key() :(
> > >
> > > This looks pretty much like an exact copy of the x86 version, which will
> > > likely also result in another exact copy for arm64. Can none of this code be
> > > made common? Furthermore, the fact that you've lifted the code and not
> > > provided much of an explanation in the cover letter hints that you might not
> > > be aware of all the subtleties involved here...
[...]
> > >> +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
> > >> + int write, struct page **pages, int *nr)
> > >> +{
> > >> + unsigned long next;
> > >> + pmd_t *pmdp;
> > >> +
> > >> + pmdp = pmd_offset(&pud, addr);
> > >> + do {
> > >> + pmd_t pmd = *pmdp;
> > >> +
> > >> + next = pmd_addr_end(addr, end);
> > >> + /*
> > >> + * The pmd_trans_splitting() check below explains why
> > >> + * pmdp_splitting_flush has to flush the tlb, to stop
> > >> + * this gup-fast code from running while we set the
> > >> + * splitting bit in the pmd. Returning zero will take
> > >> + * the slow path that will call wait_split_huge_page()
> > >> + * if the pmd is still in splitting state. gup-fast
> > >> + * can't because it has irq disabled and
> > >> + * wait_split_huge_page() would never return as the
> > >> + * tlb flush IPI wouldn't run.
> > >> + */
> > >> + if (pmd_none(pmd) || pmd_trans_splitting(pmd))
> > >> + return 0;
> > >> + if (unlikely(pmd_huge(pmd))) {
> > >> + if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
> > >> + return 0;
> > >> + } else {
> > >> + if (!gup_pte_range(pmd, addr, next, write, pages, nr))
> > >> + return 0;
> > >> + }
> > >> + } while (pmdp++, addr = next, addr != end);
> > >
> > > ...case in point: we don't (usually) require IPIs to shoot down TLB entries
> > > in SMP systems, so this is racy under thp splitting.
[...]
> As Will pointed out, ARM does not usually require IPIs to shoot down TLB
> entries. So the local_irq_disable will not necessarily block pagetables being
> freed when fast_gup is running.
>
> Transparent huge pages when splitting will set the pmd splitting bit then
> perform a tlb invalidate, then proceed with the split. Thus a splitting THP
> will not always be blocked by local_irq_disable on ARM. This does not only
> affect fast_gup, futexes are also affected. From my understanding of futex on
> THP tail case in kernel/futex.c, it looks like an assumption is made there also
> that splitting pmds can be blocked by disabling local irqs.
>
> PowerPC and SPARC, like ARM do not necessarily require IPIs for TLB shootdown
> either so they make use of tlb_remove_table (CONFIG_HAVE_RCU_TABLE_FREE). This
> identifies pages backing pagetables that have multiple users and batches them
> up, and then performs a dummy IPI before freeing them en masse. This reduces
> the performance impact from the IPIs (by doing considerably fewer of them), and
> guarantees that pagetables cannot be freed from under the fast_gup.
> Unfortunately this also means that the fast_gup has to be aware of ptes/pmds
> changing from under it.
[...]
> There's also the possibility of blocking without an IPI, but it's not obvious
> to me how to do that (that would probably necessitate a change to
> kernel/futex.c). I've just picked this up recently and am still trying to
> understand it fully.
The IPI solution looks like a hack to me and essentially moves the
synchronisation down into the csd_lock on the splitting side as part of the
cross-call to invalidate the TLB. Furthermore, the TLB doesn't even need to
be invalidated afaict, since we're just updating software bits.
Instead, I wonder whether this can be solved with a simple atomic_t:
- The fast GUP code (read side) does something like:
atomic_inc(readers);
smp_mb__after_atomic_inc();
__get_user_pages_fast(...);
smp_mb__before_atomic_dec();
atomic_dec(readers);
- The splitting code (write side) then polls the counter to reach
zero:
pmd_t pmd = pmd_mksplitting(*pmdp);
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
smp_mb();
while (atomic_read(readers) != 0)
cpu_relax();
That way, we don't need to worry about IPIs, we don't need to disable
interrupts on the read side, and we still get away without heavyweight
locking.
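Spelled out as a self-contained sketch (hypothetical helper names, purely
to make the ordering explicit):

	static atomic_t gup_readers = ATOMIC_INIT(0);

	/* Read side: bracket the lockless walk with the counter. */
	int fast_gup_with_counter(unsigned long start, int nr_pages,
				  int write, struct page **pages)
	{
		int nr;

		atomic_inc(&gup_readers);
		smp_mb__after_atomic_inc();
		nr = __get_user_pages_fast(start, nr_pages, write, pages);
		smp_mb__before_atomic_dec();
		atomic_dec(&gup_readers);

		return nr;
	}

	/* Write side: call after setting the splitting bit in the pmd. */
	void wait_for_gup_readers(void)
	{
		/*
		 * Readers arriving after this point will observe the
		 * splitting bit and bail out; we only wait for walkers
		 * that were already inside __get_user_pages_fast().
		 */
		smp_mb();
		while (atomic_read(&gup_readers) != 0)
			cpu_relax();
	}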
Of course, I could well be missing something here, but what we currently
have in mainline doesn't work for ARM anyway (since TLB invalidation is
broadcast in hardware).
Will