2021-05-07 17:26:36

by Peter Xu

Subject: [PATCH v2 3/3] mm: gup: pack has_pinned in MMF_HAS_PINNED

From: Andrea Arcangeli <[email protected]>

The 32-bit has_pinned field can be packed into the MMF_HAS_PINNED bit
as a noop cleanup.

Any atomic_inc/dec on the mm cacheline shared by all threads in
pin-fast would reintroduce a loss of SMP scalability to pin-fast, so
there is no future benefit in keeping an atomic in the mm for this.

set_bit(MMF_HAS_PINNED) will in theory be a bit slower than WRITE_ONCE
(atomic_set is equivalent to WRITE_ONCE), but the set_bit, just like
the atomic_set it replaces, still only has to be issued once per "mm",
so the difference between the two will be lost in the noise.
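
For illustration only (this sketch is not part of the patch; the real
helper is mm_set_has_pinned_flag() in the diff below, and the helper
name here is made up), the idiom boils down to guarding the atomic RMW
with a plain read, so the shared mm cacheline is only dirtied around
the first pin:

	/* assumes <linux/bitops.h> for test_bit()/set_bit() */
	static inline void set_mm_flag_once(unsigned long *flags, int bit)
	{
		/* common case once the bit is set: read-only, no cacheline bounce */
		if (!test_bit(bit, flags))
			/* atomic RMW, taken at most a handful of times per mm */
			set_bit(bit, flags);
	}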

will-it-scale "mmap2" shows no change in performance with enterprise
config as expected.

will-it-scale "pin_fast" retains the > 4000% SMP scalability
performance improvement against upstream as expected.

This is a noop as far as overall performance and SMP scalability are
concerned.

Signed-off-by: Andrea Arcangeli <[email protected]>
[peterx: Fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix
comment here and there]
Signed-off-by: Peter Xu <[email protected]>
---
fs/proc/task_mmu.c | 2 +-
include/linux/mm.h | 2 +-
include/linux/mm_types.h | 10 ----------
include/linux/sched/coredump.h | 8 ++++++++
kernel/fork.c | 1 -
mm/gup.c | 19 +++++++++++++++----
6 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4c95cc57a66a8..6144571942db9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1049,7 +1049,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
return false;
if (!is_cow_mapping(vma->vm_flags))
return false;
- if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+ if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
return false;
page = vm_normal_page(vma, addr, pte);
if (!page)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d6790ab0cf575..94dc84f6d8658 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1331,7 +1331,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
if (!is_cow_mapping(vma->vm_flags))
return false;

- if (!atomic_read(&vma->vm_mm->has_pinned))
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;

return page_maybe_dma_pinned(page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6613b26a88946..15d79858fadbd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -435,16 +435,6 @@ struct mm_struct {
*/
atomic_t mm_count;

- /**
- * @has_pinned: Whether this mm has pinned any pages. This can
- * be either replaced in the future by @pinned_vm when it
- * becomes stable, or grow into a counter on its own. We're
- * aggresive on this bit now - even if the pinned pages were
- * unpinned later on, we'll still keep this bit set for the
- * lifecycle of this mm just for simplicity.
- */
- atomic_t has_pinned;
-
/**
* @write_protect_seq: Locked when any thread is write
* protecting pages mapped by this mm to enforce a later COW,
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index dfd82eab29025..4d9e3a6568758 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggresive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)

#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/kernel/fork.c b/kernel/fork.c
index 502dc046fbc62..a71e73707ef59 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1026,7 +1026,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
- atomic_set(&mm->has_pinned, 0);
atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
diff --git a/mm/gup.c b/mm/gup.c
index 9933bc5c2eff2..bb130723a6717 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

+/*
+ * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
+ * lifecycle. Avoid setting the bit unless necessary, or it might cause write
+ * cache bouncing on large SMP machines for concurrent pinned gups.
+ */
+static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
+{
+ if (!test_bit(MMF_HAS_PINNED, mm_flags))
+ set_bit(MMF_HAS_PINNED, mm_flags);
+}
+
/*
* Please note that this function, unlike __get_user_pages will not
* return 0 for nr_pages > 0 without FOLL_NOWAIT
@@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
BUG_ON(*locked != 1);
}

- if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
- atomic_set(&mm->has_pinned, 1);
+ if (flags & FOLL_PIN)
+ mm_set_has_pinned_flag(&mm->flags);

/*
* FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
@@ -2617,8 +2628,8 @@ static int internal_get_user_pages_fast(unsigned long start,
FOLL_FAST_ONLY)))
return -EINVAL;

- if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
- atomic_set(&current->mm->has_pinned, 1);
+ if (gup_flags & FOLL_PIN)
+ mm_set_has_pinned_flag(&current->mm->flags);

if (!(gup_flags & FOLL_FAST_ONLY))
might_lock_read(&current->mm->mmap_lock);
--
2.31.1


2021-05-08 01:16:13

by John Hubbard

Subject: Re: [PATCH v2 3/3] mm: gup: pack has_pinned in MMF_HAS_PINNED

On 5/7/21 8:05 AM, Peter Xu wrote:
> From: Andrea Arcangeli <[email protected]>
>
> has_pinned 32bit can be packed in the MMF_HAS_PINNED bit as a noop
> cleanup.
>
> Any atomic_inc/dec to the mm cacheline shared by all threads in
> pin-fast would reintroduce a loss of SMP scalability to pin-fast, so
> there's no future potential usefulness to keep an atomic in the mm for
> this.
>
> set_bit(MMF_HAS_PINNED) will be theoretically a bit slower than
> WRITE_ONCE (atomic_set is equivalent to WRITE_ONCE), but the set_bit
> (just like atomic_set after this commit) has to be still issued only
> once per "mm", so the difference between the two will be lost in the
> noise.
>
> will-it-scale "mmap2" shows no change in performance with enterprise
> config as expected.
>
> will-it-scale "pin_fast" retains the > 4000% SMP scalability
> performance improvement against upstream as expected.
>
> This is a noop as far as overall performance and SMP scalability are
> concerned.
>
> Signed-off-by: Andrea Arcangeli <[email protected]>
> [peterx: Fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix
> comment here and there]
> Signed-off-by: Peter Xu <[email protected]>
> ---
> fs/proc/task_mmu.c | 2 +-
> include/linux/mm.h | 2 +-
> include/linux/mm_types.h | 10 ----------
> include/linux/sched/coredump.h | 8 ++++++++
> kernel/fork.c | 1 -
> mm/gup.c | 19 +++++++++++++++----
> 6 files changed, 25 insertions(+), 17 deletions(-)
>

Looks good.

Reviewed-by: John Hubbard <[email protected]>


thanks,
--
John Hubbard
NVIDIA

> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 4c95cc57a66a8..6144571942db9 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -1049,7 +1049,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
> return false;
> if (!is_cow_mapping(vma->vm_flags))
> return false;
> - if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
> + if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
> return false;
> page = vm_normal_page(vma, addr, pte);
> if (!page)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index d6790ab0cf575..94dc84f6d8658 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1331,7 +1331,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
> if (!is_cow_mapping(vma->vm_flags))
> return false;
>
> - if (!atomic_read(&vma->vm_mm->has_pinned))
> + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
> return false;
>
> return page_maybe_dma_pinned(page);
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 6613b26a88946..15d79858fadbd 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -435,16 +435,6 @@ struct mm_struct {
> */
> atomic_t mm_count;
>
> - /**
> - * @has_pinned: Whether this mm has pinned any pages. This can
> - * be either replaced in the future by @pinned_vm when it
> - * becomes stable, or grow into a counter on its own. We're
> - * aggresive on this bit now - even if the pinned pages were
> - * unpinned later on, we'll still keep this bit set for the
> - * lifecycle of this mm just for simplicity.
> - */
> - atomic_t has_pinned;
> -
> /**
> * @write_protect_seq: Locked when any thread is write
> * protecting pages mapped by this mm to enforce a later COW,
> diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
> index dfd82eab29025..4d9e3a6568758 100644
> --- a/include/linux/sched/coredump.h
> +++ b/include/linux/sched/coredump.h
> @@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
> #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
> #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
> #define MMF_MULTIPROCESS 27 /* mm is shared between processes */
> +/*
> + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
> + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
> + * a counter on its own. We're aggresive on this bit for now: even if the
> + * pinned pages were unpinned later on, we'll still keep this bit set for the
> + * lifecycle of this mm, just for simplicity.
> + */
> +#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
> #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
>
> #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 502dc046fbc62..a71e73707ef59 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -1026,7 +1026,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
> mm_pgtables_bytes_init(mm);
> mm->map_count = 0;
> mm->locked_vm = 0;
> - atomic_set(&mm->has_pinned, 0);
> atomic64_set(&mm->pinned_vm, 0);
> memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
> spin_lock_init(&mm->page_table_lock);
> diff --git a/mm/gup.c b/mm/gup.c
> index 9933bc5c2eff2..bb130723a6717 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
> }
> EXPORT_SYMBOL_GPL(fixup_user_fault);
>
> +/*
> + * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
> + * lifecycle. Avoid setting the bit unless necessary, or it might cause write
> + * cache bouncing on large SMP machines for concurrent pinned gups.
> + */
> +static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
> +{
> + if (!test_bit(MMF_HAS_PINNED, mm_flags))
> + set_bit(MMF_HAS_PINNED, mm_flags);
> +}
> +
> /*
> * Please note that this function, unlike __get_user_pages will not
> * return 0 for nr_pages > 0 without FOLL_NOWAIT
> @@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
> BUG_ON(*locked != 1);
> }
>
> - if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
> - atomic_set(&mm->has_pinned, 1);
> + if (flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&mm->flags);
>
> /*
> * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
> @@ -2617,8 +2628,8 @@ static int internal_get_user_pages_fast(unsigned long start,
> FOLL_FAST_ONLY)))
> return -EINVAL;
>
> - if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
> - atomic_set(&current->mm->has_pinned, 1);
> + if (gup_flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&current->mm->flags);
>
> if (!(gup_flags & FOLL_FAST_ONLY))
> might_lock_read(&current->mm->mmap_lock);
>

2021-05-12 09:50:31

by Geert Uytterhoeven

Subject: Re: [PATCH v2 3/3] mm: gup: pack has_pinned in MMF_HAS_PINNED

Hi Peter, Andrea,

On Fri, May 7, 2021 at 7:26 PM Peter Xu <[email protected]> wrote:
> From: Andrea Arcangeli <[email protected]>
>
> has_pinned 32bit can be packed in the MMF_HAS_PINNED bit as a noop
> cleanup.
>
> Any atomic_inc/dec to the mm cacheline shared by all threads in
> pin-fast would reintroduce a loss of SMP scalability to pin-fast, so
> there's no future potential usefulness to keep an atomic in the mm for
> this.
>
> set_bit(MMF_HAS_PINNED) will be theoretically a bit slower than
> WRITE_ONCE (atomic_set is equivalent to WRITE_ONCE), but the set_bit
> (just like atomic_set after this commit) has to be still issued only
> once per "mm", so the difference between the two will be lost in the
> noise.
>
> will-it-scale "mmap2" shows no change in performance with enterprise
> config as expected.
>
> will-it-scale "pin_fast" retains the > 4000% SMP scalability
> performance improvement against upstream as expected.
>
> This is a noop as far as overall performance and SMP scalability are
> concerned.
>
> Signed-off-by: Andrea Arcangeli <[email protected]>
> [peterx: Fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix
> comment here and there]
> Signed-off-by: Peter Xu <[email protected]>

Thanks for your patch, which is now in linux-next.

> diff --git a/mm/gup.c b/mm/gup.c
> index 9933bc5c2eff2..bb130723a6717 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
> }
> EXPORT_SYMBOL_GPL(fixup_user_fault);
>
> +/*
> + * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
> + * lifecycle. Avoid setting the bit unless necessary, or it might cause write
> + * cache bouncing on large SMP machines for concurrent pinned gups.
> + */
> +static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
> +{
> + if (!test_bit(MMF_HAS_PINNED, mm_flags))
> + set_bit(MMF_HAS_PINNED, mm_flags);
> +}
> +
> /*
> * Please note that this function, unlike __get_user_pages will not
> * return 0 for nr_pages > 0 without FOLL_NOWAIT
> @@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
> BUG_ON(*locked != 1);
> }
>
> - if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
> - atomic_set(&mm->has_pinned, 1);
> + if (flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&mm->flags);
>
> /*
> * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
> @@ -2617,8 +2628,8 @@ static int internal_get_user_pages_fast(unsigned long start,
> FOLL_FAST_ONLY)))
> return -EINVAL;
>
> - if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
> - atomic_set(&current->mm->has_pinned, 1);
> + if (gup_flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&current->mm->flags);

[email protected] reports:

FAILED linux-next/m5272c3_defconfig/m68k-gcc8 Wed May 12, 19:30
http://kisskb.ellerman.id.au/kisskb/buildresult/14543658/
Commit: Add linux-next specific files for 20210512
ec85c95b0c90a17413901b018e8ade7b9eae7cad
Compiler: m68k-linux-gcc (GCC) 8.1.0 / GNU ld (GNU Binutils) 2.30

mm/gup.c:2698:3: error: implicit declaration of function
'mm_set_has_pinned_flag'; did you mean 'set_tsk_thread_flag'?
[-Werror=implicit-function-declaration]

Its definition is inside the #ifdef CONFIG_MMU section, but the last
user isn't.
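
Schematically, the layout that trips the nommu build looks roughly like
this (a simplified sketch; bodies abbreviated and placement approximate,
not the exact mm/gup.c code):

	#ifdef CONFIG_MMU
	/* ... fixup_user_fault() and the rest of the MMU-only GUP code ... */
	static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
	{
		if (!test_bit(MMF_HAS_PINNED, mm_flags))
			set_bit(MMF_HAS_PINNED, mm_flags);
	}
	/* ... */
	#endif /* CONFIG_MMU */

	/* built even when CONFIG_MMU=n */
	static int internal_get_user_pages_fast(unsigned long start,
						unsigned long nr_pages,
						unsigned int gup_flags,
						struct page **pages)
	{
		/* ... */
		if (gup_flags & FOLL_PIN)
			/* no declaration in scope on nommu: implicit declaration error */
			mm_set_has_pinned_flag(&current->mm->flags);
		/* ... */
	}

so the helper (or an equivalent stub) needs to be visible outside the
CONFIG_MMU block as well.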

Gr{oetje,eeting}s,

Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds

2021-05-12 09:51:05

by Naresh Kamboju

Subject: Re: [PATCH v2 3/3] mm: gup: pack has_pinned in MMF_HAS_PINNED

On Fri, 7 May 2021 at 20:36, Peter Xu <[email protected]> wrote:
>
> From: Andrea Arcangeli <[email protected]>
>
> has_pinned 32bit can be packed in the MMF_HAS_PINNED bit as a noop
> cleanup.
>
> Any atomic_inc/dec to the mm cacheline shared by all threads in
> pin-fast would reintroduce a loss of SMP scalability to pin-fast, so
> there's no future potential usefulness to keep an atomic in the mm for
> this.
>
> set_bit(MMF_HAS_PINNED) will be theoretically a bit slower than
> WRITE_ONCE (atomic_set is equivalent to WRITE_ONCE), but the set_bit
> (just like atomic_set after this commit) has to be still issued only
> once per "mm", so the difference between the two will be lost in the
> noise.
>
> will-it-scale "mmap2" shows no change in performance with enterprise
> config as expected.
>
> will-it-scale "pin_fast" retains the > 4000% SMP scalability
> performance improvement against upstream as expected.
>
> This is a noop as far as overall performance and SMP scalability are
> concerned.
>
> Signed-off-by: Andrea Arcangeli <[email protected]>
> [peterx: Fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix
> comment here and there]
> Signed-off-by: Peter Xu <[email protected]>
> ---
> fs/proc/task_mmu.c | 2 +-
> include/linux/mm.h | 2 +-
> include/linux/mm_types.h | 10 ----------
> include/linux/sched/coredump.h | 8 ++++++++
> kernel/fork.c | 1 -
> mm/gup.c | 19 +++++++++++++++----
> 6 files changed, 25 insertions(+), 17 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 4c95cc57a66a8..6144571942db9 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -1049,7 +1049,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
> return false;
> if (!is_cow_mapping(vma->vm_flags))
> return false;
> - if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
> + if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
> return false;
> page = vm_normal_page(vma, addr, pte);
> if (!page)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index d6790ab0cf575..94dc84f6d8658 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1331,7 +1331,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
> if (!is_cow_mapping(vma->vm_flags))
> return false;
>
> - if (!atomic_read(&vma->vm_mm->has_pinned))
> + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
> return false;
>
> return page_maybe_dma_pinned(page);
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 6613b26a88946..15d79858fadbd 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -435,16 +435,6 @@ struct mm_struct {
> */
> atomic_t mm_count;
>
> - /**
> - * @has_pinned: Whether this mm has pinned any pages. This can
> - * be either replaced in the future by @pinned_vm when it
> - * becomes stable, or grow into a counter on its own. We're
> - * aggresive on this bit now - even if the pinned pages were
> - * unpinned later on, we'll still keep this bit set for the
> - * lifecycle of this mm just for simplicity.
> - */
> - atomic_t has_pinned;
> -
> /**
> * @write_protect_seq: Locked when any thread is write
> * protecting pages mapped by this mm to enforce a later COW,
> diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
> index dfd82eab29025..4d9e3a6568758 100644
> --- a/include/linux/sched/coredump.h
> +++ b/include/linux/sched/coredump.h
> @@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
> #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
> #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
> #define MMF_MULTIPROCESS 27 /* mm is shared between processes */
> +/*
> + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
> + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
> + * a counter on its own. We're aggresive on this bit for now: even if the
> + * pinned pages were unpinned later on, we'll still keep this bit set for the
> + * lifecycle of this mm, just for simplicity.
> + */
> +#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
> #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
>
> #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 502dc046fbc62..a71e73707ef59 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -1026,7 +1026,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
> mm_pgtables_bytes_init(mm);
> mm->map_count = 0;
> mm->locked_vm = 0;
> - atomic_set(&mm->has_pinned, 0);
> atomic64_set(&mm->pinned_vm, 0);
> memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
> spin_lock_init(&mm->page_table_lock);
> diff --git a/mm/gup.c b/mm/gup.c
> index 9933bc5c2eff2..bb130723a6717 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
> }
> EXPORT_SYMBOL_GPL(fixup_user_fault);
>
> +/*
> + * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
> + * lifecycle. Avoid setting the bit unless necessary, or it might cause write
> + * cache bouncing on large SMP machines for concurrent pinned gups.
> + */
> +static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
> +{
> + if (!test_bit(MMF_HAS_PINNED, mm_flags))
> + set_bit(MMF_HAS_PINNED, mm_flags);
> +}
> +
> /*
> * Please note that this function, unlike __get_user_pages will not
> * return 0 for nr_pages > 0 without FOLL_NOWAIT
> @@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
> BUG_ON(*locked != 1);
> }
>
> - if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
> - atomic_set(&mm->has_pinned, 1);
> + if (flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&mm->flags);
>
> /*
> * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
> @@ -2617,8 +2628,8 @@ static int internal_get_user_pages_fast(unsigned long start,
> FOLL_FAST_ONLY)))
> return -EINVAL;
>
> - if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
> - atomic_set(&current->mm->has_pinned, 1);
> + if (gup_flags & FOLL_PIN)
> + mm_set_has_pinned_flag(&current->mm->flags);

The linux-next tag next-20210512 builds failed on arm, riscv, mips and
sh for the tinyconfig and allnoconfig configs due to this patch.

arm, mips, riscv and sh (tinyconfig) with gcc-8
arm, mips, riscv and sh (allnoconfig) with gcc-8
arm, mips, riscv and sh (tinyconfig) with gcc-9
arm, mips, riscv and sh (allnoconfig) with gcc-9
arm, mips, riscv and sh (tinyconfig) with gcc-10
arm, mips, riscv and sh (allnoconfig) with gcc-10

mm/gup.c: In function 'internal_get_user_pages_fast':
mm/gup.c:2698:3: error: implicit declaration of function
'mm_set_has_pinned_flag' [-Werror=implicit-function-declaration]
2698 | mm_set_has_pinned_flag(&current->mm->flags);
| ^~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
make[2]: *** [/builds/linux/scripts/Makefile.build:273: mm/gup.o] Error 1

Reported-by: Naresh Kamboju <[email protected]>

#regzb introduced: 354a2e3604e2 ("mm: gup: pack has_pinned in MMF_HAS_PINNED")

Build url:
https://gitlab.com/Linaro/lkft/mirrors/next/linux-next/-/jobs/1255567072#L315

--
Linaro LKFT
https://lkft.linaro.org

2021-05-12 12:52:58

by Peter Xu

Subject: Re: [PATCH v2 3/3] mm: gup: pack has_pinned in MMF_HAS_PINNED

On Wed, May 12, 2021 at 11:49:05AM +0200, Geert Uytterhoeven wrote:
> Hi Peter, Andrea,

Hi, Geert, Naresh,

(Adding Naresh too, since he reported the same issue in the meantime)

>
> On Fri, May 7, 2021 at 7:26 PM Peter Xu <[email protected]> wrote:
> > From: Andrea Arcangeli <[email protected]>
> >
> > has_pinned 32bit can be packed in the MMF_HAS_PINNED bit as a noop
> > cleanup.
> >
> > Any atomic_inc/dec to the mm cacheline shared by all threads in
> > pin-fast would reintroduce a loss of SMP scalability to pin-fast, so
> > there's no future potential usefulness to keep an atomic in the mm for
> > this.
> >
> > set_bit(MMF_HAS_PINNED) will be theoretically a bit slower than
> > WRITE_ONCE (atomic_set is equivalent to WRITE_ONCE), but the set_bit
> > (just like atomic_set after this commit) has to be still issued only
> > once per "mm", so the difference between the two will be lost in the
> > noise.
> >
> > will-it-scale "mmap2" shows no change in performance with enterprise
> > config as expected.
> >
> > will-it-scale "pin_fast" retains the > 4000% SMP scalability
> > performance improvement against upstream as expected.
> >
> > This is a noop as far as overall performance and SMP scalability are
> > concerned.
> >
> > Signed-off-by: Andrea Arcangeli <[email protected]>
> > [peterx: Fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix
> > comment here and there]
> > Signed-off-by: Peter Xu <[email protected]>
>
> Thanks for your patch, which is now in linux-next.
>
> > diff --git a/mm/gup.c b/mm/gup.c
> > index 9933bc5c2eff2..bb130723a6717 100644
> > --- a/mm/gup.c
> > +++ b/mm/gup.c
> > @@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
> > }
> > EXPORT_SYMBOL_GPL(fixup_user_fault);
> >
> > +/*
> > + * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
> > + * lifecycle. Avoid setting the bit unless necessary, or it might cause write
> > + * cache bouncing on large SMP machines for concurrent pinned gups.
> > + */
> > +static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
> > +{
> > + if (!test_bit(MMF_HAS_PINNED, mm_flags))
> > + set_bit(MMF_HAS_PINNED, mm_flags);
> > +}
> > +
> > /*
> > * Please note that this function, unlike __get_user_pages will not
> > * return 0 for nr_pages > 0 without FOLL_NOWAIT
> > @@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
> > BUG_ON(*locked != 1);
> > }
> >
> > - if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
> > - atomic_set(&mm->has_pinned, 1);
> > + if (flags & FOLL_PIN)
> > + mm_set_has_pinned_flag(&mm->flags);
> >
> > /*
> > * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
> > @@ -2617,8 +2628,8 @@ static int internal_get_user_pages_fast(unsigned long start,
> > FOLL_FAST_ONLY)))
> > return -EINVAL;
> >
> > - if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
> > - atomic_set(&current->mm->has_pinned, 1);
> > + if (gup_flags & FOLL_PIN)
> > + mm_set_has_pinned_flag(&current->mm->flags);
>
> [email protected] reports:
>
> FAILED linux-next/m5272c3_defconfig/m68k-gcc8 Wed May 12, 19:30
> http://kisskb.ellerman.id.au/kisskb/buildresult/14543658/
> Commit: Add linux-next specific files for 20210512
> ec85c95b0c90a17413901b018e8ade7b9eae7cad
> Compiler: m68k-linux-gcc (GCC) 8.1.0 / GNU ld (GNU Binutils) 2.30
>
> mm/gup.c:2698:3: error: implicit declaration of function
> 'mm_set_has_pinned_flag'; did you mean 'set_tsk_thread_flag'?
> [-Werror=implicit-function-declaration]
>
> It's definition is inside the #ifdef CONFIG_MMU section, but the last
> user isn't.

Indeed that's wrong; I replied to the mm-commits email yesterday (but
not here) with the fix-up:

https://lore.kernel.org/mm-commits/20210511220029.m6tGcxUIw%[email protected]/

I'll remember to reply to the thread next time. Sorry for that!

--
Peter Xu