On Thu, 3 Feb 2022 01:32:32 -0800
John Hubbard <[email protected]> wrote:
> There is only one caller of get_user_pages_locked(). The purpose of
> get_user_pages_locked() is to allow for unlocking the mmap_lock when
> reading a page from the disk during a page fault (hidden behind
> VM_FAULT_RETRY). The idea is to reduce contention on the heavily-used
> mmap_lock. (Thanks to Jan Kara for clearly pointing that out, and in
> fact I've used some of his wording here.)
>
> However, it is unlikely for lookup_node() to take a page fault. With
> that in mind, change over to calling get_user_pages_fast(). This
> simplifies the code, runs a little faster in the expected case, and
> allows removing get_user_pages_locked() entirely.
>
> Reviewed-by: Jan Kara <[email protected]>
> Reviewed-by: Jason Gunthorpe <[email protected]>
> Signed-off-by: John Hubbard <[email protected]>

I have always disliked these functions that might or might not unlock
the lock under the hood. I'm happy to see one more go.

Reviewed-by: Claudio Imbrenda <[email protected]>
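
For readers following the thread, a minimal before/after sketch of the
calling pattern (simplified from the hunks below; not the exact kernel
code) shows why the locked variant is easy to misuse:

	/*
	 * Before (simplified): the helper may drop mmap_lock internally,
	 * so the caller has to consult "locked" on return to know
	 * whether it still holds the lock.
	 */
	int locked = 1;

	mmap_read_lock(mm);
	ret = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);

	/*
	 * After (simplified): the caller drops mmap_lock explicitly
	 * before lookup_node(), and get_user_pages_fast() does not
	 * require the caller to hold it at all.
	 */
	mmap_read_unlock(mm);
	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}

If the lockless walk cannot pin the page, get_user_pages_fast() falls
back to the regular GUP path and takes mmap_lock itself, so the caller
never has to track lock state.
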
> ---
> include/linux/mm.h | 2 --
> mm/gup.c | 59 ----------------------------------------------
> mm/mempolicy.c | 21 +++++++----------
> 3 files changed, 9 insertions(+), 73 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 80c540c17d83..528ef1cb4f3a 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1916,8 +1916,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
> long pin_user_pages(unsigned long start, unsigned long nr_pages,
> unsigned int gup_flags, struct page **pages,
> struct vm_area_struct **vmas);
> -long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
> - unsigned int gup_flags, struct page **pages, int *locked);
> long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
> struct page **pages, unsigned int gup_flags);
> long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
> diff --git a/mm/gup.c b/mm/gup.c
> index b0ecbfe03928..7da49df59110 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -2118,65 +2118,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
> }
> EXPORT_SYMBOL(get_user_pages);
>
> -/**
> - * get_user_pages_locked() - variant of get_user_pages()
> - *
> - * @start: starting user address
> - * @nr_pages: number of pages from start to pin
> - * @gup_flags: flags modifying lookup behaviour
> - * @pages: array that receives pointers to the pages pinned.
> - * Should be at least nr_pages long. Or NULL, if caller
> - * only intends to ensure the pages are faulted in.
> - * @locked: pointer to lock flag indicating whether lock is held and
> - * subsequently whether VM_FAULT_RETRY functionality can be
> - * utilised. Lock must initially be held.
> - *
> - * It is suitable to replace the form:
> - *
> - * mmap_read_lock(mm);
> - * do_something()
> - * get_user_pages(mm, ..., pages, NULL);
> - * mmap_read_unlock(mm);
> - *
> - * to:
> - *
> - * int locked = 1;
> - * mmap_read_lock(mm);
> - * do_something()
> - * get_user_pages_locked(mm, ..., pages, &locked);
> - * if (locked)
> - * mmap_read_unlock(mm);
> - *
> - * We can leverage the VM_FAULT_RETRY functionality in the page fault
> - * paths better by using either get_user_pages_locked() or
> - * get_user_pages_unlocked().
> - *
> - */
> -long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
> - unsigned int gup_flags, struct page **pages,
> - int *locked)
> -{
> - /*
> - * FIXME: Current FOLL_LONGTERM behavior is incompatible with
> - * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
> - * vmas. As there are no users of this flag in this call we simply
> - * disallow this option for now.
> - */
> - if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
> - return -EINVAL;
> - /*
> - * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
> - * never directly by the caller, so enforce that:
> - */
> - if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
> - return -EINVAL;
> -
> - return __get_user_pages_locked(current->mm, start, nr_pages,
> - pages, NULL, locked,
> - gup_flags | FOLL_TOUCH);
> -}
> -EXPORT_SYMBOL(get_user_pages_locked);
> -
> /*
> * get_user_pages_unlocked() is suitable to replace the form:
> *
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 028e8dd82b44..3f8dc58da3e8 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -907,17 +907,14 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
> static int lookup_node(struct mm_struct *mm, unsigned long addr)
> {
> struct page *p = NULL;
> - int err;
> + int ret;
>
> - int locked = 1;
> - err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
> - if (err > 0) {
> - err = page_to_nid(p);
> + ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
> + if (ret > 0) {
> + ret = page_to_nid(p);
> put_page(p);
> }
> - if (locked)
> - mmap_read_unlock(mm);
> - return err;
> + return ret;
> }
>
> /* Retrieve NUMA policy */
> @@ -968,14 +965,14 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
> if (flags & MPOL_F_NODE) {
> if (flags & MPOL_F_ADDR) {
> /*
> - * Take a refcount on the mpol, lookup_node()
> - * will drop the mmap_lock, so after calling
> - * lookup_node() only "pol" remains valid, "vma"
> - * is stale.
> + * Take a refcount on the mpol, because we are about to
> + * drop the mmap_lock, after which only "pol" remains
> + * valid, "vma" is stale.
> */
> pol_refcount = pol;
> vma = NULL;
> mpol_get(pol);
> + mmap_read_unlock(mm);
> err = lookup_node(mm, addr);
> if (err < 0)
> goto out;