2019-08-08 18:58:18

by Bharath Vedartham

Subject: [Linux-kernel-mentees][PATCH v4 0/1] get_user_pages changes

In this fourth version of the patch series, I have squashed the patches
of the v2 series into a single patch, as suggested by Christoph Hellwig.
His suggestion was to remove the pte_lookup functions and use the
get_user_pages* functions directly.

There is nothing functionally different in this series compared to the
previous one; it simply squashes the three patches of the original
series into a single patch.

This series survives a compile test.

Bharath Vedartham (1):
sgi-gru: Remove *pte_lookup functions

drivers/misc/sgi-gru/grufault.c | 112 +++++++++-------------------------------
1 file changed, 24 insertions(+), 88 deletions(-)

--
2.7.4


2019-08-08 18:58:22

by Bharath Vedartham

Subject: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

The *pte_lookup functions can be removed and easily replaced with the
get_user_pages_fast functions. In the atomic case, __get_user_pages_fast
is used, because it does not fall back to the slow get_user_pages path.
get_user_pages_fast, on the other hand, first tries __get_user_pages_fast
but falls back to the slow get_user_pages path if __get_user_pages_fast
fails.

Also, the unnecessary ifdefs checking for CONFIG_HUGETLB_PAGE are
removed, as the check is redundant.
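
For reference, the lookup in gru_vtop() boils down to the pattern below
after this patch (a minimal sketch, not the literal driver code; the
helper name lookup_paddr_sketch() is made up and error handling is
simplified):

	#include <linux/mm.h>	/* *get_user_pages_fast(), put_user_page(), FOLL_WRITE */

	static int lookup_paddr_sketch(unsigned long vaddr, int write,
				       int atomic, unsigned long *paddr)
	{
		struct page *page;
		int ret;

		if (atomic) {
			/* Never sleeps; returns 0 if the fast walk cannot complete. */
			ret = __get_user_pages_fast(vaddr, 1, write, &page);
			if (ret != 1)
				return 1;	/* caller retries in non-atomic context */
		} else {
			/* May sleep: falls back to slow gup if the fast walk fails. */
			ret = get_user_pages_fast(vaddr, 1,
						  write ? FOLL_WRITE : 0, &page);
			if (ret != 1)
				return -EFAULT;
		}

		*paddr = page_to_phys(page);
		put_user_page(page);	/* drop the reference taken by gup */
		return 0;
	}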

Cc: Ira Weiny <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Jérôme Glisse <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Dimitri Sivanich <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: William Kucharski <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Reviewed-by: Ira Weiny <[email protected]>
Reviewed-by: John Hubbard <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
Signed-off-by: Bharath Vedartham <[email protected]>
---
This is a fold of the 3 patches in the v2 patch series.
The review tags were given to the individual patches.

Changes since v3:
- Use gup flags in get_user_pages_fast rather than a boolean
write argument.
---
drivers/misc/sgi-gru/grufault.c | 112 +++++++++-------------------------------
1 file changed, 24 insertions(+), 88 deletions(-)

diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 4b713a8..304e9c5 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,96 +166,20 @@ static void get_clear_fault_map(struct gru_state *gru,
}

/*
- * Atomic (interrupt context) & non-atomic (user context) functions to
- * convert a vaddr into a physical address. The size of the page
- * is returned in pageshift.
- * returns:
- * 0 - successful
- * < 0 - error code
- * 1 - (atomic only) try again in non-atomic context
- */
-static int non_atomic_pte_lookup(struct vm_area_struct *vma,
- unsigned long vaddr, int write,
- unsigned long *paddr, int *pageshift)
-{
- struct page *page;
-
-#ifdef CONFIG_HUGETLB_PAGE
- *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
-#else
- *pageshift = PAGE_SHIFT;
-#endif
- if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
- return -EFAULT;
- *paddr = page_to_phys(page);
- put_page(page);
- return 0;
-}
-
-/*
- * atomic_pte_lookup
+ * mmap_sem is already held on entry to this function. This guarantees
+ * existence of the page tables.
*
- * Convert a user virtual address to a physical address
* Only supports Intel large pages (2MB only) on x86_64.
- * ZZZ - hugepage support is incomplete
- *
- * NOTE: mmap_sem is already held on entry to this function. This
- * guarantees existence of the page tables.
+ * ZZZ - hugepage support is incomplete.
*/
-static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
- int write, unsigned long *paddr, int *pageshift)
-{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t pte;
-
- pgdp = pgd_offset(vma->vm_mm, vaddr);
- if (unlikely(pgd_none(*pgdp)))
- goto err;
-
- p4dp = p4d_offset(pgdp, vaddr);
- if (unlikely(p4d_none(*p4dp)))
- goto err;
-
- pudp = pud_offset(p4dp, vaddr);
- if (unlikely(pud_none(*pudp)))
- goto err;
-
- pmdp = pmd_offset(pudp, vaddr);
- if (unlikely(pmd_none(*pmdp)))
- goto err;
-#ifdef CONFIG_X86_64
- if (unlikely(pmd_large(*pmdp)))
- pte = *(pte_t *) pmdp;
- else
-#endif
- pte = *pte_offset_kernel(pmdp, vaddr);
-
- if (unlikely(!pte_present(pte) ||
- (write && (!pte_write(pte) || !pte_dirty(pte)))))
- return 1;
-
- *paddr = pte_pfn(pte) << PAGE_SHIFT;
-#ifdef CONFIG_HUGETLB_PAGE
- *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
-#else
- *pageshift = PAGE_SHIFT;
-#endif
- return 0;
-
-err:
- return 1;
-}
-
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
int write, int atomic, unsigned long *gpa, int *pageshift)
{
struct mm_struct *mm = gts->ts_mm;
struct vm_area_struct *vma;
unsigned long paddr;
- int ret, ps;
+ int ret;
+ struct page *page;

vma = find_vma(mm, vaddr);
if (!vma)
@@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,

/*
* Atomic lookup is faster & usually works even if called in non-atomic
- * context.
+ * context. get_user_pages_fast does atomic lookup before falling back to
+ * slow gup.
*/
rmb(); /* Must/check ms_range_active before loading PTEs */
- ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
- if (ret) {
- if (atomic)
+ if (atomic) {
+ ret = __get_user_pages_fast(vaddr, 1, write, &page);
+ if (!ret)
goto upm;
- if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+ } else {
+ ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
+ if (!ret)
goto inval;
}
+
+ paddr = page_to_phys(page);
+ put_user_page(page);
+
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ *pageshift = HPAGE_SHIFT;
+ else
+ *pageshift = PAGE_SHIFT;
+
if (is_gru_paddr(paddr))
goto inval;
- paddr = paddr & ~((1UL << ps) - 1);
+ paddr = paddr & ~((1UL << *pageshift) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
- *pageshift = ps;
+
return VTOP_SUCCESS;

inval:
--
2.7.4

2019-08-08 23:25:33

by John Hubbard

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On 8/8/19 11:55 AM, Bharath Vedartham wrote:
...
> static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> int write, int atomic, unsigned long *gpa, int *pageshift)
> {
> struct mm_struct *mm = gts->ts_mm;
> struct vm_area_struct *vma;
> unsigned long paddr;
> - int ret, ps;
> + int ret;
> + struct page *page;
>
> vma = find_vma(mm, vaddr);
> if (!vma)
> @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>
> /*
> * Atomic lookup is faster & usually works even if called in non-atomic
> - * context.
> + * context. get_user_pages_fast does atomic lookup before falling back to
> + * slow gup.
> */
> rmb(); /* Must/check ms_range_active before loading PTEs */
> - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> - if (ret) {
> - if (atomic)
> + if (atomic) {
> + ret = __get_user_pages_fast(vaddr, 1, write, &page);
> + if (!ret)
> goto upm;
> - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> + } else {
> + ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> + if (!ret)
> goto inval;
> }
> +
> + paddr = page_to_phys(page);
> + put_user_page(page);
> +
> + if (unlikely(is_vm_hugetlb_page(vma)))
> + *pageshift = HPAGE_SHIFT;
> + else
> + *pageshift = PAGE_SHIFT;
> +
> if (is_gru_paddr(paddr))
> goto inval;
> - paddr = paddr & ~((1UL << ps) - 1);
> + paddr = paddr & ~((1UL << *pageshift) - 1);
> *gpa = uv_soc_phys_ram_to_gpa(paddr);
> - *pageshift = ps;

Why are you no longer setting *pageshift? There are a couple of callers
that both use this variable.


thanks,
--
John Hubbard
NVIDIA

2019-08-08 23:31:49

by John Hubbard

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On 8/8/19 4:21 PM, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
>> if (is_gru_paddr(paddr))
>> goto inval;
>> - paddr = paddr & ~((1UL << ps) - 1);
>> + paddr = paddr & ~((1UL << *pageshift) - 1);
>> *gpa = uv_soc_phys_ram_to_gpa(paddr);
>> - *pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
>
>

...and once that's figured out, I can fix it up here and send it up with
the next misc callsites series. I'm also inclined to make the commit
log read more like this:

sgi-gru: Remove *pte_lookup functions, convert to put_user_page*()

For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().

This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").

As part of this conversion, the *pte_lookup functions can be removed and
be easily replaced with get_user_pages_fast() functions. In the case of
atomic lookup, __get_user_pages_fast() is used, because it does not fall
back to the slow path: get_user_pages(). get_user_pages_fast(), on the other
hand, first calls __get_user_pages_fast(), but then falls back to the
slow path if __get_user_pages_fast() fails.

Also: remove unnecessary CONFIG_HUGETLB ifdefs.
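
For illustration, the core of that conversion is just swapping the
release call (a generic sketch of the pattern described in fc1d8e7cca2d,
not code taken from this driver):

	struct page *page;

	/* Before the conversion: */
	if (get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page) != 1)
		return -EFAULT;
	/* ... use the page ... */
	put_page(page);

	/* After the conversion: */
	if (get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page) != 1)
		return -EFAULT;
	/* ... use the page ... */
	put_user_page(page);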


thanks,
--
John Hubbard
NVIDIA

2019-08-09 09:50:54

by Bharath Vedartham

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
> > static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> > int write, int atomic, unsigned long *gpa, int *pageshift)
> > {
> > struct mm_struct *mm = gts->ts_mm;
> > struct vm_area_struct *vma;
> > unsigned long paddr;
> > - int ret, ps;
> > + int ret;
> > + struct page *page;
> >
> > vma = find_vma(mm, vaddr);
> > if (!vma)
> > @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> >
> > /*
> > * Atomic lookup is faster & usually works even if called in non-atomic
> > - * context.
> > + * context. get_user_pages_fast does atomic lookup before falling back to
> > + * slow gup.
> > */
> > rmb(); /* Must/check ms_range_active before loading PTEs */
> > - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> > - if (ret) {
> > - if (atomic)
> > + if (atomic) {
> > + ret = __get_user_pages_fast(vaddr, 1, write, &page);
> > + if (!ret)
> > goto upm;
> > - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> > + } else {
> > + ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> > + if (!ret)
> > goto inval;
> > }
> > +
> > + paddr = page_to_phys(page);
> > + put_user_page(page);
> > +
> > + if (unlikely(is_vm_hugetlb_page(vma)))
> > + *pageshift = HPAGE_SHIFT;
> > + else
> > + *pageshift = PAGE_SHIFT;
> > +
> > if (is_gru_paddr(paddr))
> > goto inval;
> > - paddr = paddr & ~((1UL << ps) - 1);
> > + paddr = paddr & ~((1UL << *pageshift) - 1);
> > *gpa = uv_soc_phys_ram_to_gpa(paddr);
> > - *pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
Hi John,

I did set *pageshift: the if statement above sets it. ps was used to
retrieve the pageshift value when the pte_lookup functions were present;
it was passed by reference to those functions and set by them. Since
this patch removes those functions, ps is no longer needed, and
*pageshift is set directly to HPAGE_SHIFT or PAGE_SHIFT based on the
type of the vma.

Hope this clears things up?

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA

2019-08-09 09:50:54

by Bharath Vedartham

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On Thu, Aug 08, 2019 at 04:30:48PM -0700, John Hubbard wrote:
> On 8/8/19 4:21 PM, John Hubbard wrote:
> > On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> > ...
> >> if (is_gru_paddr(paddr))
> >> goto inval;
> >> - paddr = paddr & ~((1UL << ps) - 1);
> >> + paddr = paddr & ~((1UL << *pageshift) - 1);
> >> *gpa = uv_soc_phys_ram_to_gpa(paddr);
> >> - *pageshift = ps;
> >
> > Why are you no longer setting *pageshift? There are a couple of callers
> > that both use this variable.
> >
> >
>
> ...and once that's figured out, I can fix it up here and send it up with
> the next misc callsites series. I'm also inclined to make the commit
> log read more like this:
>
> sgi-gru: Remove *pte_lookup functions, convert to put_user_page*()
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> As part of this conversion, the *pte_lookup functions can be removed and
> be easily replaced with get_user_pages_fast() functions. In the case of
> atomic lookup, __get_user_pages_fast() is used, because it does not fall
> back to the slow path: get_user_pages(). get_user_pages_fast(), on the other
> hand, first calls __get_user_pages_fast(), but then falls back to the
> slow path if __get_user_pages_fast() fails.
>
> Also: remove unnecessary CONFIG_HUGETLB ifdefs.
Sounds great! I will send the next version with an updated changelog!

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA

2019-08-09 10:32:41

by Bharath Vedartham

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
> > static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> > int write, int atomic, unsigned long *gpa, int *pageshift)
> > {
> > struct mm_struct *mm = gts->ts_mm;
> > struct vm_area_struct *vma;
> > unsigned long paddr;
> > - int ret, ps;
> > + int ret;
> > + struct page *page;
> >
> > vma = find_vma(mm, vaddr);
> > if (!vma)
> > @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> >
> > /*
> > * Atomic lookup is faster & usually works even if called in non-atomic
> > - * context.
> > + * context. get_user_pages_fast does atomic lookup before falling back to
> > + * slow gup.
> > */
> > rmb(); /* Must/check ms_range_active before loading PTEs */
> > - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> > - if (ret) {
> > - if (atomic)
> > + if (atomic) {
> > + ret = __get_user_pages_fast(vaddr, 1, write, &page);
> > + if (!ret)
> > goto upm;
> > - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> > + } else {
> > + ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> > + if (!ret)
> > goto inval;
> > }
> > +
> > + paddr = page_to_phys(page);
> > + put_user_page(page);
> > +
> > + if (unlikely(is_vm_hugetlb_page(vma)))
> > + *pageshift = HPAGE_SHIFT;
> > + else
> > + *pageshift = PAGE_SHIFT;
> > +
> > if (is_gru_paddr(paddr))
> > goto inval;
> > - paddr = paddr & ~((1UL << ps) - 1);
> > + paddr = paddr & ~((1UL << *pageshift) - 1);
> > *gpa = uv_soc_phys_ram_to_gpa(paddr);
> > - *pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
I'll send v5 once you're convinced by my argument that ps is not needed
to set *pageshift and that *pageshift is indeed being set.

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA

2019-08-09 18:05:52

by John Hubbard

Subject: Re: [Linux-kernel-mentees][PATCH v4 1/1] sgi-gru: Remove *pte_lookup functions

On 8/9/19 2:44 AM, Bharath Vedartham wrote:
> On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
>> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
>> ...
>>> static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>>> int write, int atomic, unsigned long *gpa, int *pageshift)
>>> {
>>> struct mm_struct *mm = gts->ts_mm;
>>> struct vm_area_struct *vma;
>>> unsigned long paddr;
>>> - int ret, ps;
>>> + int ret;
>>> + struct page *page;
>>>
>>> vma = find_vma(mm, vaddr);
>>> if (!vma)
>>> @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>>>
>>> /*
>>> * Atomic lookup is faster & usually works even if called in non-atomic
>>> - * context.
>>> + * context. get_user_pages_fast does atomic lookup before falling back to
>>> + * slow gup.
>>> */
>>> rmb(); /* Must/check ms_range_active before loading PTEs */
>>> - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
>>> - if (ret) {
>>> - if (atomic)
>>> + if (atomic) {
>>> + ret = __get_user_pages_fast(vaddr, 1, write, &page);
>>> + if (!ret)
>>> goto upm;
>>> - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
>>> + } else {
>>> + ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
>>> + if (!ret)
>>> goto inval;
>>> }
>>> +
>>> + paddr = page_to_phys(page);
>>> + put_user_page(page);
>>> +
>>> + if (unlikely(is_vm_hugetlb_page(vma)))
>>> + *pageshift = HPAGE_SHIFT;
>>> + else
>>> + *pageshift = PAGE_SHIFT;
>>> +
>>> if (is_gru_paddr(paddr))
>>> goto inval;
>>> - paddr = paddr & ~((1UL << ps) - 1);
>>> + paddr = paddr & ~((1UL << *pageshift) - 1);
>>> *gpa = uv_soc_phys_ram_to_gpa(paddr);
>>> - *pageshift = ps;
>>
>> Why are you no longer setting *pageshift? There are a couple of callers
>> that both use this variable.
> Hi John,
>
> I did set *pageshift: the if statement above sets it. ps was used to
> retrieve the pageshift value when the pte_lookup functions were present;
> it was passed by reference to those functions and set by them. Since
> this patch removes those functions, ps is no longer needed, and
> *pageshift is set directly to HPAGE_SHIFT or PAGE_SHIFT based on the
> type of the vma.
>
> Hope this clears things up?
>

Right you are, sorry for overlooking that. Looks good.

thanks,
--
John Hubbard
NVIDIA