2024-06-04 23:49:33

by Yang Shi

Subject: [PATCH 1/2] mm: page_ref: remove folio_try_get_rcu()

The below bug was reported on a non-SMP kernel:

[ 275.267158][ T4335] ------------[ cut here ]------------
[ 275.267949][ T4335] kernel BUG at include/linux/page_ref.h:275!
[ 275.268526][ T4335] invalid opcode: 0000 [#1] KASAN PTI
[ 275.269001][ T4335] CPU: 0 PID: 4335 Comm: trinity-c3 Not tainted 6.7.0-rc4-00061-gefa7df3e3bb5 #1
[ 275.269787][ T4335] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
[ 275.270679][ T4335] RIP: 0010:try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.272813][ T4335] RSP: 0018:ffffc90005dcf650 EFLAGS: 00010202
[ 275.273346][ T4335] RAX: 0000000000000246 RBX: ffffea00066e0000 RCX: 0000000000000000
[ 275.274032][ T4335] RDX: fffff94000cdc007 RSI: 0000000000000004 RDI: ffffea00066e0034
[ 275.274719][ T4335] RBP: ffffea00066e0000 R08: 0000000000000000 R09: fffff94000cdc006
[ 275.275404][ T4335] R10: ffffea00066e0037 R11: 0000000000000000 R12: 0000000000000136
[ 275.276106][ T4335] R13: ffffea00066e0034 R14: dffffc0000000000 R15: ffffea00066e0008
[ 275.276790][ T4335] FS: 00007fa2f9b61740(0000) GS:ffffffff89d0d000(0000) knlGS:0000000000000000
[ 275.277570][ T4335] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 275.278143][ T4335] CR2: 00007fa2f6c00000 CR3: 0000000134b04000 CR4: 00000000000406f0
[ 275.278833][ T4335] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 275.279521][ T4335] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 275.280201][ T4335] Call Trace:
[ 275.280499][ T4335] <TASK>
[ 275.280751][ T4335] ? die (arch/x86/kernel/dumpstack.c:421 arch/x86/kernel/dumpstack.c:434 arch/x86/kernel/dumpstack.c:447)
[ 275.281087][ T4335] ? do_trap (arch/x86/kernel/traps.c:112 arch/x86/kernel/traps.c:153)
[ 275.281463][ T4335] ? try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.281884][ T4335] ? try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.282300][ T4335] ? do_error_trap (arch/x86/kernel/traps.c:174)
[ 275.282711][ T4335] ? try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.283129][ T4335] ? handle_invalid_op (arch/x86/kernel/traps.c:212)
[ 275.283561][ T4335] ? try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.283990][ T4335] ? exc_invalid_op (arch/x86/kernel/traps.c:264)
[ 275.284415][ T4335] ? asm_exc_invalid_op (arch/x86/include/asm/idtentry.h:568)
[ 275.284859][ T4335] ? try_get_folio (include/linux/page_ref.h:275 (discriminator 3) mm/gup.c:79 (discriminator 3))
[ 275.285278][ T4335] try_grab_folio (mm/gup.c:148)
[ 275.285684][ T4335] __get_user_pages (mm/gup.c:1297 (discriminator 1))
[ 275.286111][ T4335] ? __pfx___get_user_pages (mm/gup.c:1188)
[ 275.286579][ T4335] ? __pfx_validate_chain (kernel/locking/lockdep.c:3825)
[ 275.287034][ T4335] ? mark_lock (kernel/locking/lockdep.c:4656 (discriminator 1))
[ 275.287416][ T4335] __gup_longterm_locked (mm/gup.c:1509 mm/gup.c:2209)
[ 275.288192][ T4335] ? __pfx___gup_longterm_locked (mm/gup.c:2204)
[ 275.288697][ T4335] ? __pfx_lock_acquire (kernel/locking/lockdep.c:5722)
[ 275.289135][ T4335] ? __pfx___might_resched (kernel/sched/core.c:10106)
[ 275.289595][ T4335] pin_user_pages_remote (mm/gup.c:3350)
[ 275.290041][ T4335] ? __pfx_pin_user_pages_remote (mm/gup.c:3350)
[ 275.290545][ T4335] ? find_held_lock (kernel/locking/lockdep.c:5244 (discriminator 1))
[ 275.290961][ T4335] ? mm_access (kernel/fork.c:1573)
[ 275.291353][ T4335] process_vm_rw_single_vec+0x142/0x360
[ 275.291900][ T4335] ? __pfx_process_vm_rw_single_vec+0x10/0x10
[ 275.292471][ T4335] ? mm_access (kernel/fork.c:1573)
[ 275.292859][ T4335] process_vm_rw_core+0x272/0x4e0
[ 275.293384][ T4335] ? hlock_class (arch/x86/include/asm/bitops.h:227 arch/x86/include/asm/bitops.h:239 include/asm-generic/bitops/instrumented-non-atomic.h:142 kernel/locking/lockdep.c:228)
[ 275.293780][ T4335] ? __pfx_process_vm_rw_core+0x10/0x10
[ 275.294350][ T4335] process_vm_rw (mm/process_vm_access.c:284)
[ 275.294748][ T4335] ? __pfx_process_vm_rw (mm/process_vm_access.c:259)
[ 275.295197][ T4335] ? __task_pid_nr_ns (include/linux/rcupdate.h:306 (discriminator 1) include/linux/rcupdate.h:780 (discriminator 1) kernel/pid.c:504 (discriminator 1))
[ 275.295634][ T4335] __x64_sys_process_vm_readv (mm/process_vm_access.c:291)
[ 275.296139][ T4335] ? syscall_enter_from_user_mode (kernel/entry/common.c:94 kernel/entry/common.c:112)
[ 275.296642][ T4335] do_syscall_64 (arch/x86/entry/common.c:51 (discriminator 1) arch/x86/entry/common.c:82 (discriminator 1))
[ 275.297032][ T4335] ? __task_pid_nr_ns (include/linux/rcupdate.h:306 (discriminator 1) include/linux/rcupdate.h:780 (discriminator 1) kernel/pid.c:504 (discriminator 1))
[ 275.297470][ T4335] ? lockdep_hardirqs_on_prepare (kernel/locking/lockdep.c:4300 kernel/locking/lockdep.c:4359)
[ 275.297988][ T4335] ? do_syscall_64 (arch/x86/include/asm/cpufeature.h:171 arch/x86/entry/common.c:97)
[ 275.298389][ T4335] ? lockdep_hardirqs_on_prepare (kernel/locking/lockdep.c:4300 kernel/locking/lockdep.c:4359)
[ 275.298906][ T4335] ? do_syscall_64 (arch/x86/include/asm/cpufeature.h:171 arch/x86/entry/common.c:97)
[ 275.299304][ T4335] ? do_syscall_64 (arch/x86/include/asm/cpufeature.h:171 arch/x86/entry/common.c:97)
[ 275.299703][ T4335] ? do_syscall_64 (arch/x86/include/asm/cpufeature.h:171 arch/x86/entry/common.c:97)
[ 275.300115][ T4335] entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:129)

This BUG is the VM_BUG_ON(!in_atomic() && !irqs_disabled()) assertion in
folio_ref_try_add_rcu() on non-SMP kernels.

process_vm_readv() calls GUP to pin the THP. An optimization for
pinning THPs, introduced by commit 57edfcfd3419 ("mm/gup: accelerate
thp gup even for "pages != NULL""), calls try_grab_folio() to pin the
THP, but try_grab_folio() is supposed to be called in atomic context on
non-SMP kernels, for example with IRQs or preemption disabled, due to
the optimization introduced by commit e286781d5f2e ("mm: speculative
page references").

Commit efa7df3e3bb5 ("mm: align larger anonymous mappings on THP
boundaries") is not actually the root cause, although the problem was
bisected to it; it just makes the problem more likely to be exposed.

The follow-up discussion suggested that the optimization for non-SMP
kernels may be outdated and no longer worth it [1], so remove the
optimization to silence the BUG.

However, calling try_grab_folio() in the GUP slow path is actually
unnecessary, so the following patch will clean this up.

[1] https://lore.kernel.org/linux-mm/821cf1d6-92b9-4ac4-bacc-d8f2364ac14f@paulmck-laptop/
Fixes: 57edfcfd3419 ("mm/gup: accelerate thp gup even for "pages != NULL"")
Reported-by: kernel test robot <[email protected]>
Cc: linux-stable <[email protected]> v6.6+
Signed-off-by: Yang Shi <[email protected]>
---
include/linux/page_ref.h | 49 ++--------------------------------------
mm/filemap.c | 10 ++++----
mm/gup.c | 2 +-
3 files changed, 8 insertions(+), 53 deletions(-)

diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 1acf5bac7f50..490d0ad6e56d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -258,54 +258,9 @@ static inline bool folio_try_get(struct folio *folio)
return folio_ref_add_unless(folio, 1, 0);
}

-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
-{
-#ifdef CONFIG_TINY_RCU
- /*
- * The caller guarantees the folio will not be freed from interrupt
- * context, so (on !SMP) we only need preemption to be disabled
- * and TINY_RCU does that for us.
- */
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
- folio_ref_add(folio, count);
-#else
- if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
- /* Either the folio has been freed, or will be freed. */
- return false;
- }
-#endif
- return true;
-}
-
-/**
- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
- * @folio: The folio.
- *
- * This is a version of folio_try_get() optimised for non-SMP kernels.
- * If you are still holding the rcu_read_lock() after looking up the
- * page and know that the page cannot have its refcount decreased to
- * zero in interrupt context, you can use this instead of folio_try_get().
- *
- * Example users include get_user_pages_fast() (as pages are not unmapped
- * from interrupt context) and the page cache lookups (as pages are not
- * truncated from interrupt context). We also know that pages are not
- * frozen in interrupt context for the purposes of splitting or migration.
- *
- * You can also use this function if you're holding a lock that prevents
- * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_lock or page table lock for page tables. In this case,
- * it will always succeed, and you could have used a plain folio_get(),
- * but it's sometimes more convenient to have a common function called
- * from both locked and RCU-protected contexts.
- *
- * Return: True if the reference count was successfully incremented.
- */
-static inline bool folio_try_get_rcu(struct folio *folio)
+static inline bool folio_ref_try_add(struct folio *folio, int count)
{
- return folio_ref_try_add_rcu(folio, 1);
+ return folio_ref_add_unless(folio, count, 0);
}

static inline int page_ref_freeze(struct page *page, int count)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9fe5c02ae92e..0fb5f3097094 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1847,7 +1847,7 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
if (!folio || xa_is_value(folio))
goto out;

- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto repeat;

if (unlikely(folio != xas_reload(&xas))) {
@@ -2001,7 +2001,7 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
if (!folio || xa_is_value(folio))
return folio;

- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto reset;

if (unlikely(folio != xas_reload(xas))) {
@@ -2181,7 +2181,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;

- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto retry;

if (unlikely(folio != xas_reload(&xas)))
@@ -2313,7 +2313,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
break;
if (xa_is_sibling(folio))
break;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
goto retry;

if (unlikely(folio != xas_reload(&xas)))
@@ -3473,7 +3473,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
continue;
if (folio_test_locked(folio))
continue;
- if (!folio_try_get_rcu(folio))
+ if (!folio_try_get(folio))
continue;
/* Has the page moved or been split? */
if (unlikely(folio != xas_reload(xas)))
diff --git a/mm/gup.c b/mm/gup.c
index e17466fd62bb..17f89e8d31f1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -78,7 +78,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
folio = page_folio(page);
if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
return NULL;
- if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
+ if (unlikely(!folio_ref_try_add(folio, refs)))
return NULL;

/*
--
2.41.0



2024-06-04 23:49:43

by Yang Shi

Subject: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path

try_grab_folio() is supposed to be used in the fast path, and it
elevates the folio refcount with an add-ref-unless-zero operation. In
the slow path we are guaranteed to hold at least one stable reference,
so a simple atomic add can be used instead. The performance difference
should be trivial, but the misuse may be confusing and misleading.
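
For reference, the distinction boils down to something like the sketch
below (a simplified illustration, not the exact mm/gup.c code):

	/*
	 * Fast path: the folio can be freed from under us, so its
	 * refcount may already have dropped to zero. Only an
	 * add-unless-zero operation can safely take a reference, and
	 * the caller must handle failure.
	 */
	if (!folio_ref_add_unless(folio, refs, 0))
		return NULL;	/* folio has been, or is being, freed */

	/*
	 * Slow path: the caller already holds a stable reference (e.g.
	 * via the page table lock), so the refcount is known to be
	 * non-zero and a plain atomic add is sufficient.
	 */
	folio_ref_add(folio, refs);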

Signed-off-by: Yang Shi <[email protected]>
---
mm/gup.c | 112 +++++++++++++++++++++++++++--------------------
mm/huge_memory.c | 2 +-
mm/internal.h | 3 +-
3 files changed, 66 insertions(+), 51 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 17f89e8d31f1..a683e7ac47b5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -100,7 +100,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
}

/**
- * try_grab_folio() - Attempt to get or pin a folio.
+ * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
* @page: pointer to page to be grabbed
* @refs: the value to (effectively) add to the folio's refcount
* @flags: gup flags: these are the FOLL_* flag values.
@@ -124,11 +124,18 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
* incremented) for success, or NULL upon failure. If neither FOLL_GET
* nor FOLL_PIN was set, that's considered failure, and furthermore,
* a likely bug in the caller, so a warning is also emitted.
+ *
+ * It uses add ref unless zero to elevate the folio refcount and must be called
+ * in fast path only.
*/
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
+static struct folio *try_grab_folio_fast(struct page *page, int refs,
+ unsigned int flags)
{
struct folio *folio;

+ /* Raise warn if it is not called in fast GUP */
+ VM_WARN_ON_ONCE(!irqs_disabled());
+
if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
return NULL;

@@ -205,28 +212,31 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
}

/**
- * try_grab_page() - elevate a page's refcount by a flag-dependent amount
- * @page: pointer to page to be grabbed
- * @flags: gup flags: these are the FOLL_* flag values.
+ * try_grab_folio() - add a folio's refcount by a flag-dependent amount
+ * @folio: pointer to folio to be grabbed
+ * @refs: the value to (effectively) add to the folio's refcount
+ * @flags: gup flags: these are the FOLL_* flag values.
*
* This might not do anything at all, depending on the flags argument.
*
* "grab" names in this file mean, "look at flags to decide whether to use
- * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
*
* Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
- * time. Cases: please see the try_grab_folio() documentation, with
- * "refs=1".
+ * time.
*
* Return: 0 for success, or if no action was required (if neither FOLL_PIN
* nor FOLL_GET was set, nothing is done). A negative error code for failure:
*
- * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not
+ * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
* be grabbed.
+ *
+ * It is called when we have a stable reference for the folio, typically in
+ * GUP slow path.
*/
-int __must_check try_grab_page(struct page *page, unsigned int flags)
+int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags)
{
- struct folio *folio = page_folio(page);
+ struct page *page = &folio->page;

if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
return -ENOMEM;
@@ -235,7 +245,7 @@ int __must_check try_grab_page(struct page *page, unsigned int flags)
return -EREMOTEIO;

if (flags & FOLL_GET)
- folio_ref_inc(folio);
+ folio_ref_add(folio, refs);
else if (flags & FOLL_PIN) {
/*
* Don't take a pin on the zero page - it's not going anywhere
@@ -245,18 +255,18 @@ int __must_check try_grab_page(struct page *page, unsigned int flags)
return 0;

/*
- * Similar to try_grab_folio(): be sure to *also*
- * increment the normal page refcount field at least once,
+ * Increment the normal page refcount field at least once,
* so that the page really is pinned.
*/
if (folio_test_large(folio)) {
- folio_ref_add(folio, 1);
- atomic_add(1, &folio->_pincount);
+ folio_ref_add(folio, refs);
+ atomic_add(refs, &folio->_pincount);
} else {
- folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
+ folio_ref_add(folio,
+ refs * GUP_PIN_COUNTING_BIAS);
}

- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
+ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
}

return 0;
@@ -584,7 +594,7 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
*/
static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz,
unsigned long addr, unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
unsigned long pte_end;
struct page *page;
@@ -607,9 +617,15 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
page = pte_page(pte);
refs = record_subpages(page, sz, addr, end, pages + *nr);

- folio = try_grab_folio(page, refs, flags);
- if (!folio)
- return 0;
+ if (fast) {
+ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+ } else {
+ folio = page_folio(page);
+ if (try_grab_folio(folio, refs, flags))
+ return 0;
+ }

if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
gup_put_folio(folio, refs, flags);
@@ -637,7 +653,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
unsigned long addr, unsigned int pdshift,
unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
pte_t *ptep;
unsigned long sz = 1UL << hugepd_shift(hugepd);
@@ -647,7 +663,7 @@ static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
ptep = hugepte_offset(hugepd, addr, pdshift);
do {
next = hugepte_addr_end(addr, end, sz);
- ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr);
+ ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr, fast);
if (ret != 1)
return ret;
} while (ptep++, addr = next, addr != end);
@@ -674,7 +690,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
ptep = hugepte_offset(hugepd, addr, pdshift);
ptl = huge_pte_lock(h, vma->vm_mm, ptep);
ret = gup_hugepd(vma, hugepd, addr, pdshift, addr + PAGE_SIZE,
- flags, &page, &nr);
+ flags, &page, &nr, false);
spin_unlock(ptl);

if (ret == 1) {
@@ -691,7 +707,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
static inline int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
unsigned long addr, unsigned int pdshift,
unsigned long end, unsigned int flags,
- struct page **pages, int *nr)
+ struct page **pages, int *nr, bool fast)
{
return 0;
}
@@ -778,7 +794,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
gup_must_unshare(vma, flags, page))
return ERR_PTR(-EMLINK);

- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
page = ERR_PTR(ret);
else
@@ -855,7 +871,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
!PageAnonExclusive(page), page);

- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
return ERR_PTR(ret);

@@ -1017,8 +1033,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
!PageAnonExclusive(page), page);

- /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
- ret = try_grab_page(page, flags);
+ /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (unlikely(ret)) {
page = ERR_PTR(ret);
goto out;
@@ -1282,7 +1298,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
goto unmap;
*page = pte_page(entry);
}
- ret = try_grab_page(*page, gup_flags);
+ ret = try_grab_folio(page_folio(*page), 1, gup_flags);
if (unlikely(ret))
goto unmap;
out:
@@ -1685,20 +1701,19 @@ static long __get_user_pages(struct mm_struct *mm,
* pages.
*/
if (page_increm > 1) {
- struct folio *folio;
+ struct folio *folio = page_folio(page);

/*
* Since we already hold refcount on the
* large folio, this should never fail.
*/
- folio = try_grab_folio(page, page_increm - 1,
- foll_flags);
- if (WARN_ON_ONCE(!folio)) {
+ if (try_grab_folio(folio, page_increm - 1,
+ foll_flags)) {
/*
* Release the 1st page ref if the
* folio is problematic, fail hard.
*/
- gup_put_folio(page_folio(page), 1,
+ gup_put_folio(folio, 1,
foll_flags);
ret = -EFAULT;
goto out;
@@ -3041,7 +3056,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);

- folio = try_grab_folio(page, 1, flags);
+ folio = try_grab_folio_fast(page, 1, flags);
if (!folio)
goto pte_unmap;

@@ -3128,7 +3143,7 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
break;
}

- folio = try_grab_folio(page, 1, flags);
+ folio = try_grab_folio_fast(page, 1, flags);
if (!folio) {
gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
break;
@@ -3217,7 +3232,7 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
page = pmd_page(orig);
refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);

- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;

@@ -3261,7 +3276,7 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
page = pud_page(orig);
refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);

- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;

@@ -3301,7 +3316,7 @@ static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
page = pgd_page(orig);
refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);

- folio = try_grab_folio(page, refs, flags);
+ folio = try_grab_folio_fast(page, refs, flags);
if (!folio)
return 0;

@@ -3355,7 +3370,7 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
* pmd format and THP pmd format
*/
if (gup_hugepd(NULL, __hugepd(pmd_val(pmd)), addr,
- PMD_SHIFT, next, flags, pages, nr) != 1)
+ PMD_SHIFT, next, flags, pages, nr, true) != 1)
return 0;
} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
pages, nr))
@@ -3385,7 +3400,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (gup_hugepd(NULL, __hugepd(pud_val(pud)), addr,
- PUD_SHIFT, next, flags, pages, nr) != 1)
+ PUD_SHIFT, next, flags, pages, nr, true) != 1)
return 0;
} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
pages, nr))
@@ -3412,7 +3427,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
BUILD_BUG_ON(p4d_leaf(p4d));
if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
if (gup_hugepd(NULL, __hugepd(p4d_val(p4d)), addr,
- P4D_SHIFT, next, flags, pages, nr) != 1)
+ P4D_SHIFT, next, flags, pages, nr, true) != 1)
return 0;
} else if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
pages, nr))
@@ -3441,7 +3456,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (gup_hugepd(NULL, __hugepd(pgd_val(pgd)), addr,
- PGDIR_SHIFT, next, flags, pages, nr) != 1)
+ PGDIR_SHIFT, next, flags, pages, nr, true) != 1)
return;
} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
pages, nr))
@@ -3842,13 +3857,14 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
next_idx != folio_index(fbatch.folios[i]))
continue;

- folio = try_grab_folio(&fbatch.folios[i]->page,
- 1, FOLL_PIN);
- if (!folio) {
+ if (try_grab_folio(fbatch.folios[i],
+ 1, FOLL_PIN)) {
folio_batch_release(&fbatch);
goto err;
}

+ folio = fbatch.folios[i];
+
if (nr_folios == 0)
*offset = offset_in_folio(folio, start);

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8e49f402d7c7..b6280a01c5fd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1331,7 +1331,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
- ret = try_grab_page(page, flags);
+ ret = try_grab_folio(page_folio(page), 1, flags);
if (ret)
page = ERR_PTR(ret);

diff --git a/mm/internal.h b/mm/internal.h
index 3419c329b3bc..dc358cd51135 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1215,8 +1215,7 @@ int migrate_device_coherent_page(struct page *page);
/*
* mm/gup.c
*/
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
-int __must_check try_grab_page(struct page *page, unsigned int flags);
+int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags);

/*
* mm/huge_memory.c
--
2.41.0


2024-06-05 02:57:51

by kernel test robot

Subject: Re: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path

Hi Yang,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]

url: https://github.com/intel-lab-lkp/linux/commits/Yang-Shi/mm-gup-do-not-call-try_grab_folio-in-slow-path/20240605-075027
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240604234858.948986-2-yang%40os.amperecomputing.com
patch subject: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path
config: openrisc-allnoconfig (https://download.01.org/0day-ci/archive/20240605/[email protected]/config)
compiler: or1k-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240605/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

>> mm/gup.c:131:22: warning: 'try_grab_folio_fast' defined but not used [-Wunused-function]
131 | static struct folio *try_grab_folio_fast(struct page *page, int refs,
| ^~~~~~~~~~~~~~~~~~~


vim +/try_grab_folio_fast +131 mm/gup.c

101
102 /**
103 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
104 * @page: pointer to page to be grabbed
105 * @refs: the value to (effectively) add to the folio's refcount
106 * @flags: gup flags: these are the FOLL_* flag values.
107 *
108 * "grab" names in this file mean, "look at flags to decide whether to use
109 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
110 *
111 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
112 * same time. (That's true throughout the get_user_pages*() and
113 * pin_user_pages*() APIs.) Cases:
114 *
115 * FOLL_GET: folio's refcount will be incremented by @refs.
116 *
117 * FOLL_PIN on large folios: folio's refcount will be incremented by
118 * @refs, and its pincount will be incremented by @refs.
119 *
120 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
121 * @refs * GUP_PIN_COUNTING_BIAS.
122 *
123 * Return: The folio containing @page (with refcount appropriately
124 * incremented) for success, or NULL upon failure. If neither FOLL_GET
125 * nor FOLL_PIN was set, that's considered failure, and furthermore,
126 * a likely bug in the caller, so a warning is also emitted.
127 *
128 * It uses add ref unless zero to elevate the folio refcount and must be called
129 * in fast path only.
130 */
> 131 static struct folio *try_grab_folio_fast(struct page *page, int refs,
132 unsigned int flags)
133 {
134 struct folio *folio;
135
136 /* Raise warn if it is not called in fast GUP */
137 VM_WARN_ON_ONCE(!irqs_disabled());
138
139 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
140 return NULL;
141
142 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
143 return NULL;
144
145 if (flags & FOLL_GET)
146 return try_get_folio(page, refs);
147
148 /* FOLL_PIN is set */
149
150 /*
151 * Don't take a pin on the zero page - it's not going anywhere
152 * and it is used in a *lot* of places.
153 */
154 if (is_zero_page(page))
155 return page_folio(page);
156
157 folio = try_get_folio(page, refs);
158 if (!folio)
159 return NULL;
160
161 /*
162 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
163 * right zone, so fail and let the caller fall back to the slow
164 * path.
165 */
166 if (unlikely((flags & FOLL_LONGTERM) &&
167 !folio_is_longterm_pinnable(folio))) {
168 if (!put_devmap_managed_folio_refs(folio, refs))
169 folio_put_refs(folio, refs);
170 return NULL;
171 }
172
173 /*
174 * When pinning a large folio, use an exact count to track it.
175 *
176 * However, be sure to *also* increment the normal folio
177 * refcount field at least once, so that the folio really
178 * is pinned. That's why the refcount from the earlier
179 * try_get_folio() is left intact.
180 */
181 if (folio_test_large(folio))
182 atomic_add(refs, &folio->_pincount);
183 else
184 folio_ref_add(folio,
185 refs * (GUP_PIN_COUNTING_BIAS - 1));
186 /*
187 * Adjust the pincount before re-checking the PTE for changes.
188 * This is essentially a smp_mb() and is paired with a memory
189 * barrier in folio_try_share_anon_rmap_*().
190 */
191 smp_mb__after_atomic();
192
193 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
194
195 return folio;
196 }
197

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2024-06-05 15:56:28

by Peter Xu

Subject: Re: [PATCH 1/2] mm: page_ref: remove folio_try_get_rcu()

On Tue, Jun 04, 2024 at 04:48:57PM -0700, Yang Shi wrote:
> The below bug was reported on a non-SMP kernel:
>
> [...]

Just to mention, IMHO it'll still be nicer if we keep the 1st fix patch
limited to the folio_ref_try_add_rcu() changes; it'll be easier to
backport.

Now this patch contains not only that but also what is logically a
cleanup patch that replaces the old RCU calls with folio_try_get().
Squashing these may mean we need an explicit backport to 6.6, depending
on whether those lines changed, while the cleanup part may not be
justified for backporting in the first place. I'll leave that to you to
decide, no strong feelings here.

Acked-by: Peter Xu <[email protected]>

Thanks,

--
Peter Xu


2024-06-05 16:16:38

by Yang Shi

Subject: Re: [PATCH 1/2] mm: page_ref: remove folio_try_get_rcu()



On 6/5/24 8:25 AM, Peter Xu wrote:
> On Tue, Jun 04, 2024 at 04:48:57PM -0700, Yang Shi wrote:
>> [...]
> Just to mention, IMHO it'll still be nicer if we keep the 1st fix patch
> limited to the folio_ref_try_add_rcu() changes; it'll be easier to
> backport.
>
> Now this patch contains not only that but also what is logically a
> cleanup patch that replaces the old RCU calls with folio_try_get().
> Squashing these may mean we need an explicit backport to 6.6, depending
> on whether those lines changed, while the cleanup part may not be
> justified for backporting in the first place. I'll leave that to you to
> decide, no strong feelings here.

Neither do I. But I slightly prefer to have the patch as is for
mainline, since removing the #ifdef and the cleanup it leads to seem
self-contained and naturally integral. If it cannot be applied to the
stable tree without conflict, I can generate a separate patch for the
stable tree with just the #ifdef removal. The effort should be trivial.

>
> Acked-by: Peter Xu <[email protected]>

Thank you!

>
> Thanks,
>


2024-06-05 16:17:38

by David Hildenbrand

Subject: Re: [PATCH 1/2] mm: page_ref: remove folio_try_get_rcu()

On 05.06.24 18:16, Yang Shi wrote:
>
>
> On 6/5/24 8:25 AM, Peter Xu wrote:
>> On Tue, Jun 04, 2024 at 04:48:57PM -0700, Yang Shi wrote:
>>> [...]
>> Just to mention, IMHO it'll still be nicer if we keep the 1st fix patch
>> limited to the folio_ref_try_add_rcu() changes; it'll be easier to
>> backport.
>>
>> Now this patch contains not only that but also what is logically a
>> cleanup patch that replaces the old RCU calls with folio_try_get().
>> Squashing these may mean we need an explicit backport to 6.6, depending
>> on whether those lines changed, while the cleanup part may not be
>> justified for backporting in the first place. I'll leave that to you to
>> decide, no strong feelings here.
>
> Neither do I. But I slightly prefer to have the patch as is for
> mainline, since removing the #ifdef and the cleanup it leads to seem
> self-contained and naturally integral. If it cannot be applied to the
> stable tree without conflict, I can generate a separate patch for the
> stable tree with just the #ifdef removal. The effort should be trivial.

Agreed

Acked-by: David Hildenbrand <[email protected]>

--
Cheers,

David / dhildenb


2024-06-05 16:20:45

by Yang Shi

Subject: Re: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path



On 6/4/24 7:57 PM, kernel test robot wrote:
> Hi Yang,
>
> kernel test robot noticed the following build warnings:
>
> [...]
>
> All warnings (new ones prefixed by >>):
>
>>> mm/gup.c:131:22: warning: 'try_grab_folio_fast' defined but not used [-Wunused-function]

Thanks for reporting the problem. It seems the try_grab_folio_fast()
definition should be protected by CONFIG_HAVE_FAST_GUP; will fix it in v2.
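
Roughly, the fix would look like the sketch below (hypothetical
placement; the actual v2 may simply move the helper into the existing
fast-GUP section of mm/gup.c):

	#ifdef CONFIG_HAVE_FAST_GUP
	/*
	 * Only the fast-GUP path uses this helper, so guard it with the
	 * same config option as the rest of the fast-GUP code to avoid
	 * the -Wunused-function warning on builds without fast GUP.
	 */
	static struct folio *try_grab_folio_fast(struct page *page, int refs,
						 unsigned int flags)
	{
		/* ... body as posted above ... */
	}
	#endif /* CONFIG_HAVE_FAST_GUP */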

> [...]