From: Johannes Weiner <[email protected]>
One of our services is observing hanging ps/top/etc. under heavy write
IO, and the task states show this is an mmap_sem priority inversion:
A write fault is holding the mmap_sem in read-mode and waiting for
(heavily cgroup-limited) IO in balance_dirty_pages():
[<0>] balance_dirty_pages+0x724/0x905
[<0>] balance_dirty_pages_ratelimited+0x254/0x390
[<0>] fault_dirty_shared_page.isra.96+0x4a/0x90
[<0>] do_wp_page+0x33e/0x400
[<0>] __handle_mm_fault+0x6f0/0xfa0
[<0>] handle_mm_fault+0xe4/0x200
[<0>] __do_page_fault+0x22b/0x4a0
[<0>] page_fault+0x45/0x50
[<0>] 0xffffffffffffffff
Somebody tries to change the address space, contending for the
mmap_sem in write-mode:
[<0>] call_rwsem_down_write_failed_killable+0x13/0x20
[<0>] do_mprotect_pkey+0xa8/0x330
[<0>] SyS_mprotect+0xf/0x20
[<0>] do_syscall_64+0x5b/0x100
[<0>] entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[<0>] 0xffffffffffffffff
The waiting writer locks out all subsequent readers to avoid lock
starvation, and several threads can be seen hanging like this:
[<0>] call_rwsem_down_read_failed+0x14/0x30
[<0>] proc_pid_cmdline_read+0xa0/0x480
[<0>] __vfs_read+0x23/0x140
[<0>] vfs_read+0x87/0x130
[<0>] SyS_read+0x42/0x90
[<0>] do_syscall_64+0x5b/0x100
[<0>] entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[<0>] 0xffffffffffffffff
To fix this, do what we already do for cache read faults: drop the
mmap_sem before calling into anything IO-bound, in this case
balance_dirty_pages(), and return VM_FAULT_RETRY.
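To illustrate, the retry dance has roughly the following shape when
written as a standalone function. This is only a sketch, and the
helper name is made up for the example; the patch below open-codes
the same logic inside fault_dirty_shared_page():

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>

/*
 * Sketch only: throttle page dirtying to writeback speed, dropping
 * the mmap_sem first when the architecture fault handler allows the
 * fault to be retried.
 */
static vm_fault_t throttle_may_drop_mmap_sem(struct vm_fault *vmf,
					     struct address_space *mapping)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *fpin = NULL;
	vm_fault_t ret = 0;

	if ((vmf->flags &
	     (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
	    FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * Pin the file: it keeps the mapping alive after the
		 * vma can no longer be trusted.
		 */
		fpin = get_file(vma->vm_file);
		up_read(&vma->vm_mm->mmap_sem);
		ret = VM_FAULT_RETRY;
	}

	balance_dirty_pages_ratelimited(mapping);

	if (fpin)
		fput(fpin);
	return ret;
}

The caller propagates VM_FAULT_RETRY so the architecture fault
handler knows the mmap_sem was released and retries the whole fault
after reacquiring it.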
Signed-off-by: Johannes Weiner <[email protected]>
---
mm/memory.c | 53 ++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2e796372927f..da5eb1d67447 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2221,12 +2221,14 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
*
* The function expects the page to be locked and unlocks it.
*/
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
- struct page *page)
+static int fault_dirty_shared_page(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping;
+ struct page *page = vmf->page;
bool dirtied;
bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
+ int ret = 0;
dirtied = set_page_dirty(page);
VM_BUG_ON_PAGE(PageAnon(page), page);
@@ -2239,16 +2241,36 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
mapping = page_rmapping(page);
unlock_page(page);
+ if (!page_mkwrite)
+ file_update_time(vma->vm_file);
+
+ /*
+ * Throttle page dirtying rate down to writeback speed.
+ *
+ * mapping may be NULL here because some device drivers do not
+ * set page.mapping but still dirty their pages
+ *
+ * Drop the mmap_sem before waiting on IO, if we can. The file
+ * is pinning the mapping, as per above.
+ */
if ((dirtied || page_mkwrite) && mapping) {
- /*
- * Some device drivers do not set page.mapping
- * but still dirty their pages
- */
+ struct file *fpin = NULL;
+
+ if ((vmf->flags &
+ (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+ FAULT_FLAG_ALLOW_RETRY) {
+ fpin = get_file(vma->vm_file);
+ up_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_RETRY;
+ }
+
balance_dirty_pages_ratelimited(mapping);
+
+ if (fpin)
+ fput(fpin);
}
- if (!page_mkwrite)
- file_update_time(vma->vm_file);
+ return ret;
}
/*
@@ -2491,6 +2513,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
+ int ret = VM_FAULT_WRITE;
get_page(vmf->page);
@@ -2514,10 +2537,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
wp_page_reuse(vmf);
lock_page(vmf->page);
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
put_page(vmf->page);
- return VM_FAULT_WRITE;
+ return ret;
}
/*
@@ -3561,7 +3584,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
return ret;
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
return ret;
}
@@ -3576,7 +3599,6 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
static vm_fault_t do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct mm_struct *vm_mm = vma->vm_mm;
vm_fault_t ret;
/*
@@ -3617,7 +3639,12 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
- pte_free(vm_mm, vmf->prealloc_pte);
+ /*
+ * XXX: Accessing vma->vm_mm now is not safe. The page
+ * fault handler may have dropped the mmap_sem a long
+ * time ago. Only s390 derefs that parameter.
+ */
+ pte_free(vma->vm_mm, vmf->prealloc_pte);
vmf->prealloc_pte = NULL;
}
return ret;
--
2.23.0
On Tue, Sep 24, 2019 at 01:15:18PM -0400, Johannes Weiner wrote:
> +static int fault_dirty_shared_page(struct vm_fault *vmf)
vm_fault_t, shirley?
> @@ -2239,16 +2241,36 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
> mapping = page_rmapping(page);
> unlock_page(page);
>
> + if (!page_mkwrite)
> + file_update_time(vma->vm_file);
> +
> + /*
> + * Throttle page dirtying rate down to writeback speed.
> + *
> + * mapping may be NULL here because some device drivers do not
> + * set page.mapping but still dirty their pages
> + *
> + * Drop the mmap_sem before waiting on IO, if we can. The file
> + * is pinning the mapping, as per above.
> + */
> if ((dirtied || page_mkwrite) && mapping) {
> - /*
> - * Some device drivers do not set page.mapping
> - * but still dirty their pages
> - */
> + struct file *fpin = NULL;
> +
> + if ((vmf->flags &
> + (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
> + FAULT_FLAG_ALLOW_RETRY) {
> + fpin = get_file(vma->vm_file);
> + up_read(&vma->vm_mm->mmap_sem);
> + ret = VM_FAULT_RETRY;
> + }
> +
> balance_dirty_pages_ratelimited(mapping);
> +
> + if (fpin)
> + fput(fpin);
> }
>
> - if (!page_mkwrite)
> - file_update_time(vma->vm_file);
> + return ret;
> }
I'm not a fan of moving file_update_time() to _before_ the
balance_dirty_pages call. Also, this is now the third place that needs
maybe_unlock_mmap_for_io, see
https://lore.kernel.org/linux-mm/20190917120852.x6x3aypwvh573kfa@box/
How about:
+	struct file *fpin = NULL;
 	if ((dirtied || page_mkwrite) && mapping) {
+		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		balance_dirty_pages_ratelimited(mapping);
 	}
+
+	if (fpin) {
+		file_update_time(fpin);
+		fput(fpin);
+		return VM_FAULT_RETRY;
+	}
 	if (!page_mkwrite)
 		file_update_time(vma->vm_file);
+	return 0;
 }
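For reference, maybe_unlock_mmap_for_io() from the series linked
above looks roughly like this (paraphrased from that posting, so
treat it as a sketch rather than gospel). It is a no-op when fpin is
already set, which is what lets callers thread a single fpin through
several call sites:

static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
					     struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on any
	 * locks, so only pin the file and drop the mmap_sem if just
	 * FAULT_FLAG_ALLOW_RETRY is set.
	 */
	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
	    FAULT_FLAG_ALLOW_RETRY) {
		fpin = get_file(vmf->vma->vm_file);
		up_read(&vmf->vma->vm_mm->mmap_sem);
	}
	return fpin;
}

A non-NULL return tells the caller the mmap_sem has been dropped: it
must return VM_FAULT_RETRY and fput() the pinned file when done.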
> /*
> @@ -2491,6 +2513,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
> __releases(vmf->ptl)
> {
> struct vm_area_struct *vma = vmf->vma;
> + int ret = VM_FAULT_WRITE;
vm_fault_t again.
> @@ -3576,7 +3599,6 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
> static vm_fault_t do_fault(struct vm_fault *vmf)
> {
> struct vm_area_struct *vma = vmf->vma;
> - struct mm_struct *vm_mm = vma->vm_mm;
> vm_fault_t ret;
>
> /*
> @@ -3617,7 +3639,12 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
>
> /* preallocated pagetable is unused: free it */
> if (vmf->prealloc_pte) {
> - pte_free(vm_mm, vmf->prealloc_pte);
> + /*
> + * XXX: Accessing vma->vm_mm now is not safe. The page
> + * fault handler may have dropped the mmap_sem a long
> + * time ago. Only s390 derefs that parameter.
> + */
> + pte_free(vma->vm_mm, vmf->prealloc_pte);
I'm confused. This code looks like it was safe before (as it was caching
vma->vm_mm in a local variable), and now you've made it unsafe ... ?