2019-04-11 16:49:15

by Mina Almasry

Subject: [PATCH] fs: Fix ovl_i_mutex_dir_key/p->lock/cred cred_guard_mutex deadlock

These three locks are acquired in different orders on different code
paths, which can deadlock:

https://syzkaller.appspot.com/bug?id=00f119b8bb35a3acbcfafb9d36a2752b364e8d66

======================================================
WARNING: possible circular locking dependency detected
4.19.0-rc5+ #253 Not tainted
------------------------------------------------------
syz-executor1/545 is trying to acquire lock:
00000000b04209e4 (&ovl_i_mutex_dir_key[depth]){++++}, at: inode_lock_shared include/linux/fs.h:748 [inline]
00000000b04209e4 (&ovl_i_mutex_dir_key[depth]){++++}, at: do_last fs/namei.c:3323 [inline]
00000000b04209e4 (&ovl_i_mutex_dir_key[depth]){++++}, at: path_openat+0x250d/0x5160 fs/namei.c:3534

but task is already holding lock:
0000000044500cca (&sig->cred_guard_mutex){+.+.}, at: prepare_bprm_creds+0x53/0x120 fs/exec.c:1404

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #3 (&sig->cred_guard_mutex){+.+.}:
__mutex_lock_common kernel/locking/mutex.c:925 [inline]
__mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
mutex_lock_killable_nested+0x16/0x20 kernel/locking/mutex.c:1102
lock_trace+0x4c/0xe0 fs/proc/base.c:384
proc_pid_stack+0x196/0x3b0 fs/proc/base.c:420
proc_single_show+0x101/0x190 fs/proc/base.c:723
seq_read+0x4af/0x1150 fs/seq_file.c:229
do_loop_readv_writev fs/read_write.c:700 [inline]
do_iter_read+0x4a3/0x650 fs/read_write.c:924
vfs_readv+0x175/0x1c0 fs/read_write.c:986
do_preadv+0x1cc/0x280 fs/read_write.c:1070
__do_sys_preadv fs/read_write.c:1120 [inline]
__se_sys_preadv fs/read_write.c:1115 [inline]
__x64_sys_preadv+0x9a/0xf0 fs/read_write.c:1115
do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #2 (&p->lock){+.+.}:
__mutex_lock_common kernel/locking/mutex.c:925 [inline]
__mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
seq_read+0x71/0x1150 fs/seq_file.c:161
do_loop_readv_writev fs/read_write.c:700 [inline]
do_iter_read+0x4a3/0x650 fs/read_write.c:924
vfs_readv+0x175/0x1c0 fs/read_write.c:986
kernel_readv fs/splice.c:362 [inline]
default_file_splice_read+0x53c/0xb20 fs/splice.c:417
do_splice_to+0x12e/0x190 fs/splice.c:881
splice_direct_to_actor+0x270/0x8f0 fs/splice.c:953
do_splice_direct+0x2d4/0x420 fs/splice.c:1062
do_sendfile+0x62a/0xe20 fs/read_write.c:1440
__do_sys_sendfile64 fs/read_write.c:1495 [inline]
__se_sys_sendfile64 fs/read_write.c:1487 [inline]
__x64_sys_sendfile64+0x15d/0x250 fs/read_write.c:1487
do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #1 (sb_writers#5){.+.+}:
percpu_down_read_preempt_disable include/linux/percpu-rwsem.h:36 [inline]
percpu_down_read include/linux/percpu-rwsem.h:59 [inline]
__sb_start_write+0x214/0x370 fs/super.c:1387
sb_start_write include/linux/fs.h:1566 [inline]
mnt_want_write+0x3f/0xc0 fs/namespace.c:360
ovl_want_write+0x76/0xa0 fs/overlayfs/util.c:24
ovl_create_object+0x142/0x3a0 fs/overlayfs/dir.c:596
ovl_create+0x2b/0x30 fs/overlayfs/dir.c:627
lookup_open+0x1319/0x1b90 fs/namei.c:3234
do_last fs/namei.c:3324 [inline]
path_openat+0x15e7/0x5160 fs/namei.c:3534
do_filp_open+0x255/0x380 fs/namei.c:3564
do_sys_open+0x568/0x700 fs/open.c:1063
ksys_open include/linux/syscalls.h:1276 [inline]
__do_sys_creat fs/open.c:1121 [inline]
__se_sys_creat fs/open.c:1119 [inline]
__x64_sys_creat+0x61/0x80 fs/open.c:1119
do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #0 (&ovl_i_mutex_dir_key[depth]){++++}:
lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
down_read+0xb0/0x1d0 kernel/locking/rwsem.c:24
inode_lock_shared include/linux/fs.h:748 [inline]
do_last fs/namei.c:3323 [inline]
path_openat+0x250d/0x5160 fs/namei.c:3534
do_filp_open+0x255/0x380 fs/namei.c:3564
do_open_execat+0x221/0x8e0 fs/exec.c:853
__do_execve_file.isra.33+0x173f/0x2540 fs/exec.c:1755
do_execveat_common fs/exec.c:1866 [inline]
do_execve fs/exec.c:1883 [inline]
__do_sys_execve fs/exec.c:1964 [inline]
__se_sys_execve fs/exec.c:1959 [inline]
__x64_sys_execve+0x8f/0xc0 fs/exec.c:1959
do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

Chain exists of:
&ovl_i_mutex_dir_key[depth] --> &p->lock --> &sig->cred_guard_mutex

Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&sig->cred_guard_mutex);
                               lock(&p->lock);
                               lock(&sig->cred_guard_mutex);
  lock(&ovl_i_mutex_dir_key[depth]);

*** DEADLOCK ***
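For readers less familiar with lockdep output: the cycle above is a classic
ABBA inversion spread across three lock classes. A minimal userspace analogue
using POSIX mutexes (illustrative only; the names merely mirror the kernel
locks, and p->lock is elided because two locks are enough to show the cycle):

	/* abba.c - build with: cc -pthread abba.c */
	#include <pthread.h>
	#include <unistd.h>

	static pthread_mutex_t cred_guard_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t ovl_dir_lock = PTHREAD_MUTEX_INITIALIZER;

	/* execve() path: cred_guard_mutex first, then the dir lock. */
	static void *cpu0(void *unused)
	{
		pthread_mutex_lock(&cred_guard_mutex);
		sleep(1);				/* widen the race window */
		pthread_mutex_lock(&ovl_dir_lock);	/* blocks forever */
		pthread_mutex_unlock(&ovl_dir_lock);
		pthread_mutex_unlock(&cred_guard_mutex);
		return NULL;
	}

	/* creat()-on-overlayfs path: dir lock first, then (via p->lock
	 * and seq_read) cred_guard_mutex. */
	static void *cpu1(void *unused)
	{
		pthread_mutex_lock(&ovl_dir_lock);
		sleep(1);
		pthread_mutex_lock(&cred_guard_mutex);	/* blocks forever */
		pthread_mutex_unlock(&cred_guard_mutex);
		pthread_mutex_unlock(&ovl_dir_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, cpu0, NULL);
		pthread_create(&b, NULL, cpu1, NULL);
		pthread_join(a, NULL);		/* never returns: deadlock */
		pthread_join(b, NULL);
		return 0;
	}

Both threads block on their second acquisition, which is exactly the shape of
the two-CPU scenario printed above.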

Solution: I establish this locking order for these locks:

1. ovl_i_mutex_dir_key
2. p->lock
3. sig->cred_guard_mutex

In this change I fix the locking order in exec.c, which is the only
place that violates this order.

Signed-off-by: Mina Almasry <[email protected]>
---
fs/exec.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 2e0033348d8e..423d90bc75cc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1742,6 +1742,12 @@ static int __do_execve_file(int fd, struct filename *filename,
if (retval)
goto out_ret;

+ if (!file)
+ file = do_open_execat(fd, filename, flags);
+ retval = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out_free;
+
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
@@ -1754,12 +1760,6 @@ static int __do_execve_file(int fd, struct filename *filename,
check_unsafe_exec(bprm);
current->in_execve = 1;

- if (!file)
- file = do_open_execat(fd, filename, flags);
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- goto out_unmark;
-
sched_exec();

bprm->file = file;
@@ -1775,7 +1775,7 @@ static int __do_execve_file(int fd, struct filename *filename,
fd, filename->name);
if (!pathbuf) {
retval = -ENOMEM;
- goto out_unmark;
+ goto out_free;
}
/*
* Record that a name derived from an O_CLOEXEC fd will be
@@ -1790,7 +1790,7 @@ static int __do_execve_file(int fd, struct filename *filename,

retval = bprm_mm_init(bprm);
if (retval)
- goto out_unmark;
+ goto out_free;

retval = prepare_arg_pages(bprm, argv, envp);
if (retval < 0)
@@ -1840,10 +1840,6 @@ static int __do_execve_file(int fd, struct filename *filename,
mmput(bprm->mm);
}

-out_unmark:
- current->fs->in_exec = 0;
- current->in_execve = 0;
-
out_free:
free_bprm(bprm);
kfree(pathbuf);
--
2.21.0.392.gf8f6787159e-goog


2019-04-12 14:20:40

by Chen, Rong A

Subject: [fs] 853fbf8946: BUG:unable_to_handle_kernel

FYI, we noticed the following commit (built with gcc-7):

commit: 853fbf894629ed7df6b3d494bdf0dca547325188 ("[PATCH] fs: Fix ovl_i_mutex_dir_key/p->lock/cred cred_guard_mutex deadlock")
url: https://github.com/0day-ci/linux/commits/Mina-Almasry/fs-Fix-ovl_i_mutex_dir_key-p-lock-cred-cred_guard_mutex-deadlock/20190412-080519


in testcase: boot

on test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 2G

caused the changes below (please refer to the attached dmesg/kmsg for the entire log/backtrace):


+-------------------------------------------------+------------+------------+
| | 582549e3fb | 853fbf8946 |
+-------------------------------------------------+------------+------------+
| boot_successes | 39 | 0 |
| boot_failures | 24 | 5 |
| BUG:kernel_reboot-without-warning_in_test_stage | 24 | |
| BUG:unable_to_handle_kernel | 0 | 5 |
| Oops:#[##] | 0 | 5 |
| RIP:kfree | 0 | 5 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 5 |
+-------------------------------------------------+------------+------------+



[ 0.775676] BUG: unable to handle kernel paging request at ffffebe9e000cac8
[ 0.775676] clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 7645041785100000 ns
[ 0.775676] #PF error: [normal kernel read fault]
[ 0.775676] PGD 0 P4D 0
[ 0.775676] Oops: 0000 [#1] SMP PTI
[ 0.775676] CPU: 1 PID: 21 Comm: kworker/u4:0 Not tainted 5.1.0-rc4-00059-g853fbf8 #2
[ 0.775676] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
[ 0.775676] RIP: 0010:kfree+0xa1/0x153
[ 0.779952] futex hash table entries: 512 (order: 3, 32768 bytes)
[ 0.780581] xor: automatically using best checksumming function avx
[ 0.779965] Code: 15 9c 1a 16 01 48 01 d8 72 0e 49 c7 c2 00 00 00 80 4c 2b 15 01 93 0b 01 49 01 c2 49 c1 ea 0c 49 c1 e2 06 4c 03 15 df 92 0b 01 <49> 8b 42 08 a8 01 74 04 4c 8d 50 ff 49 8b 52 08 4c 89 d0 f6 c2 01
[ 0.779965] RSP: 0000:ffffc900003cbe60 EFLAGS: 00010286
[ 0.779965] RAX: 000002f88032b644 RBX: 000002f80032b644 RCX: 0000000000000000
[ 0.779965] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 000002f80032b644
[ 0.779965] RBP: ffffc900003cbf08 R08: 0000000080000000 R09: ffffc900003cba68
[ 0.779965] R10: ffffebe9e000cac0 R11: 8080808080808080 R12: ffffffff812dcc00
[ 0.779965] R13: 00000000fffffffe R14: 00000000ffffff9c R15: 0000000000000000
[ 0.779965] FS: 0000000000000000(0000) GS:ffff88806e700000(0000) knlGS:0000000000000000
[ 0.782372] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 0.782372] CR2: ffffebe9e000cac8 CR3: 000000000240e000 CR4: 00000000000406e0
[ 0.782372] Call Trace:
[ 0.782372] free_bprm+0x73/0x7c
[ 0.782372] __do_execve_file+0x720/0x7a6
[ 0.782372] do_execve+0x21/0x24
[ 0.782372] call_usermodehelper_exec_async+0x141/0x16c
[ 0.782372] ? umh_complete+0x1a/0x1a
[ 0.782372] ret_from_fork+0x3a/0x50
[ 0.782372] Modules linked in:
[ 0.782372] CR2: ffffebe9e000cac8
[ 0.782372] ---[ end trace 803d9c656c15319d ]---
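The oops is consistent with the patch jumping to out_free before bprm is
allocated: __do_execve_file() declares bprm without an initializer, so the
moved do_open_execat() error path reaches free_bprm() with stack garbage. A
sketch of the suspected failure mode, reconstructed from the diff above and
the 5.1-era layout of __do_execve_file() (not a quote of the tree under test):

	char *pathbuf = NULL;
	struct linux_binprm *bprm;	/* no initializer */
	...
	if (!file)
		file = do_open_execat(fd, filename, flags);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_free;		/* taken before bprm = kzalloc(...) */
	...
out_free:
	free_bprm(bprm);		/* bprm is uninitialized stack garbage */
	kfree(pathbuf);			/* pathbuf is still NULL, so harmless */

free_bprm() ends by freeing bprm, which matches RIP:kfree under the
free_bprm frame in the call trace above.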


To reproduce:

# build kernel
cd linux
cp config-5.1.0-rc4-00059-g853fbf8 .config
make HOSTCC=gcc-7 CC=gcc-7 ARCH=x86_64 olddefconfig
make HOSTCC=gcc-7 CC=gcc-7 ARCH=x86_64 prepare
make HOSTCC=gcc-7 CC=gcc-7 ARCH=x86_64 modules_prepare
make HOSTCC=gcc-7 CC=gcc-7 ARCH=x86_64 SHELL=/bin/bash
make HOSTCC=gcc-7 CC=gcc-7 ARCH=x86_64 bzImage


git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp qemu -k <bzImage> job-script # job-script is attached in this email



Thanks,
Rong Chen


Attachments:
config-5.1.0-rc4-00059-g853fbf8 (111.21 kB)
dmesg.xz (6.02 kB)

2019-04-23 14:31:29

by Miklos Szeredi

Subject: Re: [PATCH] fs: Fix ovl_i_mutex_dir_key/p->lock/cred cred_guard_mutex deadlock

Cc: linux-unionfs

On Thu, Apr 11, 2019 at 6:48 PM Mina Almasry <[email protected]> wrote:
>
> [...]

2019-04-23 21:32:55

by Mina Almasry

Subject: Re: [PATCH] fs: Fix ovl_i_mutex_dir_key/p->lock/cred cred_guard_mutex deadlock

On Tue, Apr 23, 2019 at 7:28 AM Miklos Szeredi <[email protected]> wrote:
>
> Cc: linux-unionfs
>
> On Thu, Apr 11, 2019 at 6:48 PM Mina Almasry <[email protected]> wrote:
> >
> > [...]

Miklos, this patch is outdated; I've sent out v2 to the maintainer.
I'll forward that to linux-unionfs.