2020-08-09 19:07:40

by Dmitry Monakhov

Subject: [PATCH] lib/test_lockup.c: add parameters for cond_resched inside loop

call_cond_resched_before=Y call cond_resched() with the resource held, before the wait
call_cond_resched_after=Y call cond_resched() with the resource held, after the wait
measure_cond_resched=Y measure the maximum cond_resched() time inside the loop

This simulates a situation where a process calls cond_resched() while holding a lock.

The example below demonstrates a priority inversion issue with an eBPF
program: a low-priority task is scheduled out while holding cgroup_mutex
for a long period of time, which blocks other high-priority programs.

CGROUP_MUTEX=$(gawk '$3 == "cgroup_mutex" {print "0x"$1}' /proc/kallsyms)
# Emulate an eBPF application load which can hang inside cgroup_bpf_attach()
nice -20 modprobe lib/test_lockup.ko \
time_nsecs=1000 cooldown_nsecs=100000 iterations=100000 \
lock_mutex_ptr=$CGROUP_MUTEX \
measure_lock_wait=Y call_cond_resched_after=Y &

stress-ng -c $(nproc) --timeout 10s&
time mkdir /sys/fs/cgroup/blkio/a
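
For reference, a minimal sketch of the pattern being simulated
(illustrative only, not part of this patch; low_prio_work() and
do_some_work() are hypothetical names):

#include <linux/mutex.h>
#include <linux/sched.h>

/*
 * A low-priority task holds a mutex and calls cond_resched() inside the
 * critical section.  Under CPU pressure it may be scheduled out here, so
 * any high-priority task contending on the same mutex is blocked for as
 * long as the holder stays off the CPU (priority inversion).
 */
static void low_prio_work(struct mutex *shared_lock)
{
	mutex_lock(shared_lock);
	do_some_work();		/* hypothetical workload */
	cond_resched();		/* may keep us off the CPU for a long time */
	mutex_unlock(shared_lock);
}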

Signed-off-by: Dmitry Monakhov <[email protected]>
---
lib/test_lockup.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 0f81252..3e05d6e 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -77,6 +77,18 @@ static bool call_cond_resched;
module_param(call_cond_resched, bool, 0600);
MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");

+static bool call_cond_resched_before;
+module_param(call_cond_resched_before, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched_before, "call cond_resched() before wait");
+
+static bool call_cond_resched_after;
+module_param(call_cond_resched_after, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched_after, "call cond_resched() after wait");
+
+static bool measure_cond_resched;
+module_param(measure_cond_resched, bool, 0400);
+MODULE_PARM_DESC(measure_cond_resched, "measure cond_resched time");
+
static bool measure_lock_wait;
module_param(measure_lock_wait, bool, 0400);
MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
@@ -162,6 +174,7 @@ MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
static atomic_t alloc_pages_failed = ATOMIC_INIT(0);

static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+static atomic64_t max_cond_resched = ATOMIC64_INIT(0);

static struct task_struct *main_task;
static int master_cpu;
@@ -346,6 +359,22 @@ static void test_wait(unsigned int secs, unsigned int nsecs)
}
}

+static void test_cond_resched(void)
+{
+ s64 cur, old_max;
+ s64 start = local_clock();
+
+ cond_resched();
+
+ cur = local_clock() - start;
+ old_max = atomic64_read(&max_cond_resched);
+ do {
+ if (cur < old_max)
+ break;
+ old_max = atomic64_cmpxchg(&max_cond_resched, old_max, cur);
+ } while (old_max != cur);
+}
+
static void test_lockup(bool master)
{
u64 lockup_start = local_clock();
@@ -363,8 +392,14 @@ static void test_lockup(bool master)
if (iowait)
current->in_iowait = 1;

+ if (call_cond_resched_before)
+ test_cond_resched();
+
test_wait(time_secs, time_nsecs);

+ if (call_cond_resched_after)
+ test_cond_resched();
+
if (iowait)
current->in_iowait = 0;

@@ -497,6 +532,7 @@ static int __init test_lockup_init(void)

if ((wait_state != TASK_RUNNING ||
(call_cond_resched && !reacquire_locks) ||
+ call_cond_resched_before || call_cond_resched_after ||
(alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
(test_disable_irq || disable_softirq || disable_preempt ||
lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
@@ -532,7 +568,7 @@ static int __init test_lockup_init(void)
if (test_lock_sb_umount && test_inode)
lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;

- pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s%s%s\n",
main_task->pid, time_secs, time_nsecs,
cooldown_secs, cooldown_nsecs, iterations, state,
all_cpus ? "all_cpus " : "",
@@ -545,6 +581,8 @@ static int __init test_lockup_init(void)
touch_softlockup ? "touch_softlockup " : "",
touch_hardlockup ? "touch_hardlockup " : "",
call_cond_resched ? "call_cond_resched " : "",
+ call_cond_resched_before ? "call_cond_resched_before " : "",
+ call_cond_resched_after ? "call_cond_resched_after " : "",
reacquire_locks ? "reacquire_locks " : "");

if (alloc_pages_nr)
@@ -578,6 +616,10 @@ static int __init test_lockup_init(void)
pr_notice("Maximum lock wait: %lld ns\n",
atomic64_read(&max_lock_wait));

+ if (measure_cond_resched)
+ pr_notice("Maximum cond resched time: %lld ns\n",
+ atomic64_read(&max_cond_resched));
+
if (alloc_pages_nr)
pr_notice("Page allocation failed %u times\n",
atomic_read(&alloc_pages_failed));
--
2.7.4


2020-08-09 20:57:46

by Konstantin Khlebnikov

Subject: Re: [PATCH] lib/test_lockup.c: add parameters for cond_resched inside loop

On Sun, Aug 9, 2020 at 10:06 PM Dmitry Monakhov
<[email protected]> wrote:
>
> call_cond_resched_before=Y call cond_resched() with the resource held, before the wait
> call_cond_resched_after=Y call cond_resched() with the resource held, after the wait
> measure_cond_resched=Y measure the maximum cond_resched() time inside the loop

Do you really need all of them? It seems "call_cond_resched_after" is
enough for the demonstration.
It could be shortened to just "call_cond_resched". I see no sense in
distinguishing between before and after the sleep.

Measuring the time of cond_resched() has a vague meaning too. It just
says "yep, we have an overload".

>
> This simulates a situation where a process calls cond_resched() while holding a lock.
>
> The example below demonstrates a priority inversion issue with an eBPF
> program: a low-priority task is scheduled out while holding cgroup_mutex
> for a long period of time, which blocks other high-priority programs.
>
> CGROUP_MUTEX=$(gawk '$3 == "cgroup_mutex" {print "0x"$1}' /proc/kallsyms)
> # Emulate an eBPF application load which can hang inside cgroup_bpf_attach()
> nice -20 modprobe lib/test_lockup.ko \
> time_nsecs=1000 cooldown_nsecs=100000 iterations=100000 \
> lock_mutex_ptr=$CGROUP_MUTEX \
> measure_lock_wait=Y call_cond_resched_after=Y &
>
> stress-ng -c $(nproc) --timeout 10s&
> time mkdir /sys/fs/cgroup/blkio/a
>
> Signed-off-by: Dmitry Monakhov <[email protected]>
> ---
> lib/test_lockup.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 43 insertions(+), 1 deletion(-)
>
> diff --git a/lib/test_lockup.c b/lib/test_lockup.c
> index 0f81252..3e05d6e 100644
> --- a/lib/test_lockup.c
> +++ b/lib/test_lockup.c
> @@ -77,6 +77,18 @@ static bool call_cond_resched;
> module_param(call_cond_resched, bool, 0600);
> MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
>
> +static bool call_cond_resched_before;
> +module_param(call_cond_resched_before, bool, 0600);
> +MODULE_PARM_DESC(call_cond_resched_before, "call cond_resched() before wait");
> +
> +static bool call_cond_resched_after;
> +module_param(call_cond_resched_after, bool, 0600);
> +MODULE_PARM_DESC(call_cond_resched_after, "call cond_resched() after wait");
> +
> +static bool measure_cond_resched;
> +module_param(measure_cond_resched, bool, 0400);
> +MODULE_PARM_DESC(measure_cond_resched, "measure cond_resched time");
> +
> static bool measure_lock_wait;
> module_param(measure_lock_wait, bool, 0400);
> MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
> @@ -162,6 +174,7 @@ MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
> static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
>
> static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
> +static atomic64_t max_cond_resched = ATOMIC64_INIT(0);
>
> static struct task_struct *main_task;
> static int master_cpu;
> @@ -346,6 +359,22 @@ static void test_wait(unsigned int secs, unsigned int nsecs)
> }
> }
>
> +static void test_cond_resched(void)
> +{
> + s64 cur, old_max;
> + s64 start = local_clock();
> +
> + cond_resched();
> +
> + cur = local_clock() - start;
> + old_max = atomic64_read(&max_cond_resched);
> + do {
> + if (cur < old_max)
> + break;
> + old_max = atomic64_cmpxchg(&max_cond_resched, old_max, cur);
> + } while (old_max != cur);
> +}
> +
> static void test_lockup(bool master)
> {
> u64 lockup_start = local_clock();
> @@ -363,8 +392,14 @@ static void test_lockup(bool master)
> if (iowait)
> current->in_iowait = 1;
>
> + if (call_cond_resched_before)
> + test_cond_resched();
> +
> test_wait(time_secs, time_nsecs);
>
> + if (call_cond_resched_after)
> + test_cond_resched();
> +
> if (iowait)
> current->in_iowait = 0;
>
> @@ -497,6 +532,7 @@ static int __init test_lockup_init(void)
>
> if ((wait_state != TASK_RUNNING ||
> (call_cond_resched && !reacquire_locks) ||
> + call_cond_resched_before || call_cond_resched_after ||
> (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
> (test_disable_irq || disable_softirq || disable_preempt ||
> lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
> @@ -532,7 +568,7 @@ static int __init test_lockup_init(void)
> if (test_lock_sb_umount && test_inode)
> lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
>
> - pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
> + pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s%s%s\n",
> main_task->pid, time_secs, time_nsecs,
> cooldown_secs, cooldown_nsecs, iterations, state,
> all_cpus ? "all_cpus " : "",
> @@ -545,6 +581,8 @@ static int __init test_lockup_init(void)
> touch_softlockup ? "touch_softlockup " : "",
> touch_hardlockup ? "touch_hardlockup " : "",
> call_cond_resched ? "call_cond_resched " : "",
> + call_cond_resched_before ? "call_cond_resched_before " : "",
> + call_cond_resched_after ? "call_cond_resched_after " : "",
> reacquire_locks ? "reacquire_locks " : "");
>
> if (alloc_pages_nr)
> @@ -578,6 +616,10 @@ static int __init test_lockup_init(void)
> pr_notice("Maximum lock wait: %lld ns\n",
> atomic64_read(&max_lock_wait));
>
> + if (measure_cond_resched)
> + pr_notice("Maximum cond resched time: %lld ns\n",
> + atomic64_read(&max_cond_resched));
> +
> if (alloc_pages_nr)
> pr_notice("Page allocation failed %u times\n",
> atomic_read(&alloc_pages_failed));
> --
> 2.7.4
>