2021-05-13 07:45:00

by Srikar Dronamraju

Subject: [PATCH v3 4/8] sched/fair: Prefer idle CPU to cache affinity

Current order of preference to pick an LLC while waking a wake-affine
task:
1. Between the waker CPU and previous CPU, prefer the LLC of the CPU
that is idle.

2. Between the waker CPU and previous CPU, prefer the LLC of the CPU
that is less heavily loaded.
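
For reference, a condensed paraphrase of the existing wake_affine()
chain that implements this order (illustration only; not part of this
patch):

/*
 * Condensed paraphrase of the existing wake_affine() flow:
 * WA_IDLE prefers an idle CPU between waker and previous (step 1),
 * WA_WEIGHT falls back to comparing effective load (step 2).
 */
static int wake_affine_sketch(struct sched_domain *sd, struct task_struct *p,
			      int this_cpu, int prev_cpu, int sync)
{
	int target = nr_cpumask_bits;

	if (sched_feat(WA_IDLE))
		target = wake_affine_idle(this_cpu, prev_cpu, sync);

	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);

	return target == nr_cpumask_bits ? prev_cpu : target;
}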

When both the waker CPU and the previous CPU are busy but only one of
their LLCs has an idle CPU, the scheduler may end up picking an LLC
with no idle CPUs. To mitigate this, add a method by which the
scheduler compares the idle CPUs in the waker's and the previous CPU's
LLCs and picks the more suitable one.

The new method looks at the cached idle-core to identify an idle LLC.
If neither LLC has an idle core, it compares the ratio of busy CPUs to
the total number of CPUs in each LLC. The method is only useful for
comparing two distinct LLCs; if the previous CPU and the waking CPU
are in the same LLC, it adds nothing. For now the new method is
disabled by default, behind the WA_IDLER_LLC scheduler feature.
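
For example, if the preferred LLC has 4 of its 8 CPUs busy and the
other LLC has 6 of its 8 busy, cross-multiplying gives
diff = 6*8 - 4*8 = 16 > 0, so the preferred LLC wins. A standalone
sketch of that comparison (hypothetical helper name; the real logic is
in the hunk below):

/*
 * Sketch: compare two LLCs by busy fraction without integer
 * division; cross-multiplying pnr_busy/pllc_size against
 * tnr_busy/tllc_size keeps full precision.
 */
static int less_busy_llc(int pnr_busy, int pllc_size,
			 int tnr_busy, int tllc_size)
{
	int diff = tnr_busy * pllc_size - pnr_busy * tllc_size;

	if (diff > 0)
		return 0;	/* preferred LLC is proportionally less busy */
	if (diff < 0)
		return 1;	/* tried LLC is proportionally less busy */
	return -1;		/* equal busy fractions: no preference */
}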

The sync flag decides which CPU/LLC to try first: if sync is set,
choose the current (waker) LLC, else choose the previous LLC.
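
A minimal sketch of that normalization (hypothetical helper; the patch
does this inline with swap()):

/*
 * Sketch: normalize the pair so pref_cpu always names the LLC that
 * is tried first: the waker's LLC for a sync wakeup, the previous
 * CPU's LLC otherwise.
 */
static void order_wake_llcs(int *pref_cpu, int *try_cpu, int sync)
{
	if (!sync) {
		int tmp = *pref_cpu;

		*pref_cpu = *try_cpu;
		*try_cpu = tmp;
	}
}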

Cc: LKML <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Michael Neuling <[email protected]>
Cc: Gautham R Shenoy <[email protected]>
Cc: Parth Shah <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Aubrey Li <[email protected]>
Signed-off-by: Srikar Dronamraju <[email protected]>
---
Changelog: v1->v2:
- Swap the CPUs if the wakeup is not sync, so that a single ordering
of the code suffices for both sync and non-sync wakeups.

- Mel reported a crash: two threads can race to find the same
idle-core. Cache the idle-core, and use compare-exchange so that no
two waking tasks contend on the same CPU.

Also based on a similar posting:
http://lore.kernel.org/lkml/[email protected]/t/#u
- Make WA_WAKER default (Suggested by Rik): done in next patch
- Make WA_WAKER check more conservative (Suggested by Rik / Peter)
- Rename WA_WAKER to WA_IDLER_LLC (Suggested by Vincent)
- s/pllc_size/tllc_size while checking for busy case (Pointed out by Dietmar)
- Add rcu_read_lock and check for validity of shared domains
- Add idle-core support

kernel/sched/fair.c | 66 +++++++++++++++++++++++++++++++++++++++++
kernel/sched/features.h | 1 +
2 files changed, 67 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d002bc95c0bc..d95a2c9c8797 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5893,6 +5893,59 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
}

+static inline bool test_reset_idle_core(struct sched_domain_shared *sds, int val);
+
+static int wake_affine_idler_llc(struct task_struct *p, int pref_cpu, int try_cpu, int sync)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+ int tnr_busy, tllc_size, pnr_busy, pllc_size;
+#endif
+ struct sched_domain_shared *pref_sds, *try_sds;
+ int diff, idle_core;
+
+ if (!sync)
+ swap(pref_cpu, try_cpu);
+
+ pref_sds = rcu_dereference(per_cpu(sd_llc_shared, pref_cpu));
+ try_sds = rcu_dereference(per_cpu(sd_llc_shared, try_cpu));
+ if (!pref_sds || !try_sds)
+ return nr_cpumask_bits;
+
+ if (available_idle_cpu(pref_cpu) || sched_idle_cpu(pref_cpu))
+ return pref_cpu;
+
+ idle_core = READ_ONCE(pref_sds->idle_core);
+ if (idle_core > -1 && cpumask_test_cpu(idle_core, p->cpus_ptr) &&
+ test_reset_idle_core(pref_sds, idle_core))
+ return idle_core;
+
+ if (available_idle_cpu(try_cpu) || sched_idle_cpu(try_cpu))
+ return try_cpu;
+
+ idle_core = READ_ONCE(try_sds->idle_core);
+ if (idle_core > -1 && cpumask_test_cpu(idle_core, p->cpus_ptr) &&
+ test_reset_idle_core(try_sds, idle_core))
+ return idle_core;
+
+#ifdef CONFIG_NO_HZ_COMMON
+ pnr_busy = atomic_read(&pref_sds->nr_busy_cpus);
+ tnr_busy = atomic_read(&try_sds->nr_busy_cpus);
+ pllc_size = per_cpu(sd_llc_size, pref_cpu);
+ tllc_size = per_cpu(sd_llc_size, try_cpu);
+
+ if (tnr_busy == tllc_size && pnr_busy == pllc_size)
+ return nr_cpumask_bits;
+
+ diff = tnr_busy * pllc_size - pnr_busy * tllc_size;
+ if (diff > 0)
+ return pref_cpu;
+ if (diff < 0)
+ return try_cpu;
+#endif /* CONFIG_NO_HZ_COMMON */
+
+ return nr_cpumask_bits;
+}
+
static int wake_affine(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
@@ -5901,6 +5954,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
if (sched_feat(WA_IDLE))
target = wake_affine_idle(this_cpu, prev_cpu, sync);

+ if (sched_feat(WA_IDLER_LLC) && target == nr_cpumask_bits)
+ target = wake_affine_idler_llc(p, this_cpu, prev_cpu, sync);
+
if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);

@@ -6068,6 +6124,11 @@ static inline int get_idle_core(int cpu, int def)
return def;
}

+static inline bool test_reset_idle_core(struct sched_domain_shared *sds, int val)
+{
+ return cmpxchg(&sds->idle_core, val, -1) == val;
+}
+
static void set_next_idle_core(int target)
{
struct sched_domain *sd = rcu_dereference(per_cpu(sd_llc, target));
@@ -6204,6 +6265,11 @@ static inline bool get_idle_core(int cpu, int def)
return def;
}

+static inline bool test_reset_idle_core(struct sched_domain_shared *sds, int val)
+{
+ return false;
+}
+
static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
{
return __select_idle_cpu(core);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 7f8dace0964c..77e0b2c4e02c 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -83,6 +83,7 @@ SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
+SCHED_FEAT(WA_IDLER_LLC, false)
SCHED_FEAT(WA_BIAS, true)

/*
--
2.18.2