Currently the iowait_boost feature in schedutil makes the frequency go to max
on iowait wakeups. This feature was added to handle a case that Peter
described [1], where the throughput of operations involving continuous I/O
requests is reduced by running at a lower frequency; the lower throughput
itself keeps utilization low, which in turn keeps the frequency low, so the
system is "stuck" at a low frequency.
Instead of going straight to max, it's also possible to achieve the same
effect by ramping up to max when repeated in_iowait wakeups occur. This patch
is an attempt to do that. We start from a lower frequency (policy->min) and
double the boost on every consecutive iowait update until we reach the
maximum iowait boost frequency (iowait_boost_max).
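To illustrate the intended behavior, here is a minimal user-space sketch of
the doubling/decay logic (not the kernel code itself; policy_min, boost_max
and the helper names below are made-up stand-ins for policy->min,
iowait_boost_max, sugov_set_iowait_boost() and sugov_iowait_boost()):

/* Illustrative simulation only: mirrors the boost doubling and decay. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int boost;                      /* current boost, in kHz */
static bool boost_pending;                      /* iowait seen since last apply */
static const unsigned int policy_min = 800000;  /* example: 800 MHz */
static const unsigned int boost_max = 4000000;  /* example: 4 GHz */

/* On an iowait wakeup: start at policy_min, double on each consecutive one. */
static void set_iowait_boost(void)
{
    if (boost_pending)
        return;
    boost_pending = true;

    if (boost) {
        boost <<= 1;
        if (boost > boost_max)
            boost = boost_max;
    } else {
        boost = policy_min;
    }
}

/* When picking the next frequency: consume the pending boost, or decay it. */
static void apply_iowait_boost(void)
{
    if (!boost)
        return;

    if (boost_pending) {
        boost_pending = false;
    } else {
        boost >>= 1;
        if (boost < policy_min)
            boost = 0;
    }
}

int main(void)
{
    for (int i = 1; i <= 5; i++) {  /* five consecutive iowait wakeups */
        set_iowait_boost();
        apply_iowait_boost();
        printf("update %d: boost = %u kHz\n", i, boost);
    }
    return 0;
}

With the example values above, the boost saturates after a handful of
consecutive iowait updates, which matches the measurement below.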
I ran a synthetic test (continuous O_DIRECT writes in a loop) on an x86
machine with intel_pstate in passive mode using schedutil. In this test the
iowait_boost value ramped from 800MHz to 4GHz in 60ms. The patch achieves the
same improved throughput as the existing behavior.
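As a rough sanity check (inferred from the numbers above, not measured
separately): starting at 800MHz, three doublings are enough to hit the 4GHz
cap (800 -> 1600 -> 3200 -> 4000, the last capped at iowait_boost_max), so a
60ms ramp corresponds to consecutive boosted updates arriving roughly every
15-20ms on this machine.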
Also, while at it, make iowait_boost and iowait_boost_max unsigned int, since
their unit is kHz; this is consistent with struct cpufreq_policy.
[1] https://patchwork.kernel.org/patch/9735885/
Cc: Srinivas Pandruvada <[email protected]>
Cc: Len Brown <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Viresh Kumar <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Suggested-by: Peter Zijlstra <[email protected]>
Suggested-by: Viresh Kumar <[email protected]>
Signed-off-by: Joel Fernandes <[email protected]>
---
Viresh, I made slight modifications to the last approach we agreed on, but
nothing we didn't already discuss. I also dropped the RFC tag since I think
this is now close to final (or has become final, if no one else has any
objections).
kernel/sched/cpufreq_schedutil.c | 37 +++++++++++++++++++++++++++++++------
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 622eed1b7658..0c0b6c8c15fc 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,6 +53,7 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
+ bool iowait_boost_pending;
unsigned long iowait_boost;
unsigned long iowait_boost_max;
u64 last_update;
@@ -172,30 +173,53 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
unsigned int flags)
{
if (flags & SCHED_CPUFREQ_IOWAIT) {
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ if (sg_cpu->iowait_boost_pending)
+ return;
+
+ sg_cpu->iowait_boost_pending = true;
+
+ if (sg_cpu->iowait_boost) {
+ sg_cpu->iowait_boost = min(sg_cpu->iowait_boost << 1,
+ sg_cpu->iowait_boost_max);
+ } else {
+ sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ }
} else if (sg_cpu->iowait_boost) {
s64 delta_ns = time - sg_cpu->last_update;
/* Clear iowait_boost if the CPU apprears to have been idle. */
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
- unsigned long boost_util = sg_cpu->iowait_boost;
- unsigned long boost_max = sg_cpu->iowait_boost_max;
+ unsigned long boost_util, boost_max;
- if (!boost_util)
+ if (!sg_cpu->iowait_boost)
return;
+ if (sg_cpu->iowait_boost_pending) {
+ sg_cpu->iowait_boost_pending = false;
+ } else {
+ sg_cpu->iowait_boost >>= 1;
+ if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ sg_cpu->iowait_boost = 0;
+ return;
+ }
+ }
+
+ boost_util = sg_cpu->iowait_boost;
+ boost_max = sg_cpu->iowait_boost_max;
+
if (*util * boost_max < *max * boost_util) {
*util = boost_util;
*max = boost_max;
}
- sg_cpu->iowait_boost >>= 1;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -267,6 +291,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
+ j_sg_cpu->iowait_boost_pending = false;
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
--
2.14.0.rc0.284.gd933b75aa4-goog
On Saturday, July 22, 2017 12:47:53 AM Joel Fernandes wrote:
> [...]
> @@ -172,30 +173,53 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
> unsigned int flags)
> {
> if (flags & SCHED_CPUFREQ_IOWAIT) {
> - sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
> + if (sg_cpu->iowait_boost_pending)
> + return;
> +
> + sg_cpu->iowait_boost_pending = true;
> +
> + if (sg_cpu->iowait_boost) {
> + sg_cpu->iowait_boost = min(sg_cpu->iowait_boost << 1,
> + sg_cpu->iowait_boost_max);
I would do
sg_cpu->iowait_boost <<= 1;
if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
as that's easier to read.
The rest of the patch is fine by me.
Thanks,
Rafael
On Sat, Jul 22, 2017 at 2:44 PM, Rafael J. Wysocki <[email protected]> wrote:
> On Saturday, July 22, 2017 12:47:53 AM Joel Fernandes wrote:
>> [...]
>> + if (sg_cpu->iowait_boost) {
>> + sg_cpu->iowait_boost = min(sg_cpu->iowait_boost << 1,
>> + sg_cpu->iowait_boost_max);
>
> I would do
>
> sg_cpu->iowait_boost <<= 1;
> if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
> sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
>
> as that's easier to read.
>
> The rest of the patch is fine by me.
Done, and resent the patches. Also added one more patch to change
iowait_boost and iowait_boost_max to unsigned int.
thanks,
-Joel