2011-03-23 03:10:35

by Paul Turner

[permalink] [raw]
Subject: [patch 02/15] sched: validate CFS quota hierarchies

Add constraint validation for CFS bandwidth hierarchies.

It is checked that:
sum(child bandwidth) <= parent_bandwidth

In a quota-limited hierarchy, an unconstrained entity
(e.g. bandwidth==RUNTIME_INF) inherits the bandwidth of its parent.

Since bandwidth periods may be non-uniform we normalize to the maximum allowed
period, 5 seconds.

This behavior may be disabled (allowing child bandwidth to exceed parent) via
kernel.sched_cfs_bandwidth_consistent=0

Signed-off-by: Paul Turner <[email protected]>

---
include/linux/sched.h | 8 +++
kernel/sched.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++---
kernel/sched_fair.c | 8 +++
kernel/sysctl.c | 11 ++++
4 files changed, 147 insertions(+), 7 deletions(-)

Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -253,6 +253,7 @@ struct cfs_bandwidth {
raw_spinlock_t lock;
ktime_t period;
u64 runtime, quota;
+ s64 hierarchal_quota; /* used for validating consistency */
struct hrtimer period_timer;
#endif
};
@@ -8868,7 +8869,7 @@ struct rt_schedulable_data {
u64 rt_runtime;
};

-static int tg_schedulable(struct task_group *tg, void *data)
+static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
@@ -8932,7 +8933,7 @@ static int __rt_schedulable(struct task_
.rt_runtime = runtime,
};

- return walk_tg_tree(tg_schedulable, tg_nop, &data);
+ return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
}

static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9223,14 +9224,17 @@ static u64 cpu_shares_read_u64(struct cg
}

#ifdef CONFIG_CFS_BANDWIDTH
+static DEFINE_MUTEX(cfs_constraints_mutex);
+
const u64 max_cfs_quota_period = 5 * NSEC_PER_SEC; /* 5s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
+
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
- int i;
+ int i, ret = 0;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
- static DEFINE_MUTEX(mutex);

if (tg == &root_task_group)
return -EINVAL;
@@ -9251,7 +9255,13 @@ static int tg_set_cfs_bandwidth(struct t
if (period > max_cfs_quota_period)
return -EINVAL;

- mutex_lock(&mutex);
+ mutex_lock(&cfs_constraints_mutex);
+ if (sysctl_sched_cfs_bandwidth_consistent) {
+ ret = __cfs_schedulable(tg, period, quota);
+ if (ret)
+ goto out_unlock;
+ }
+
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->runtime = cfs_b->quota = quota;
@@ -9265,9 +9275,10 @@ static int tg_set_cfs_bandwidth(struct t
init_cfs_rq_quota(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
- mutex_unlock(&mutex);
+out_unlock:
+ mutex_unlock(&cfs_constraints_mutex);

- return 0;
+ return ret;
}

int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
@@ -9339,6 +9350,108 @@ static int cpu_cfs_period_write_u64(stru
return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}

+
+struct cfs_schedulable_data {
+ struct task_group *tg;
+ u64 period, quota;
+};
+
+/*
+ * normalize group quota/period to be quota/max_period
+ * note: units are usecs
+ */
+static u64 normalize_cfs_quota(struct task_group *tg,
+ struct cfs_schedulable_data *d)
+{
+ u64 quota, period;
+ struct load_weight lw;
+
+ if (tg == d->tg) {
+ period = d->period;
+ quota = d->quota;
+ } else {
+ period = tg_get_cfs_period(tg);
+ quota = tg_get_cfs_quota(tg);
+ }
+
+ if (quota == RUNTIME_INF)
+ return RUNTIME_INF;
+
+ lw.weight = period;
+ lw.inv_weight = 0;
+
+ return calc_delta_mine(quota, max_cfs_quota_period, &lw) - 1;
+}
+
+static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
+{
+ struct cfs_schedulable_data *d = data;
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ s64 quota = 0, parent_quota = -1;
+
+ quota = normalize_cfs_quota(tg, d);
+ if (!tg->parent) {
+ quota = RUNTIME_INF;
+ } else {
+ struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
+
+ parent_quota = parent_b->hierarchal_quota;
+ if (parent_quota != RUNTIME_INF) {
+ parent_quota -= quota;
+ /* invalid hierarchy, child bandwidth exceeds parent */
+ if (parent_quota < 0)
+ return -EINVAL;
+ }
+
+ /* if no inherent limit then inherit parent quota */
+ if (quota == RUNTIME_INF)
+ quota = parent_quota;
+ parent_b->hierarchal_quota = parent_quota;
+ }
+ cfs_b->hierarchal_quota = quota;
+
+ return 0;
+}
+
+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
+{
+ int ret;
+ struct cfs_schedulable_data data = {
+ .tg = tg,
+ .period = period / NSEC_PER_USEC,
+ .quota = quota / NSEC_PER_USEC,
+ };
+
+ if (!sysctl_sched_cfs_bandwidth_consistent)
+ return 0;
+
+ rcu_read_lock();
+ ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop,
+ &data);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int sched_cfs_consistent_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&cfs_constraints_mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write && sysctl_sched_cfs_bandwidth_consistent) {
+ ret = __cfs_schedulable(NULL, 0, 0);
+
+ /* must be consistent to enable */
+ if (ret)
+ sysctl_sched_cfs_bandwidth_consistent = 0;
+ }
+ mutex_unlock(&cfs_constraints_mutex);
+ return ret;
+}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

Index: tip/kernel/sysctl.c
===================================================================
--- tip.orig/kernel/sysctl.c
+++ tip/kernel/sysctl.c
@@ -361,6 +361,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_rt_handler,
},
+#ifdef CONFIG_CFS_BANDWIDTH
+ {
+ .procname = "sched_cfs_bandwidth_consistent",
+ .data = &sysctl_sched_cfs_bandwidth_consistent,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_cfs_consistent_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
Index: tip/include/linux/sched.h
===================================================================
--- tip.orig/include/linux/sched.h
+++ tip/include/linux/sched.h
@@ -1943,6 +1943,14 @@ int sched_rt_handler(struct ctl_table *t
void __user *buffer, size_t *lenp,
loff_t *ppos);

+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_consistent;
+
+int sched_cfs_consistent_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;

Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -88,6 +88,14 @@ const_debug unsigned int sysctl_sched_mi
*/
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Whether a CFS bandwidth hierarchy is required to be consistent, that is:
+ * sum(child_bandwidth) <= parent_bandwidth
+ */
+unsigned int sysctl_sched_cfs_bandwidth_consistent = 1;
+#endif
+
static const struct sched_class fair_sched_class;

/**************************************************************


2011-03-23 10:40:00

by torbenh

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Tue, Mar 22, 2011 at 08:03:28PM -0700, Paul Turner wrote:
> Add constraints validation for CFS bandwidth hierachies.
>
> It is checked that:
> sum(child bandwidth) <= parent_bandwidth
>
> In a quota limited hierarchy, an unconstrainted entity
> (e.g. bandwidth==RUNTIME_INF) inherits the bandwidth of its parent.
>
> Since bandwidth periods may be non-uniform we normalize to the maximum allowed
> period, 5 seconds.
>
> This behavior may be disabled (allowing child bandwidth to exceed parent) via
> kernel.sched_cfs_bandwidth_consistent=0
>
> Signed-off-by: Paul Turner <[email protected]>
>
> ---
> include/linux/sched.h | 8 +++
> kernel/sched.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++---
> kernel/sched_fair.c | 8 +++
> kernel/sysctl.c | 11 ++++
> 4 files changed, 147 insertions(+), 7 deletions(-)
>
> Index: tip/kernel/sched.c
> ===================================================================
> --- tip.orig/kernel/sched.c
> +++ tip/kernel/sched.c
> +static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
> +{
> + struct cfs_schedulable_data *d = data;
> + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
> + s64 quota = 0, parent_quota = -1;
> +
> + quota = normalize_cfs_quota(tg, d);
> + if (!tg->parent) {
> + quota = RUNTIME_INF;
> + } else {
> + struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
> +
> + parent_quota = parent_b->hierarchal_quota;
> + if (parent_quota != RUNTIME_INF) {
> + parent_quota -= quota;
> + /* invalid hierarchy, child bandwidth exceeds parent */
> + if (parent_quota < 0)
> + return -EINVAL;
> + }
> +
> + /* if no inherent limit then inherit parent quota */
> + if (quota == RUNTIME_INF)
> + quota = parent_quota;
> + parent_b->hierarchal_quota = parent_quota;
> + }
> + cfs_b->hierarchal_quota = quota;
> +
> + return 0;
> +}

I find this logic pretty weird.
As long as quota == INF I can overcommit, but as soon as there is some
quota, I cannot?

Its clear, that one needs to be able to overcommit runtime, or the
default runtime for a new cgroup would need to be 0.
The root problem imo is that runtime and shares should not be in the
same cgroup subsystem. The semantics are too different.


> +
> +static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
> +{
> + int ret;
> + struct cfs_schedulable_data data = {
> + .tg = tg,
> + .period = period / NSEC_PER_USEC,
> + .quota = quota / NSEC_PER_USEC,
> + };
> +
> + if (!sysctl_sched_cfs_bandwidth_consistent)
> + return 0;
> +
> + rcu_read_lock();
> + ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop,
> + &data);
> + rcu_read_unlock();

walk_tg_tree does the rcu_read_lock itself.
not necessary to do that here.
look at __rt_schedulable there is no rcu.


> +
> + return ret;
> +}

--
torben Hohn

2011-03-23 20:56:12

by Paul Turner

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Wed, Mar 23, 2011 at 3:39 AM, torbenh <[email protected]> wrote:
> On Tue, Mar 22, 2011 at 08:03:28PM -0700, Paul Turner wrote:
>> Add constraints validation for CFS bandwidth hierachies.
>>
>> It is checked that:
>> ? ?sum(child bandwidth) <= parent_bandwidth
>>
>> In a quota limited hierarchy, an unconstrainted entity
>> (e.g. bandwidth==RUNTIME_INF) inherits the bandwidth of its parent.
>>
>> Since bandwidth periods may be non-uniform we normalize to the maximum allowed
>> period, 5 seconds.
>>
>> This behavior may be disabled (allowing child bandwidth to exceed parent) via
>> kernel.sched_cfs_bandwidth_consistent=0
>>
>> Signed-off-by: Paul Turner <[email protected]>
>>
>> ---
>> ?include/linux/sched.h | ? ?8 +++
>> ?kernel/sched.c ? ? ? ?| ?127 +++++++++++++++++++++++++++++++++++++++++++++++---
>> ?kernel/sched_fair.c ? | ? ?8 +++
>> ?kernel/sysctl.c ? ? ? | ? 11 ++++
>> ?4 files changed, 147 insertions(+), 7 deletions(-)
>>
>> Index: tip/kernel/sched.c
>> ===================================================================
>> --- tip.orig/kernel/sched.c
>> +++ tip/kernel/sched.c
>> +static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
>> +{
>> + ? ? struct cfs_schedulable_data *d = data;
>> + ? ? struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
>> + ? ? s64 quota = 0, parent_quota = -1;
>> +
>> + ? ? quota = normalize_cfs_quota(tg, d);
>> + ? ? if (!tg->parent) {
>> + ? ? ? ? ? ? quota = RUNTIME_INF;
>> + ? ? } else {
>> + ? ? ? ? ? ? struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
>> +
>> + ? ? ? ? ? ? parent_quota = parent_b->hierarchal_quota;
>> + ? ? ? ? ? ? if (parent_quota != RUNTIME_INF) {
>> + ? ? ? ? ? ? ? ? ? ? parent_quota -= quota;
>> + ? ? ? ? ? ? ? ? ? ? /* invalid hierarchy, child bandwidth exceeds parent */
>> + ? ? ? ? ? ? ? ? ? ? if (parent_quota < 0)
>> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? return -EINVAL;
>> + ? ? ? ? ? ? }
>> +
>> + ? ? ? ? ? ? /* if no inherent limit then inherit parent quota */
>> + ? ? ? ? ? ? if (quota == RUNTIME_INF)
>> + ? ? ? ? ? ? ? ? ? ? quota = parent_quota;
>> + ? ? ? ? ? ? parent_b->hierarchal_quota = parent_quota;
>> + ? ? }
>> + ? ? cfs_b->hierarchal_quota = quota;
>> +
>> + ? ? return 0;
>> +}
>
> I find this logic pretty weird.
> As long as quota == INF i can overcommit, but as soon as there is some
> quota, i can not ?
>

I don't think I understand what you mean by being able to overcommit
when quota ==INF. For a state of overcommit to exist an upper limit
must exist to exceed.

> Its clear, that one needs to be able to overcommit runtime, or the
> default runtime for a new cgroup would need to be 0.

Actually this is one of the primary reasons that the semantic of
inheritance was chosen. By inheriting our parent's quota, all existing
hierarchies remain valid.

There are also many cases which are not expressible without such a semantic:

Consider,

      X
    / | \
   A  B  C

Suppose we have the following constraints:
X - is the top level application group, we wish to provision X with 4 cpus
C - is a threadpool performing some background work, we wish it to
consume no more than 2 cpus
A/B - split the remaining time available to the hierarchy

If we require absolute limits on A/B there is no way to allow this
usage; we must establish a priori hard usage ratios. Yet, if a usage
ratio is desired, using cpu.shares to specify it is a much better
solution, as it gives you a soft ratio while remaining work-conserving
with respect to X's limit.


> The root problem imo is that runtime and shares should not be in the
> same cgroup subsystem. The semantics are too different.
>

Shares are relative and represent a lower limit for cgroup bandwidth.
Bandwidth is absolute and represents an upper limit for cgroup bandwidth.

Given that one is absolute and the other relative, the semantics will
be necessarily different. It does not work to extend bandwidth from
the definition of shares since there are many use-cases which require
their specification to be independent.

They are however intrinsically related since they effectively form
upper/lower bounds on the same parameter and should be controlled from
the same group.

>> +
>> +static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
>> +{
>> + ? ? int ret;
>> + ? ? struct cfs_schedulable_data data = {
>> + ? ? ? ? ? ? .tg = tg,
>> + ? ? ? ? ? ? .period = period / NSEC_PER_USEC,
>> + ? ? ? ? ? ? .quota = quota / NSEC_PER_USEC,
>> + ? ? };
>> +
>> + ? ? if (!sysctl_sched_cfs_bandwidth_consistent)
>> + ? ? ? ? ? ? return 0;
>> +
>> + ? ? rcu_read_lock();
>> + ? ? ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop,
>> + ? ? ? ? ? ? ? ? ? ? ? ? &data);
>> + ? ? rcu_read_unlock();
>
> walk_tg_tree does the rcu_read_lock itself.
> not necessary to do that here.
> look at __rt_schedulable there is no rcu.
>

Oops yeah -- I wrote this one after fiddling with the new _from
variant (which does require rcu_lock to be external); you're right,
it's not needed here. Thanks!

>
>> +
>> + ? ? return ret;
>> +}
>
> --
> torben Hohn
>

2011-03-24 06:31:00

by Bharata B Rao

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Tue, Mar 22, 2011 at 08:03:28PM -0700, Paul Turner wrote:
> Add constraints validation for CFS bandwidth hierachies.
>
> +static u64 normalize_cfs_quota(struct task_group *tg,
> + struct cfs_schedulable_data *d)
> +{
> + u64 quota, period;
> + struct load_weight lw;
> +
> + if (tg == d->tg) {
> + period = d->period;
> + quota = d->quota;
> + } else {
> + period = tg_get_cfs_period(tg);
> + quota = tg_get_cfs_quota(tg);
> + }
> +
> + if (quota == RUNTIME_INF)
> + return RUNTIME_INF;
> +
> + lw.weight = period;
> + lw.inv_weight = 0;
> +
> + return calc_delta_mine(quota, max_cfs_quota_period, &lw) - 1;

Time to rename calc_delta_mine to something more meaningful ?

Regards,
Bharata.

2011-03-29 06:58:37

by Hidetoshi Seto

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

(2011/03/23 12:03), Paul Turner wrote:
> @@ -9251,7 +9255,13 @@ static int tg_set_cfs_bandwidth(struct t
> if (period > max_cfs_quota_period)
> return -EINVAL;
>
> - mutex_lock(&mutex);
> + mutex_lock(&cfs_constraints_mutex);
> + if (sysctl_sched_cfs_bandwidth_consistent) {
> + ret = __cfs_schedulable(tg, period, quota);

At this point:
period => scale in ns unit
quota => scale in ns unit, or RUNTIME_INF

And both are unsigned. But...

> @@ -9339,6 +9350,108 @@ static int cpu_cfs_period_write_u64(stru
> return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
> }
>
> +
> +struct cfs_schedulable_data {
> + struct task_group *tg;
> + u64 period, quota;
> +};
> +
> +/*
> + * normalize group quota/period to be quota/max_period
> + * note: units are usecs
> + */
> +static u64 normalize_cfs_quota(struct task_group *tg,
> + struct cfs_schedulable_data *d)
> +{
> + u64 quota, period;
> + struct load_weight lw;
> +
> + if (tg == d->tg) {
> + period = d->period;
> + quota = d->quota;
> + } else {
> + period = tg_get_cfs_period(tg);
> + quota = tg_get_cfs_quota(tg);
> + }

... at this point:
period => scale in us unit
quota => scale in us unit, or -1
Moreover:
d->period => (scale in ns unit) / NSEC_PER_USEC
d->quota => (scale in ns unit, or RUNTIME_INF) / NSEC_PER_USEC

Therefore, ...

> +
> + if (quota == RUNTIME_INF)
> + return RUNTIME_INF;

This check doesn't work properly.

I found this problem because I could not get child group back to be
unconstrained:

[root@localhost group0]# cat cpu.cfs_*
500000
500000
[root@localhost group0]# cat sub0/cpu.cfs_*
500000
100000
[root@localhost group0]# cat sub1/cpu.cfs_*
500000
100000
[root@localhost group0]# echo -1 > sub1/cpu.cfs_quota_us
bash: echo: write error: Invalid argument

I confirmed that this write error is removed by the following
change. I'm looking forward to seeing your V6 soon.

Reviewed-by: Hidetoshi Seto <[email protected]>


Thanks,
H.Seto

---
kernel/sched.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 6d764b5..c8f9820 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9467,8 +9467,8 @@ static u64 normalize_cfs_quota(struct task_group *tg,
period = d->period;
quota = d->quota;
} else {
- period = tg_get_cfs_period(tg);
- quota = tg_get_cfs_quota(tg);
+ period = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
+ quota = tg_cfs_bandwidth(tg)->quota;
}

if (quota == RUNTIME_INF)
@@ -9515,8 +9515,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
int ret;
struct cfs_schedulable_data data = {
.tg = tg,
- .period = period / NSEC_PER_USEC,
- .quota = quota / NSEC_PER_USEC,
+ .period = period,
+ .quota = quota,
};

if (!sysctl_sched_cfs_bandwidth_consistent)
--
1.7.4

2011-04-04 23:11:27

by Paul Turner

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Mon, Mar 28, 2011 at 11:57 PM, Hidetoshi Seto
<[email protected]> wrote:
> (2011/03/23 12:03), Paul Turner wrote:
>> @@ -9251,7 +9255,13 @@ static int tg_set_cfs_bandwidth(struct t
>> ? ? ? if (period > max_cfs_quota_period)
>> ? ? ? ? ? ? ? return -EINVAL;
>>
>> - ? ? mutex_lock(&mutex);
>> + ? ? mutex_lock(&cfs_constraints_mutex);
>> + ? ? if (sysctl_sched_cfs_bandwidth_consistent) {
>> + ? ? ? ? ? ? ret = __cfs_schedulable(tg, period, quota);
>
> At this point:
> ?period => scale in ns unit
> ?quota ?=> scale in ns unit, or RUNTIME_INF
>
> And both are unsigned. But...

Ack.. I had accounted for this at one point but I obviously churned it out.

Good catch, thanks!


>
>> @@ -9339,6 +9350,108 @@ static int cpu_cfs_period_write_u64(stru
>> ? ? ? return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
>> ?}
>>
>> +
>> +struct cfs_schedulable_data {
>> + ? ? struct task_group *tg;
>> + ? ? u64 period, quota;
>> +};
>> +
>> +/*
>> + * normalize group quota/period to be quota/max_period
>> + * note: units are usecs
>> + */
>> +static u64 normalize_cfs_quota(struct task_group *tg,
>> + ? ? ? ? ? ? ? ? ? ? ? ? ? ?struct cfs_schedulable_data *d)
>> +{
>> + ? ? u64 quota, period;
>> + ? ? struct load_weight lw;
>> +
>> + ? ? if (tg == d->tg) {
>> + ? ? ? ? ? ? period = d->period;
>> + ? ? ? ? ? ? quota = d->quota;
>> + ? ? } else {
>> + ? ? ? ? ? ? period = tg_get_cfs_period(tg);
>> + ? ? ? ? ? ? quota = tg_get_cfs_quota(tg);
>> + ? ? }
>
> ... at this point:
> ?period => scale in us unit
> ?quota ?=> scale in us unit, or -1
> Moreover:
> ?d->period => (scale in ns unit) / NSEC_PER_USEC
> ?d->quota ?=> (scale in ns unit, or RUNTIME_INF) / NSEC_PER_USEC
>
> Therefore, ...
>
>> +
>> + ? ? if (quota == RUNTIME_INF)
>> + ? ? ? ? ? ? return RUNTIME_INF;
>
> This check doesn't work properly.

Right. Fixed, sorry for the delayed response -- was out last week.

>
> I found this problem because I could not get child group back to be
> unconstrained:
>
> [root@localhost group0]# cat cpu.cfs_*
> 500000
> 500000
> [root@localhost group0]# cat sub0/cpu.cfs_*
> 500000
> 100000
> [root@localhost group0]# cat sub1/cpu.cfs_*
> 500000
> 100000
> [root@localhost group0]# echo -1 > sub1/cpu.cfs_quota_us
> bash: echo: write error: Invalid argument
>
> I confirmed that this write error is removed by the following
> change. ?I'm looking forward to seeing your V6 soon.
>
> Reviewed-by: Hidetoshi Seto <[email protected]>
>
>
> Thanks,
> H.Seto
>
> ---
> ?kernel/sched.c | ? ?8 ++++----
> ?1 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched.c b/kernel/sched.c
> index 6d764b5..c8f9820 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -9467,8 +9467,8 @@ static u64 normalize_cfs_quota(struct task_group *tg,
> ? ? ? ? ? ? ? ?period = d->period;
> ? ? ? ? ? ? ? ?quota = d->quota;
> ? ? ? ?} else {
> - ? ? ? ? ? ? ? period = tg_get_cfs_period(tg);
> - ? ? ? ? ? ? ? quota = tg_get_cfs_quota(tg);
> + ? ? ? ? ? ? ? period = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
> + ? ? ? ? ? ? ? quota = tg_cfs_bandwidth(tg)->quota;
> ? ? ? ?}
>
> ? ? ? ?if (quota == RUNTIME_INF)
> @@ -9515,8 +9515,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
> ? ? ? ?int ret;
> ? ? ? ?struct cfs_schedulable_data data = {
> ? ? ? ? ? ? ? ?.tg = tg,
> - ? ? ? ? ? ? ? .period = period / NSEC_PER_USEC,
> - ? ? ? ? ? ? ? .quota = quota / NSEC_PER_USEC,
> + ? ? ? ? ? ? ? .period = period,
> + ? ? ? ? ? ? ? .quota = quota,
> ? ? ? ?};
>
> ? ? ? ?if (!sysctl_sched_cfs_bandwidth_consistent)
> --
> 1.7.4
>
>

2011-04-05 13:28:45

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Tue, 2011-03-22 at 20:03 -0700, Paul Turner wrote:

> +/*
> + * Whether a CFS bandwidth hierarchy is required to be consistent, that is:
> + * sum(child_bandwidth) <= parent_bandwidth
> + */
> +unsigned int sysctl_sched_cfs_bandwidth_consistent = 1;

Why have this configurable?

2011-04-08 17:01:40

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [patch 02/15] sched: validate CFS quota hierarchies

On Thu, 2011-03-24 at 12:01 +0530, Bharata B Rao wrote:
> On Tue, Mar 22, 2011 at 08:03:28PM -0700, Paul Turner wrote:
> > Add constraints validation for CFS bandwidth hierachies.
> >
> > +static u64 normalize_cfs_quota(struct task_group *tg,
> > + struct cfs_schedulable_data *d)
> > +{
> > + u64 quota, period;
> > + struct load_weight lw;
> > +
> > + if (tg == d->tg) {
> > + period = d->period;
> > + quota = d->quota;
> > + } else {
> > + period = tg_get_cfs_period(tg);
> > + quota = tg_get_cfs_quota(tg);
> > + }
> > +
> > + if (quota == RUNTIME_INF)
> > + return RUNTIME_INF;
> > +
> > + lw.weight = period;
> > + lw.inv_weight = 0;
> > +
> > + return calc_delta_mine(quota, max_cfs_quota_period, &lw) - 1;
>
> Time to rename calc_delta_mine to something more meaningful ?

Or not use it there at all:

- I'm not sure why we have different periods per cgroup, given that we
don't have EDF like scheduling and there's a very limited set of useful
periods. Too small and overhead increases like mad, too large and we get
lots of priority inversion crap.

- Its not a fast-path by any means, so a straight fwd division wouldn't
hurt anybody.