2011-02-16 03:20:37

by Paul Turner

Subject: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

At the start of a new period there are several actions we must take:
- Refresh global bandwidth pool
- Unthrottle entities who ran out of quota as refreshed bandwidth permits

Entities that have the cfs_rq->throttled flag set are unthrottled and re-enqueued
into the cfs entity hierarchy.

sched_rt_period_mask() is refactored slightly into sched_bw_period_mask()
since it is now shared by both cfs and rt bandwidth period timers.

The !CONFIG_RT_GROUP_SCHED && CONFIG_SMP case has been collapsed to use
rd->span instead of cpu_online_mask since I think that was incorrect before
(don't want to hit cpu's outside of your root_domain for RT bandwidth).

Signed-off-by: Paul Turner <[email protected]>
Signed-off-by: Nikhil Rao <[email protected]>
Signed-off-by: Bharata B Rao <[email protected]>
---
kernel/sched.c | 16 +++++++++++
kernel/sched_fair.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched_rt.c | 19 -------------
3 files changed, 90 insertions(+), 19 deletions(-)

Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -1561,6 +1561,8 @@ static int tg_nop(struct task_group *tg,
}
#endif

+static inline const struct cpumask *sched_bw_period_mask(void);
+
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
@@ -8503,6 +8505,18 @@ void set_curr_task(int cpu, struct task_

#endif

+#ifdef CONFIG_SMP
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+ return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+ return cpu_online_mask;
+}
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
@@ -9240,6 +9254,8 @@ static int tg_set_cfs_bandwidth(struct t

raw_spin_lock_irq(&rq->lock);
init_cfs_rq_quota(cfs_rq);
+ if (cfs_rq_throttled(cfs_rq))
+ unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
mutex_unlock(&mutex);
Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -327,6 +327,13 @@ static inline u64 sched_cfs_bandwidth_sl
return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}

+static inline
+struct cfs_rq *cfs_bandwidth_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
+{
+ return container_of(cfs_b, struct task_group,
+ cfs_bandwidth)->cfs_rq[cpu];
+}
+
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return &tg->cfs_bandwidth;
@@ -1513,6 +1520,33 @@ out_throttled:
update_cfs_rq_load_contribution(cfs_rq, 1);
}

+static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+{
+ struct rq *rq = rq_of(cfs_rq);
+ struct sched_entity *se;
+
+ se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
+
+ update_rq_clock(rq);
+ /* (Try to) avoid maintaining share statistics for idle time */
+ cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
+
+ cfs_rq->throttled = 0;
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+ break;
+
+ cfs_rq = cfs_rq_of(se);
+ enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* determine whether we need to wake up potentally idle cpu */
+ if (rq->curr == rq->idle && rq->cfs.nr_running)
+ resched_task(rq->curr);
+}
+
static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
unsigned long delta_exec)
{
@@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct

static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
- return 1;
+ int i, idle = 1;
+ u64 delta;
+ const struct cpumask *span;
+
+ if (cfs_b->quota == RUNTIME_INF)
+ return 1;
+
+ /* reset group quota */
+ raw_spin_lock(&cfs_b->lock);
+ cfs_b->runtime = cfs_b->quota;
+ raw_spin_unlock(&cfs_b->lock);
+
+ span = sched_bw_period_mask();
+ for_each_cpu(i, span) {
+ struct rq *rq = cpu_rq(i);
+ struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
+
+ if (cfs_rq->nr_running)
+ idle = 0;
+
+ if (!cfs_rq_throttled(cfs_rq))
+ continue;
+
+ delta = tg_request_cfs_quota(cfs_rq->tg);
+
+ if (delta) {
+ raw_spin_lock(&rq->lock);
+ cfs_rq->quota_assigned += delta;
+
+ /* avoid race with tg_set_cfs_bandwidth */
+ if (cfs_rq_throttled(cfs_rq) &&
+ cfs_rq->quota_used < cfs_rq->quota_assigned)
+ unthrottle_cfs_rq(cfs_rq);
+ raw_spin_unlock(&rq->lock);
+ }
+ }
+
+ return idle;
}
+
#endif

#ifdef CONFIG_SMP
Index: tip/kernel/sched_rt.c
===================================================================
--- tip.orig/kernel/sched_rt.c
+++ tip/kernel/sched_rt.c
@@ -252,18 +252,6 @@ static int rt_se_boosted(struct sched_rt
return p->prio != p->normal_prio;
}

-#ifdef CONFIG_SMP
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_rq(smp_processor_id())->rd->span;
-}
-#else
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-#endif
-
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
@@ -321,11 +309,6 @@ static inline int rt_rq_throttled(struct
return rt_rq->rt_throttled;
}

-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
@@ -543,7 +526,7 @@ static int do_sched_rt_period_timer(stru
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return 1;

- span = sched_rt_period_mask();
+ span = sched_bw_period_mask();
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);


2011-02-18 07:19:15

by Balbir Singh

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

* Paul Turner <[email protected]> [2011-02-15 19:18:35]:

> At the start of a new period there are several actions we must take:
> - Refresh global bandwidth pool
> - Unthrottle entities who ran out of quota as refreshed bandwidth permits
>
> Entities that have the cfs_rq->throttled flag set are unthrottled and re-enqueued
> into the cfs entity hierarchy.
>
> sched_rt_period_mask() is refactored slightly into sched_bw_period_mask()
> since it is now shared by both cfs and rt bandwidth period timers.
>
> The !CONFIG_RT_GROUP_SCHED && CONFIG_SMP case has been collapsed to use
> rd->span instead of cpu_online_mask since I think that was incorrect before
> (don't want to hit cpu's outside of your root_domain for RT bandwidth).
>
> Signed-off-by: Paul Turner <[email protected]>
> Signed-off-by: Nikhil Rao <[email protected]>
> Signed-off-by: Bharata B Rao <[email protected]>
> ---
> kernel/sched.c | 16 +++++++++++
> kernel/sched_fair.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++-
> kernel/sched_rt.c | 19 -------------
> 3 files changed, 90 insertions(+), 19 deletions(-)
>
> Index: tip/kernel/sched.c
> ===================================================================
> --- tip.orig/kernel/sched.c
> +++ tip/kernel/sched.c
> @@ -1561,6 +1561,8 @@ static int tg_nop(struct task_group *tg,
> }
> #endif
>
> +static inline const struct cpumask *sched_bw_period_mask(void);
> +
> #ifdef CONFIG_SMP
> /* Used instead of source_load when we know the type == 0 */
> static unsigned long weighted_cpuload(const int cpu)
> @@ -8503,6 +8505,18 @@ void set_curr_task(int cpu, struct task_
>
> #endif
>
> +#ifdef CONFIG_SMP
> +static inline const struct cpumask *sched_bw_period_mask(void)
> +{
> + return cpu_rq(smp_processor_id())->rd->span;
> +}
> +#else
> +static inline const struct cpumask *sched_bw_period_mask(void)
> +{
> + return cpu_online_mask;
> +}
> +#endif
> +
> #ifdef CONFIG_FAIR_GROUP_SCHED
> static void free_fair_sched_group(struct task_group *tg)
> {
> @@ -9240,6 +9254,8 @@ static int tg_set_cfs_bandwidth(struct t
>
> raw_spin_lock_irq(&rq->lock);
> init_cfs_rq_quota(cfs_rq);
> + if (cfs_rq_throttled(cfs_rq))
> + unthrottle_cfs_rq(cfs_rq);
> raw_spin_unlock_irq(&rq->lock);
> }
> mutex_unlock(&mutex);
> Index: tip/kernel/sched_fair.c
> ===================================================================
> --- tip.orig/kernel/sched_fair.c
> +++ tip/kernel/sched_fair.c
> @@ -327,6 +327,13 @@ static inline u64 sched_cfs_bandwidth_sl
> return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
> }
>
> +static inline
> +struct cfs_rq *cfs_bandwidth_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
> +{
> + return container_of(cfs_b, struct task_group,
> + cfs_bandwidth)->cfs_rq[cpu];
> +}
> +
> static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> {
> return &tg->cfs_bandwidth;
> @@ -1513,6 +1520,33 @@ out_throttled:
> update_cfs_rq_load_contribution(cfs_rq, 1);
> }
>
> +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
> +{
> + struct rq *rq = rq_of(cfs_rq);
> + struct sched_entity *se;
> +
> + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
> +
> + update_rq_clock(rq);
> + /* (Try to) avoid maintaining share statistics for idle time */
> + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
> +
> + cfs_rq->throttled = 0;
> + for_each_sched_entity(se) {
> + if (se->on_rq)
> + break;
> +
> + cfs_rq = cfs_rq_of(se);
> + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
> + if (cfs_rq_throttled(cfs_rq))
> + break;
> + }
> +
> + /* determine whether we need to wake up potentally idle cpu */
> + if (rq->curr == rq->idle && rq->cfs.nr_running)
> + resched_task(rq->curr);
> +}
> +
> static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
> unsigned long delta_exec)
> {
> @@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct
>
> static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
> {
> - return 1;
> + int i, idle = 1;
> + u64 delta;
> + const struct cpumask *span;
> +
> + if (cfs_b->quota == RUNTIME_INF)
> + return 1;
> +
> + /* reset group quota */
> + raw_spin_lock(&cfs_b->lock);
> + cfs_b->runtime = cfs_b->quota;
> + raw_spin_unlock(&cfs_b->lock);
> +
> + span = sched_bw_period_mask();
> + for_each_cpu(i, span) {
> + struct rq *rq = cpu_rq(i);
> + struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
> +
> + if (cfs_rq->nr_running)
> + idle = 0;
> +
> + if (!cfs_rq_throttled(cfs_rq))
> + continue;

This is an interesting situation: does this mean we got scheduled in
between periods with enough quota to last us across the period?
Should we be donating quota back to the global pool?

> +
> + delta = tg_request_cfs_quota(cfs_rq->tg);
> +
> + if (delta) {
> + raw_spin_lock(&rq->lock);
> + cfs_rq->quota_assigned += delta;
> +
> + /* avoid race with tg_set_cfs_bandwidth */
> + if (cfs_rq_throttled(cfs_rq) &&
> + cfs_rq->quota_used < cfs_rq->quota_assigned)
> + unthrottle_cfs_rq(cfs_rq);
> + raw_spin_unlock(&rq->lock);
> + }
> + }
> +
> + return idle;
> }
> +
> #endif
>
> #ifdef CONFIG_SMP
> Index: tip/kernel/sched_rt.c
> ===================================================================
> --- tip.orig/kernel/sched_rt.c
> +++ tip/kernel/sched_rt.c
> @@ -252,18 +252,6 @@ static int rt_se_boosted(struct sched_rt
> return p->prio != p->normal_prio;
> }
>
> -#ifdef CONFIG_SMP
> -static inline const struct cpumask *sched_rt_period_mask(void)
> -{
> - return cpu_rq(smp_processor_id())->rd->span;
> -}
> -#else
> -static inline const struct cpumask *sched_rt_period_mask(void)
> -{
> - return cpu_online_mask;
> -}
> -#endif
> -
> static inline
> struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
> {
> @@ -321,11 +309,6 @@ static inline int rt_rq_throttled(struct
> return rt_rq->rt_throttled;
> }
>
> -static inline const struct cpumask *sched_rt_period_mask(void)
> -{
> - return cpu_online_mask;
> -}
> -
> static inline
> struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
> {
> @@ -543,7 +526,7 @@ static int do_sched_rt_period_timer(stru
> if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
> return 1;
>
> - span = sched_rt_period_mask();
> + span = sched_bw_period_mask();
> for_each_cpu(i, span) {
> int enqueue = 0;
> struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
>
>

--
Three Cheers,
Balbir

2011-02-18 08:09:57

by Bharata B Rao

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

On Fri, Feb 18, 2011 at 12:49:04PM +0530, Balbir Singh wrote:
> * Paul Turner <[email protected]> [2011-02-15 19:18:35]:
>
> > At the start of a new period there are several actions we must take:
> > - Refresh global bandwidth pool
> > - Unthrottle entities who ran out of quota as refreshed bandwidth permits
> >
> > Entities that have the cfs_rq->throttled flag set are unthrottled and re-enqueued
> > into the cfs entity hierarchy.
> >
> > sched_rt_period_mask() is refactored slightly into sched_bw_period_mask()
> > since it is now shared by both cfs and rt bandwidth period timers.
> >
> > The !CONFIG_RT_GROUP_SCHED && CONFIG_SMP case has been collapsed to use
> > rd->span instead of cpu_online_mask since I think that was incorrect before
> > (don't want to hit cpu's outside of your root_domain for RT bandwidth).
> >
> > Signed-off-by: Paul Turner <[email protected]>
> > Signed-off-by: Nikhil Rao <[email protected]>
> > Signed-off-by: Bharata B Rao <[email protected]>
> > ---
> > kernel/sched.c | 16 +++++++++++
> > kernel/sched_fair.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++-
> > kernel/sched_rt.c | 19 -------------
> > 3 files changed, 90 insertions(+), 19 deletions(-)
> >
> > Index: tip/kernel/sched.c
> > ===================================================================
> > --- tip.orig/kernel/sched.c
> > +++ tip/kernel/sched.c
> > @@ -1561,6 +1561,8 @@ static int tg_nop(struct task_group *tg,
> > }
> > #endif
> >
> > +static inline const struct cpumask *sched_bw_period_mask(void);
> > +
> > #ifdef CONFIG_SMP
> > /* Used instead of source_load when we know the type == 0 */
> > static unsigned long weighted_cpuload(const int cpu)
> > @@ -8503,6 +8505,18 @@ void set_curr_task(int cpu, struct task_
> >
> > #endif
> >
> > +#ifdef CONFIG_SMP
> > +static inline const struct cpumask *sched_bw_period_mask(void)
> > +{
> > + return cpu_rq(smp_processor_id())->rd->span;
> > +}
> > +#else
> > +static inline const struct cpumask *sched_bw_period_mask(void)
> > +{
> > + return cpu_online_mask;
> > +}
> > +#endif
> > +
> > #ifdef CONFIG_FAIR_GROUP_SCHED
> > static void free_fair_sched_group(struct task_group *tg)
> > {
> > @@ -9240,6 +9254,8 @@ static int tg_set_cfs_bandwidth(struct t
> >
> > raw_spin_lock_irq(&rq->lock);
> > init_cfs_rq_quota(cfs_rq);
> > + if (cfs_rq_throttled(cfs_rq))
> > + unthrottle_cfs_rq(cfs_rq);
> > raw_spin_unlock_irq(&rq->lock);
> > }
> > mutex_unlock(&mutex);
> > Index: tip/kernel/sched_fair.c
> > ===================================================================
> > --- tip.orig/kernel/sched_fair.c
> > +++ tip/kernel/sched_fair.c
> > @@ -327,6 +327,13 @@ static inline u64 sched_cfs_bandwidth_sl
> > return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
> > }
> >
> > +static inline
> > +struct cfs_rq *cfs_bandwidth_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
> > +{
> > + return container_of(cfs_b, struct task_group,
> > + cfs_bandwidth)->cfs_rq[cpu];
> > +}
> > +
> > static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> > {
> > return &tg->cfs_bandwidth;
> > @@ -1513,6 +1520,33 @@ out_throttled:
> > update_cfs_rq_load_contribution(cfs_rq, 1);
> > }
> >
> > +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
> > +{
> > + struct rq *rq = rq_of(cfs_rq);
> > + struct sched_entity *se;
> > +
> > + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
> > +
> > + update_rq_clock(rq);
> > + /* (Try to) avoid maintaining share statistics for idle time */
> > + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
> > +
> > + cfs_rq->throttled = 0;
> > + for_each_sched_entity(se) {
> > + if (se->on_rq)
> > + break;
> > +
> > + cfs_rq = cfs_rq_of(se);
> > + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
> > + if (cfs_rq_throttled(cfs_rq))
> > + break;
> > + }
> > +
> > + /* determine whether we need to wake up potentally idle cpu */
> > + if (rq->curr == rq->idle && rq->cfs.nr_running)
> > + resched_task(rq->curr);
> > +}
> > +
> > static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
> > unsigned long delta_exec)
> > {
> > @@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct
> >
> > static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
> > {
> > - return 1;
> > + int i, idle = 1;
> > + u64 delta;
> > + const struct cpumask *span;
> > +
> > + if (cfs_b->quota == RUNTIME_INF)
> > + return 1;
> > +
> > + /* reset group quota */
> > + raw_spin_lock(&cfs_b->lock);
> > + cfs_b->runtime = cfs_b->quota;
> > + raw_spin_unlock(&cfs_b->lock);
> > +
> > + span = sched_bw_period_mask();
> > + for_each_cpu(i, span) {
> > + struct rq *rq = cpu_rq(i);
> > + struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
> > +
> > + if (cfs_rq->nr_running)
> > + idle = 0;
> > +
> > + if (!cfs_rq_throttled(cfs_rq))
> > + continue;
>
> This is an interesting situation: does this mean we got scheduled in
> between periods with enough quota to last us across the period?
> Should we be donating quota back to the global pool?

I see 2 possibilities of this happening:

1. The tasks in the group were dequeued (due to sleep) before utilizing
the full quota within the period.

2. The tasks in the group couldn't fully utilize the available quota before
the period timer fired.

Note that the period timer fires every period as long as the group has
active tasks.

In either case, we reset the global pool.

We could potentially return the local slice back to the global pool. I had a patch
in v3 to cover case 1, but we decided to get the base stuff accepted before
adding such optimizations.
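
Roughly, the idea looks like the standalone sketch below -- simplified structures
and no locking, with field names that are only illustrative, not the actual
kernel ones:

#include <stdio.h>

typedef unsigned long long u64;

/* simplified stand-ins for the kernel structures, for illustration only */
struct cfs_bandwidth {
        u64 quota;      /* quota refilled each period */
        u64 runtime;    /* what remains in the global pool this period */
};

struct cfs_rq_bw {
        u64 quota_assigned;     /* slices handed out to this cpu */
        u64 quota_used;         /* amount consumed on this cpu */
};

/*
 * Give back whatever part of the locally assigned slice was not consumed,
 * e.g. when the tasks on this cfs_rq go to sleep before the period ends
 * (case 1 above).  In the kernel this would run under cfs_b->lock.
 */
static void return_cfs_rq_quota(struct cfs_bandwidth *cfs_b,
                                struct cfs_rq_bw *cfs_rq)
{
        u64 unused = 0;

        if (cfs_rq->quota_assigned > cfs_rq->quota_used)
                unused = cfs_rq->quota_assigned - cfs_rq->quota_used;

        cfs_b->runtime += unused;
        if (cfs_b->runtime > cfs_b->quota)      /* never bank more than one period */
                cfs_b->runtime = cfs_b->quota;

        cfs_rq->quota_assigned = cfs_rq->quota_used;
}

int main(void)
{
        struct cfs_bandwidth cfs_b = { .quota = 100, .runtime = 40 };
        struct cfs_rq_bw cfs_rq = { .quota_assigned = 30, .quota_used = 10 };

        return_cfs_rq_quota(&cfs_b, &cfs_rq);
        printf("global runtime now %llu\n", cfs_b.runtime);    /* 60 */
        return 0;
}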

Regards,
Bharata.

2011-02-23 12:24:35

by Peter Zijlstra

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

On Tue, 2011-02-15 at 19:18 -0800, Paul Turner wrote:

> +static inline const struct cpumask *sched_bw_period_mask(void)
> +{
> + return cpu_rq(smp_processor_id())->rd->span;
> +}

Some day I'm going to bite the bullet and merge part of cpusets into
this controller and have Balbir take the node part into the memcg thing
and then make (cpu||memcg) ^ cpuset

Feh.

2011-02-23 13:33:25

by Peter Zijlstra

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

On Tue, 2011-02-15 at 19:18 -0800, Paul Turner wrote:

> +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
> +{
> + struct rq *rq = rq_of(cfs_rq);
> + struct sched_entity *se;
> +
> + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
> +
> + update_rq_clock(rq);
> + /* (Try to) avoid maintaining share statistics for idle time */
> + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;

Ok, so here you try to compensate for some of the weirdness from the
previous patch.. wouldn't it be much saner to fully consider the
throttled things dequeued for the load calculation etc.?

> +
> + cfs_rq->throttled = 0;
> + for_each_sched_entity(se) {
> + if (se->on_rq)
> + break;
> +
> + cfs_rq = cfs_rq_of(se);
> + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
> + if (cfs_rq_throttled(cfs_rq))
> + break;

That's just weird, it was throttled, you enqueued it but find it
throttled.

> + }
> +
> + /* determine whether we need to wake up potentally idle cpu */

SP: potentially, also isn't there a determiner missing?

> + if (rq->curr == rq->idle && rq->cfs.nr_running)
> + resched_task(rq->curr);
> +}
> +
> static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
> unsigned long delta_exec)
> {
> @@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct
>
> static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
> {
> - return 1;
> + int i, idle = 1;
> + u64 delta;
> + const struct cpumask *span;
> +
> + if (cfs_b->quota == RUNTIME_INF)
> + return 1;
> +
> + /* reset group quota */
> + raw_spin_lock(&cfs_b->lock);
> + cfs_b->runtime = cfs_b->quota;

Shouldn't that be something like:

cfs_b->runtime =
min(cfs_b->runtime + overrun * cfs_b->quota, cfs_b->quota);

afaict runtime can go negative in which case we need to compensate for
that, but we cannot ever get more than quota because we allow for
overcommit, so not limiting things would allow us to accrue an unlimited
amount of runtime.

Or can only the per-cpu quota muck go negative? In that case it should
probably be propagated back into the global bw on throttle, otherwise
you can get deficits on CPUs that remain unused for a while.
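
Purely as a standalone illustration of that clamp (plain signed arithmetic with
simplified types, not the kernel code):

#include <stdio.h>

typedef long long s64;

static s64 min_s64(s64 a, s64 b)
{
        return a < b ? a : b;
}

/* repay any deficit carried in runtime, but never bank more than one quota */
static s64 refresh_runtime(s64 runtime, s64 quota, int overrun)
{
        return min_s64(runtime + (s64)overrun * quota, quota);
}

int main(void)
{
        /* a local overrun left a deficit: -30 + 1*100 -> 70 */
        printf("%lld\n", refresh_runtime(-30, 100, 1));
        /* two missed periods do not accrue extra runtime: min(50 + 200, 100) -> 100 */
        printf("%lld\n", refresh_runtime(50, 100, 2));
        return 0;
}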

> + raw_spin_unlock(&cfs_b->lock);
> +
> + span = sched_bw_period_mask();
> + for_each_cpu(i, span) {
> + struct rq *rq = cpu_rq(i);
> + struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
> +
> + if (cfs_rq->nr_running)
> + idle = 0;
> +
> + if (!cfs_rq_throttled(cfs_rq))
> + continue;
> +
> + delta = tg_request_cfs_quota(cfs_rq->tg);
> +
> + if (delta) {
> + raw_spin_lock(&rq->lock);
> + cfs_rq->quota_assigned += delta;
> +
> + /* avoid race with tg_set_cfs_bandwidth */

*what* race, and *how*

> + if (cfs_rq_throttled(cfs_rq) &&
> + cfs_rq->quota_used < cfs_rq->quota_assigned)
> + unthrottle_cfs_rq(cfs_rq);
> + raw_spin_unlock(&rq->lock);
> + }
> + }
> +
> + return idle;
> }

This whole positive quota muck makes my head hurt, whatever did you do
that for? Also it doesn't deal with wrapping, which admittedly won't
really happen but still.


2011-02-24 07:04:10

by Bharata B Rao

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

On Wed, Feb 23, 2011 at 02:32:12PM +0100, Peter Zijlstra wrote:
> On Tue, 2011-02-15 at 19:18 -0800, Paul Turner wrote:
>
> > +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
> > +{
> > + struct rq *rq = rq_of(cfs_rq);
> > + struct sched_entity *se;
> > +
> > + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
> > +
> > + update_rq_clock(rq);
> > + /* (Try to) avoid maintaining share statistics for idle time */
> > + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
>
> Ok, so here you try to compensate for some of the weirdness from the
> previous patch.. wouldn't it be much saner to fully consider the
> throttled things dequeued for the load calculation etc.?
>
> > +
> > + cfs_rq->throttled = 0;
> > + for_each_sched_entity(se) {
> > + if (se->on_rq)
> > + break;
> > +
> > + cfs_rq = cfs_rq_of(se);
> > + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
> > + if (cfs_rq_throttled(cfs_rq))
> > + break;
>
> That's just weird, it was throttled, you enqueued it but find it
> throttled.

se got enqueued to cfs_rq, but we find that cfs_rq is throttled and hence
refrain from enqueueing cfs_rq further.

So essentially enqueueing to a throttled cfs_rq is allowed, but a throttled
group entity can't be enqueued further.

Regards,
Bharata.

2011-02-24 11:14:38

by Peter Zijlstra

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

On Thu, 2011-02-24 at 12:34 +0530, Bharata B Rao wrote:
> On Wed, Feb 23, 2011 at 02:32:12PM +0100, Peter Zijlstra wrote:
> > On Tue, 2011-02-15 at 19:18 -0800, Paul Turner wrote:
> >
> > > +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
> > > +{
> > > + struct rq *rq = rq_of(cfs_rq);
> > > + struct sched_entity *se;
> > > +
> > > + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
> > > +
> > > + update_rq_clock(rq);
> > > + /* (Try to) avoid maintaining share statistics for idle time */
> > > + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
> >
> > Ok, so here you try to compensate for some of the weirdness from the
> > previous patch.. wouldn't it be much saner to fully consider the
> > throttled things dequeued for the load calculation etc.?
> >
> > > +
> > > + cfs_rq->throttled = 0;
> > > + for_each_sched_entity(se) {
> > > + if (se->on_rq)
> > > + break;
> > > +
> > > + cfs_rq = cfs_rq_of(se);
> > > + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
> > > + if (cfs_rq_throttled(cfs_rq))
> > > + break;
> >
> > That's just weird, it was throttled, you enqueued it but find it
> > throttled.
>
> se got enqueued to cfs_rq, but we find that cfs_rq is throttled and hence
> refrain from enqueueing cfs_rq further.
>
> So essentially enqueueing to a throttled cfs_rq is allowed, but a throttled
> group entity can't be enqueued further.

Argh, so this is about the total trainwreck you have for hierarchy
semantics (which you've basically inherited from the RT bits I guess,
which are similarly broken).

Do you want to support per-cgroup throttle periods? If so we need to sit
down and work this out, if not the above should not be possible because
each cgroup can basically run from the same refresh timer and everybody
will get throttled at the same time.

2011-02-26 00:02:53

by Paul Turner

Subject: Re: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh

Oops missed this one before:

On Wed, Feb 23, 2011 at 5:32 AM, Peter Zijlstra <[email protected]> wrote:
> On Tue, 2011-02-15 at 19:18 -0800, Paul Turner wrote:
>
>> +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
>> +{
>> + struct rq *rq = rq_of(cfs_rq);
>> + struct sched_entity *se;
>> +
>> + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
>> +
>> + update_rq_clock(rq);
>> + /* (Try to) avoid maintaining share statistics for idle time */
>> + cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
>
> Ok, so here you try to compensate for some of the weirdness from the
> previous patch.. wouldn't it be much saner to fully consider the
> throttled things dequeued for the load calculation etc.?
>

That's attempted -- but there's no way to control wakeups, which will
trigger the usual updates, so we do have to do something.

The alternative is more invasive re-ordering of the dequeue/enqueue
paths which I think actually ends up pretty ugly without improving
things.

>> +
>> + cfs_rq->throttled = 0;
>> + for_each_sched_entity(se) {
>> + if (se->on_rq)
>> + break;
>> +
>> + cfs_rq = cfs_rq_of(se);
>> + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
>> + if (cfs_rq_throttled(cfs_rq))
>> + break;
>
> That's just weird, it was throttled, you enqueued it but find it
> throttled.
>

Two reasons:

a) We might be unthrottling a child in a throttled hierarchy. This
can occur regardless of conformancy (e.g. different periods)
b) edge case: suppose there's no bandwidth already and the enqueue
pushes things back into a throttled state.

>> + }
>> +
>> + /* determine whether we need to wake up potentally idle cpu */
>
> SP: potentially, also isn't there a determiner missing?

Spelling fixed, I think the determiner is ok though:

- We know nr_running must have been zero before, since rq->curr ==
rq->idle (also, if this *has* changed then there's already a resched
for that in flight and we don't need to issue another). This also implies that
rq->cfs.nr_running was == 0.

- Root cfs_rq.nr_running now being greater than zero tells us that our
unthrottle was root visible (specifically, it was not a throttled
child of another throttled hierarchy), which tells us that there's a
task waiting.

Am I missing a case?

>
>> + if (rq->curr == rq->idle && rq->cfs.nr_running)
>> + resched_task(rq->curr);
>> +}
>> +
>> static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
>> unsigned long delta_exec)
>> {
>> @@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct
>>
>> static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
>> {
>> - return 1;
>> + int i, idle = 1;
>> + u64 delta;
>> + const struct cpumask *span;
>> +
>> + if (cfs_b->quota == RUNTIME_INF)
>> + return 1;
>> +
>> + /* reset group quota */
>> + raw_spin_lock(&cfs_b->lock);
>> + cfs_b->runtime = cfs_b->quota;
>
> Shouldn't that be something like:
>
> cfs_b->runtime =
>   min(cfs_b->runtime + overrun * cfs_b->quota, cfs_b->quota);
>
> afaict runtime can go negative in which case we need to compensate for
> that, but we cannot ever get more than quota because we allow for
> overcommit, so not limiting things would allow us to accrue an unlimited
> amount of runtime.
>
> Or can only the per-cpu quota muck go negative?

The over-run can only occur on a local cpu (e.g. due to us being
unable to immediately evict a throttled entity). By injecting a
constant amount of bandwidth into the global pool we are able to
correct that over-run in the subsequent period.

> In that case it should
> probably be propagated back into the global bw on throttle, otherwise
> you can get deficits on CPUs that remain unused for a while.
>

I think you mean surplus :). Yes, there is potentially a small amount
of surplus quota in the system; the "hard" bound is that across N
periods you can receive [N periods' worth of quota + (slice size * NR_CPUs)],
since this is what may be outstanding as the surpluses above. Since the
slice size is fairly small, this over-run is also fairly tight to the
stated bound (as well as being manageable through the slice size
sysctl when required).
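
As a worked example of that bound (numbers purely illustrative): with, say, a
100ms quota per period, a 5ms slice and 8 cpus, the most a group can receive
across N periods is N * 100ms + 8 * 5ms, i.e. the surplus is capped at 40ms no
matter how many periods elapse.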


>> + raw_spin_unlock(&cfs_b->lock);
>> +
>> + span = sched_bw_period_mask();
>> + for_each_cpu(i, span) {
>> + struct rq *rq = cpu_rq(i);
>> + struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
>> +
>> + if (cfs_rq->nr_running)
>> + idle = 0;
>> +
>> + if (!cfs_rq_throttled(cfs_rq))
>> + continue;
>> +
>> + delta = tg_request_cfs_quota(cfs_rq->tg);
>> +
>> + if (delta) {
>> + raw_spin_lock(&rq->lock);
>> + cfs_rq->quota_assigned += delta;
>> +
>> + /* avoid race with tg_set_cfs_bandwidth */
>
> *what* race, and *how*
>

When a user sets a new bandwidth limit for the cgroup (e.g. removes
it, or sets unlimited bandwidth), that operation may in itself unthrottle
the group. Since we synchronize on rq->lock, rechecking this
condition is sufficient to avoid a double unthrottle here.

>> + if (cfs_rq_throttled(cfs_rq) &&
>> + cfs_rq->quota_used < cfs_rq->quota_assigned)
>> + unthrottle_cfs_rq(cfs_rq);
>> + raw_spin_unlock(&rq->lock);
>> + }
>> + }
>> +
>> + return idle;
>> }
>
> This whole positive quota muck makes my head hurt, whatever did you do
> that for? Also it doesn't deal with wrapping, which admittedly won't
> really happen but still.
>

Ah-ha! In going through and swapping things to a single counter I
remember the reason now:

It's that, since we can overflow usage on the per-cpu tracking, when
using a single counter care must be taken to avoid a collision with
RUNTIME_INF, since it's (-1).

Now I'm debating whether the ugliness of these checks is worth it.
Perhaps moving RUNTIME_INF out of quota_remaining and having a
separate per-cpu quota enabled indicator would be the cleanest of all
three.
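
A rough sketch of that third option (hypothetical field names, not the current
patch): keep the RUNTIME_INF sentinel out of the per-cpu counter and carry a
separate enabled flag, so the counter can legitimately go negative:

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

#define RUNTIME_INF     ((u64)~0ULL)    /* == (u64)-1, hence the collision risk */

/* hypothetical per-cpu state, not the actual patch fields */
struct cfs_rq_bw {
        int runtime_enabled;    /* bandwidth enforcement on/off for this cfs_rq */
        s64 runtime_remaining;  /* may legitimately go negative on overrun */
};

/* with the flag split out, a remaining value of -1 is just a small deficit */
static int cfs_rq_throttle_check(const struct cfs_rq_bw *cfs_rq)
{
        if (!cfs_rq->runtime_enabled)
                return 0;       /* the infinite-quota case is handled by the flag */

        return cfs_rq->runtime_remaining <= 0;
}

int main(void)
{
        struct cfs_rq_bw r = { .runtime_enabled = 1, .runtime_remaining = -1 };

        printf("throttle? %d\n", cfs_rq_throttle_check(&r));   /* 1: a deficit, not "infinite" */
        return 0;
}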
