2017-08-21 20:03:08

by Josef Bacik

Subject: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP

From: Josef Bacik <[email protected]>

The series of patches adding runnable_avg and subsequent supporting
patches broke on !CONFIG_SMP. Fix this by moving the definitions under
the appropriate checks, and moving the !CONFIG_SMP definitions higher
up.

Signed-off-by: Josef Bacik <[email protected]>
---
kernel/sched/fair.c | 155 +++++++++++++++++++++++++++-------------------------
1 file changed, 80 insertions(+), 75 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a89f4..c53da64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2796,7 +2796,81 @@ static long calc_group_shares(struct cfs_rq *cfs_rq)
*/
return clamp_t(long, shares, MIN_SHARES, tg_shares);
}
-# endif /* CONFIG_SMP */
+
+/*
+ * The runnable shares of this group are calculated as such
+ *
+ * max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
+ * shares * ------------------------------------------------------------
+ * max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
+ *
+ * We do this to keep the shares in line with expected load on the cfs_rq.
+ * Consider a cfs_rq that has several tasks wake up on it for the first
+ * time: its runnable_load_avg is not going to be representative of the actual
+ * load this cfs_rq will now experience, which will bias us against this cfs_rq.
+ * The weight on the cfs_rq is the immediate effect of having new tasks
+ * enqueued onto it, which should be used to calculate the new runnable shares.
+ * At the same time we need the actual load_avg to be the lower bound for the
+ * calculation, to handle when our weight drops quickly from having entities
+ * dequeued.
+ */
+static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
+{
+ long load_avg = max(cfs_rq->avg.load_avg,
+ scale_load_down(cfs_rq->load.weight));
+ long runnable = max(cfs_rq->avg.runnable_load_avg,
+ scale_load_down(cfs_rq->runnable_weight));
+
+ runnable *= shares;
+ if (load_avg)
+ runnable /= load_avg;
+ return clamp_t(long, runnable, MIN_SHARES, shares);
+}
+
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->runnable_weight += se->runnable_weight;
+
+ cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
+ cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
+}
+
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->runnable_weight -= se->runnable_weight;
+
+ sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
+ sub_positive(&cfs_rq->avg.runnable_load_sum,
+ se_runnable(se) * se->avg.runnable_load_sum);
+}
+
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->avg.load_avg += se->avg.load_avg;
+ cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+ sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+}
+
+#else /* CONFIG_SMP */
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+#endif /* CONFIG_SMP */
+

/*
* Signed add and clamp on underflow.
@@ -2848,44 +2922,9 @@ static inline long se_runnable(struct sched_entity *se)
return scale_load_down(se->runnable_weight);
}

-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->runnable_weight += se->runnable_weight;
-
- cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
- cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
-}
-
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->runnable_weight -= se->runnable_weight;
-
- sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
- sub_positive(&cfs_rq->avg.runnable_load_sum,
- se_runnable(se) * se->avg.runnable_load_sum);
-}
-
-static inline void
-__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->avg.load_avg += se->avg.load_avg;
- cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
-}
-
-static inline void
-__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
-}
-
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight, unsigned long runnable)
{
- u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
if (se->on_rq) {
/* commit outstanding execution time */
if (cfs_rq->curr == se)
@@ -2899,9 +2938,9 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
se->runnable_weight = runnable;
update_load_set(&se->load, weight);

- se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
- se->avg.runnable_load_avg =
- div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
+#ifdef CONFIG_SMP
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+#endif

__add_load_avg(cfs_rq, se);
if (se->on_rq) {
@@ -2924,36 +2963,6 @@ void reweight_task(struct task_struct *p, int prio)
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

/*
- * The runnable shares of this group are calculated as such
- *
- * max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
- * shares * ------------------------------------------------------------
- * max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
- *
- * We do this to keep the shares in line with expected load on the cfs_rq.
- * Consider a cfs_rq that has several tasks wake up on this cfs_rq for the first
- * time, it's runnable_load_avg is not going to be representative of the actual
- * load this cfs_rq will now experience, which will bias us agaisnt this cfs_rq.
- * The weight on the cfs_rq is the immediate effect of having new tasks
- * enqueue'd onto it which should be used to calculate the new runnable shares.
- * At the same time we need the actual load_avg to be the lower bounds for the
- * calculation, to handle when our weight drops quickly from having entities
- * dequeued.
- */
-static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
-{
- long load_avg = max(cfs_rq->avg.load_avg,
- scale_load_down(cfs_rq->load.weight));
- long runnable = max(cfs_rq->avg.runnable_load_avg,
- scale_load_down(cfs_rq->runnable_weight));
-
- runnable *= shares;
- if (load_avg)
- runnable /= load_avg;
- return clamp_t(long, runnable, MIN_SHARES, shares);
-}
-
-/*
* Recomputes the group entity based on the current state of its group
* runqueue.
*/
@@ -2969,14 +2978,14 @@ static void update_cfs_group(struct sched_entity *se)
return;

#ifndef CONFIG_SMP
- shares = READ_ONCE(gcfs_rq->tg->shares);
+ runnable = shares = READ_ONCE(gcfs_rq->tg->shares);

if (likely(se->load.weight == shares))
return;
#else
shares = calc_group_shares(gcfs_rq);
-#endif
runnable = calc_group_runnable(gcfs_rq, shares);
+#endif

reweight_entity(cfs_rq_of(se), se, shares, runnable);
}
@@ -3819,10 +3828,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
cfs_rq_util_change(cfs_rq);
}

-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void remove_entity_load_avg(struct sched_entity *se) {}

static inline void
--
2.7.4
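
For readers who want to see the calc_group_runnable() arithmetic from the
hunk above in isolation, here is a minimal userspace sketch of the same
computation. It uses plain longs and a hand-rolled clamp in place of the
kernel's clamp_t()/scale_load_down() helpers, and the input values are made
up purely for illustration; it is not the fair.c implementation itself.

	#include <stdio.h>

	/* Illustrative stand-in for the kernel's MIN_SHARES. */
	#define MIN_SHARES	2L

	static long clamp_long(long v, long lo, long hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	/*
	 * Same shape as calc_group_runnable():
	 *
	 *            max(runnable_load_avg, runnable_weight)
	 *   shares * ----------------------------------------
	 *            max(load_avg, load_weight)
	 */
	static long group_runnable(long load_avg, long load_weight,
				   long runnable_load_avg,
				   long runnable_weight, long shares)
	{
		long load = load_avg > load_weight ? load_avg : load_weight;
		long runnable = runnable_load_avg > runnable_weight ?
				runnable_load_avg : runnable_weight;

		runnable *= shares;
		if (load)
			runnable /= load;
		return clamp_long(runnable, MIN_SHARES, shares);
	}

	int main(void)
	{
		/* Fresh wakeups: weight (3072) outruns the decayed averages. */
		printf("%ld\n", group_runnable(512, 3072, 400, 3072, 1024));	/* 1024 */
		/* After dequeues: load_avg (2048) bounds the denominator. */
		printf("%ld\n", group_runnable(2048, 1024, 512, 256, 1024));	/* 256 */
		return 0;
	}

The two calls mirror the two situations the comment describes: newly
enqueued weight pushing the runnable share up toward the full shares value,
and load_avg keeping the denominator from collapsing when the weight drops
quickly after entities are dequeued.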


2017-08-21 20:06:59

by Peter Zijlstra

Subject: Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP

On Mon, Aug 21, 2017 at 04:03:05PM -0400, [email protected] wrote:
> From: Josef Bacik <[email protected]>
>
> The series of patches adding runnable_avg and subsequent supporting
> patches broke on !CONFIG_SMP. Fix this by moving the definitions under
> the appropriate checks, and moving the !CONFIG_SMP definitions higher
> up.

Is this on top or can I fold this in somewhere? I was meaning to rework
the series to avoid this superfluous build borkage.

2017-08-21 20:20:50

by Josef Bacik

Subject: Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP

On Mon, Aug 21, 2017 at 04:03:05PM -0400, [email protected] wrote:
> From: Josef Bacik <[email protected]>
>
> The series of patches adding runnable_avg and subsequent supporting
> patches broke on !CONFIG_SMP. Fix this by moving the definitions under
> the appropriate checks, and moving the !CONFIG_SMP definitions higher
> up.
>
> Signed-off-by: Josef Bacik <[email protected]>

Sorry, ignore this, it's still screwed up. I'll send a new series; there are
multiple broken things here. Thanks,

Josef

2017-08-21 20:44:44

by Josef Bacik

Subject: Re: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP

On Mon, Aug 21, 2017 at 10:06:53PM +0200, Peter Zijlstra wrote:
> On Mon, Aug 21, 2017 at 04:03:05PM -0400, [email protected] wrote:
> > From: Josef Bacik <[email protected]>
> >
> > The series of patches adding runnable_avg and subsequent supporting
> > patches broke on !CONFIG_SMP. Fix this by moving the definitions under
> > the appropriate checks, and moving the !CONFIG_SMP definitions higher
> > up.
>
> Is this on top or can I fold this in somewhere? I was meaning to rework
> the series to avoid this superfluous build borkage.

There are actually a lot of individual breakages. I can go through and make
individual patches that can be folded in where they were introduced, or I can
do them all at once. There's breakage around !CONFIG_FAIR_GROUP_SCHED as well;
basically everything you added needs to be moved around some. I'll do whatever
is most convenient for you. Thanks,

Josef
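
The shape of the fix being discussed, real definitions under the config
#ifdef and empty static inline stubs in the #else branch so that call sites
compile unchanged either way, looks like this in isolation. This is a
generic sketch of the pattern only; the struct and function names are
illustrative, not the actual fair.c layout or the follow-up series Josef
mentions.

	/*
	 * Generic "stub out under config-off" pattern: the real helpers
	 * exist only when CONFIG_SMP is set, and empty static inlines keep
	 * the callers building when it is not.
	 */
	struct example_rq {
		unsigned long runnable_weight;
	};

	struct example_se {
		unsigned long runnable_weight;
	};

	#ifdef CONFIG_SMP
	static inline void
	enqueue_example_load(struct example_rq *rq, struct example_se *se)
	{
		rq->runnable_weight += se->runnable_weight;
	}

	static inline void
	dequeue_example_load(struct example_rq *rq, struct example_se *se)
	{
		rq->runnable_weight -= se->runnable_weight;
	}
	#else /* !CONFIG_SMP */
	/* No per-CPU load tracking on UP: the helpers compile away entirely. */
	static inline void
	enqueue_example_load(struct example_rq *rq, struct example_se *se) {}
	static inline void
	dequeue_example_load(struct example_rq *rq, struct example_se *se) {}
	#endif /* CONFIG_SMP */

The build breaks exactly when a caller appears before the #ifdef block or
when a definition lands under the wrong config symbol, which is why the
patch above is mostly about moving definitions rather than changing them.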