From: josef@toxicpanda.com
X-Google-Original-From: jbacik@fb.com
To: peterz@infradead.org, linux-kernel@vger.kernel.org, kernel-team@fb.com
Cc: Josef Bacik
Subject: [PATCH] sched/fair: move definitions to fix !CONFIG_SMP
Date: Mon, 21 Aug 2017 16:03:05 -0400
Message-Id: <1503345785-9323-1-git-send-email-jbacik@fb.com>
X-Mailer: git-send-email 2.7.4

From: Josef Bacik

The series of patches adding runnable_avg and subsequent supporting
patches broke on !CONFIG_SMP.  Fix this by moving the definitions under
the appropriate checks, and moving the !CONFIG_SMP definitions higher
up.

Signed-off-by: Josef Bacik
---
 kernel/sched/fair.c | 155 +++++++++++++++++++++++++++-------------------------
 1 file changed, 80 insertions(+), 75 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a89f4..c53da64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2796,7 +2796,81 @@ static long calc_group_shares(struct cfs_rq *cfs_rq)
 	 */
 	return clamp_t(long, shares, MIN_SHARES, tg_shares);
 }
-# endif /* CONFIG_SMP */
+
+/*
+ * The runnable shares of this group are calculated as follows:
+ *
+ *          max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
+ * shares * ------------------------------------------------------------
+ *               max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
+ *
+ * We do this to keep the shares in line with expected load on the cfs_rq.
+ * Consider a cfs_rq that has several tasks wake up on this cfs_rq for the first
+ * time: its runnable_load_avg is not going to be representative of the actual
+ * load this cfs_rq will now experience, which will bias us against this cfs_rq.
+ * The weight on the cfs_rq is the immediate effect of having new tasks
+ * enqueued onto it, which should be used to calculate the new runnable shares.
+ * At the same time we need the actual load_avg to be the lower bound for the
+ * calculation, to handle when our weight drops quickly from having entities
+ * dequeued.
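+ *
+ * For example, with purely illustrative values: shares == 1024 on a cfs_rq
+ * where freshly enqueued tasks make (after scale_load_down()) both
+ * runnable_weight and load.weight equal 2048, while the decayed
+ * runnable_load_avg is 128 and load_avg is 256.  Then
+ * 1024 * max(128, 2048) / max(256, 2048) == 1024, i.e. the new weight rather
+ * than the stale averages drives the group's runnable shares.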
+ */
+static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
+{
+	long load_avg = max(cfs_rq->avg.load_avg,
+			    scale_load_down(cfs_rq->load.weight));
+	long runnable = max(cfs_rq->avg.runnable_load_avg,
+			    scale_load_down(cfs_rq->runnable_weight));
+
+	runnable *= shares;
+	if (load_avg)
+		runnable /= load_avg;
+	return clamp_t(long, runnable, MIN_SHARES, shares);
+}
+
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->runnable_weight += se->runnable_weight;
+
+	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
+	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
+}
+
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->runnable_weight -= se->runnable_weight;
+
+	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
+	sub_positive(&cfs_rq->avg.runnable_load_sum,
+		     se_runnable(se) * se->avg.runnable_load_sum);
+}
+
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->avg.load_avg += se->avg.load_avg;
+	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+}
+
+#else /* CONFIG_SMP */
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+#endif /* CONFIG_SMP */
+
 
 /*
  * Signed add and clamp on underflow.
@@ -2848,44 +2922,9 @@ static inline long se_runnable(struct sched_entity *se)
 	return scale_load_down(se->runnable_weight);
 }
 
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight += se->runnable_weight;
-
-	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
-	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
-}
-
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight -= se->runnable_weight;
-
-	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
-	sub_positive(&cfs_rq->avg.runnable_load_sum,
-		     se_runnable(se) * se->avg.runnable_load_sum);
-}
-
-static inline void
-__add_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->avg.load_avg += se->avg.load_avg;
-	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
-}
-
-static inline void
-__sub_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
-}
-
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight, unsigned long runnable)
 {
-	u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
-
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		if (cfs_rq->curr == se)
@@ -2899,9 +2938,9 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	se->runnable_weight = runnable;
 	update_load_set(&se->load, weight);
 
-	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-	se->avg.runnable_load_avg =
-		div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
+#ifdef CONFIG_SMP
+	___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+#endif
 	__add_load_avg(cfs_rq, se);
 
 	if (se->on_rq) {
@@ -2924,36 +2963,6 @@ void reweight_task(struct task_struct *p, int prio)
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
 /*
- * The runnable shares of this group are calculated as such
- *
- *          max(cfs_rq->avg.runnable_load_avg, cfs_rq->runnable_weight)
- * shares * ------------------------------------------------------------
- *               max(cfs_rq->avg.load_avg, cfs_rq->load.weight)
- *
- * We do this to keep the shares in line with expected load on the cfs_rq.
- * Consider a cfs_rq that has several tasks wake up on this cfs_rq for the first
- * time, it's runnable_load_avg is not going to be representative of the actual
- * load this cfs_rq will now experience, which will bias us agaisnt this cfs_rq.
- * The weight on the cfs_rq is the immediate effect of having new tasks
- * enqueue'd onto it which should be used to calculate the new runnable shares.
- * At the same time we need the actual load_avg to be the lower bounds for the
- * calculation, to handle when our weight drops quickly from having entities
- * dequeued.
- */
-static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
-{
-	long load_avg = max(cfs_rq->avg.load_avg,
-			    scale_load_down(cfs_rq->load.weight));
-	long runnable = max(cfs_rq->avg.runnable_load_avg,
-			    scale_load_down(cfs_rq->runnable_weight));
-
-	runnable *= shares;
-	if (load_avg)
-		runnable /= load_avg;
-	return clamp_t(long, runnable, MIN_SHARES, shares);
-}
-
-/*
  * Recomputes the group entity based on the current state of its group
  * runqueue.
 */
@@ -2969,14 +2978,14 @@ static void update_cfs_group(struct sched_entity *se)
 		return;
 
 #ifndef CONFIG_SMP
-	shares = READ_ONCE(gcfs_rq->tg->shares);
+	runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
 
 	if (likely(se->load.weight == shares))
 		return;
 #else
 	shares = calc_group_shares(gcfs_rq);
-#endif
 	runnable = calc_group_runnable(gcfs_rq, shares);
+#endif
 
 	reweight_entity(cfs_rq_of(se), se, shares, runnable);
 }
@@ -3819,10 +3828,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	cfs_rq_util_change(cfs_rq);
 }
 
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-- 
2.7.4
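For readers building without SMP, the fix boils down to the usual kernel
pattern: give the SMP-only helpers their real bodies under CONFIG_SMP, give
them empty inline stubs in the #else branch, and keep both variants ahead of
the first caller.  Below is a minimal, self-contained sketch of that pattern;
the struct and function names are invented for illustration and are not taken
from fair.c.  It can be built either way, e.g. "cc -DCONFIG_SMP toy.c" or
plain "cc toy.c".

/*
 * Toy illustration of the layout this patch restores in fair.c: the SMP-only
 * helper has a real body under CONFIG_SMP, an empty inline stub otherwise,
 * and both sit above the first caller so a UP build still compiles.
 */
#include <stdio.h>

struct toy_rq { long runnable_weight; };
struct toy_se { long runnable_weight; };

#ifdef CONFIG_SMP
/* SMP build: the helper does real accounting work. */
static inline void toy_enqueue_runnable(struct toy_rq *rq, struct toy_se *se)
{
	rq->runnable_weight += se->runnable_weight;
}
#else /* !CONFIG_SMP */
/* UP build: an empty stub keeps the caller below compiling. */
static inline void toy_enqueue_runnable(struct toy_rq *rq, struct toy_se *se) {}
#endif /* CONFIG_SMP */

int main(void)
{
	struct toy_rq rq = { 0 };
	struct toy_se se = { .runnable_weight = 1024 };

	/* Compiles either way; only the SMP variant changes rq. */
	toy_enqueue_runnable(&rq, &se);
	printf("runnable_weight = %ld\n", rq.runnable_weight);
	return 0;
}

The patch applies this same shape to enqueue_runnable_load_avg(),
dequeue_runnable_load_avg(), __add_load_avg() and __sub_load_avg(), so a
!CONFIG_SMP build of fair.c already sees a definition by the time
reweight_entity() uses them.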