2014-11-20 17:28:49

by Xunlei Pang

Subject: [PATCH v3 1/3] sched/deadline: Modify cpudl.free_cpus to reflect rd->span

Currently, cpudl.free_cpus contains all cpus during init (see cpudl_init()),
so when calling cpudl_find() we have to AND with the rd->span cpumask
(cpus_allowed is unreliable when performing clustered scheduling using
cpusets) to avoid selecting a cpu outside the current root domain; see
find_later_rq().

This patch adds cpudl_set_freecpu() to initialize cpudl.free_cpus when doing
rq_attach_root(), so we can avoid the extra rd->span operation when calling
cpudl_find().

Signed-off-by: pang.xunlei <[email protected]>
---
kernel/sched/core.c | 2 ++
kernel/sched/cpudeadline.c | 18 ++++++++++++++----
kernel/sched/cpudeadline.h | 1 +
kernel/sched/deadline.c | 3 ---
4 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c..1b417de 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5584,6 +5584,8 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
rq->rd = rd;

cpumask_set_cpu(rq->cpu, rd->span);
+ cpudl_set_freecpu(&rd->cpudl, rq->cpu);
+
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 539ca3c..c79f0d7 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -107,7 +107,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;

- if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
+ if (later_mask &&
+ cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
+ cpumask_and(later_mask, later_mask, cpu_active_mask)) {
best_cpu = cpumask_any(later_mask);
goto out;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
@@ -186,6 +188,16 @@ out:
}

/*
+ * cpudl_set_freecpu - Set the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_set_freecpu(struct cpudl *cp, int cpu)
+{
+ cpumask_set_cpu(cpu, cp->free_cpus);
+}
+
+/*
* cpudl_init - initialize the cpudl structure
* @cp: the cpudl max-heap context
*/
@@ -203,7 +215,7 @@ int cpudl_init(struct cpudl *cp)
if (!cp->elements)
return -ENOMEM;

- if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
kfree(cp->elements);
return -ENOMEM;
}
@@ -211,8 +223,6 @@ int cpudl_init(struct cpudl *cp)
for_each_possible_cpu(i)
cp->elements[i].idx = IDX_INVALID;

- cpumask_setall(cp->free_cpus);
-
return 0;
}

diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index 020039b..4a10a65 100755
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -24,6 +24,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
struct cpumask *later_mask);
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
int cpudl_init(struct cpudl *cp);
+void cpudl_set_freecpu(struct cpudl *cp, int cpu);
void cpudl_cleanup(struct cpudl *cp);
#endif /* CONFIG_SMP */

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5285332..bd83272 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1187,9 +1187,6 @@ static int find_later_rq(struct task_struct *task)
* We have to consider system topology and task affinity
* first, then we can look for a suitable cpu.
*/
- cpumask_copy(later_mask, task_rq(task)->rd->span);
- cpumask_and(later_mask, later_mask, cpu_active_mask);
- cpumask_and(later_mask, later_mask, &task->cpus_allowed);
best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
task, later_mask);
if (best_cpu == -1)
--
2.1.0
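
For illustration, a minimal userspace sketch of the mask handling this patch
introduces, using plain unsigned long bitmasks rather than the kernel cpumask
API; set_freecpu() and find_later_cpu() are illustrative stand-ins for
cpudl_set_freecpu() and the fast path of cpudl_find(), not kernel code:

#include <stdio.h>

static unsigned long free_cpus;         /* cpus attached to the root domain */

/* counterpart of cpudl_set_freecpu(): mark a cpu when it joins the rd */
static void set_freecpu(int cpu)
{
        free_cpus |= 1UL << cpu;
}

/* counterpart of the cpudl_find() fast path after this patch:
 * intersect free_cpus with the task's affinity and the active mask,
 * no rd->span needed because free_cpus already reflects it */
static int find_later_cpu(unsigned long cpus_allowed, unsigned long cpu_active)
{
        unsigned long later = free_cpus & cpus_allowed & cpu_active;

        if (!later)
                return -1;              /* fall back to the heap maximum */
        return __builtin_ctzl(later);   /* any cpu from the intersection */
}

int main(void)
{
        set_freecpu(1);
        set_freecpu(3);                 /* cpus 1 and 3 attached */

        /* task allowed on cpus 0-3, all of them active: picks cpu 1 */
        printf("best cpu: %d\n", find_later_cpu(0xfUL, 0xfUL));
        return 0;
}

The point is that free_cpus starts empty and only grows as cpus are attached
to the root domain, so cpudl_find() no longer has to intersect with rd->span.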


2014-11-20 17:28:56

by Xunlei Pang

Subject: [PATCH v3 2/3] sched/deadline: Fix wrong cpudl_find() in check_preempt_equal_dl()

In check_preempt_equal_dl(), cpudl_find() is called with a NULL later_mask,
so cpudl.free_cpus is not checked at all there.

This patch tackles the issue by always passing a non-NULL cpumask to
cpudl_find(), assigning later_mask in check_preempt_equal_dl() from the
per-cpu local_cpu_mask_dl.

Signed-off-by: pang.xunlei <[email protected]>
---
kernel/sched/cpudeadline.c | 10 ++++------
kernel/sched/deadline.c | 14 ++++++++++----
2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index c79f0d7..c01b3aa 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -97,7 +97,7 @@ static inline int cpudl_maximum(struct cpudl *cp)
* cpudl_find - find the best (later-dl) CPU in the system
* @cp: the cpudl max-heap context
* @p: the task
- * @later_mask: a mask to fill in with the selected CPUs (or NULL)
+ * @later_mask: a mask to fill in with the selected CPUs (not NULL)
*
* Returns: int - best CPU (heap maximum if suitable)
*/
@@ -107,16 +107,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;

- if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
- cpumask_and(later_mask, later_mask, cpu_active_mask)) {
+ cpumask_and(later_mask, cpu_active_mask, &p->cpus_allowed);
+ if (cpumask_and(later_mask, later_mask, cp->free_cpus)) {
best_cpu = cpumask_any(later_mask);
goto out;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
best_cpu = cpudl_maximum(cp);
- if (later_mask)
- cpumask_set_cpu(best_cpu, later_mask);
+ cpumask_set_cpu(best_cpu, later_mask);
}

out:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index bd83272..03bb3f0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -965,14 +965,22 @@ out:
return cpu;
}

+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
+
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
+ struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
+
+ /* Make sure the mask is initialized first */
+ if (unlikely(!later_mask))
+ return;
+
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
- cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
+ cpudl_find(&rq->rd->cpudl, rq->curr, later_mask) == -1)
return;

/*
@@ -980,7 +988,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
- cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
+ cpudl_find(&rq->rd->cpudl, p, later_mask) != -1)
return;

resched_curr(rq);
@@ -1167,8 +1175,6 @@ next_node:
return NULL;
}

-static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
-
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
--
2.1.0
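
A minimal userspace sketch of why the scratch mask matters, assuming a static
buffer as a stand-in for the per-cpu local_cpu_mask_dl and plain bitmasks for
cpumasks; cpudl_find_model() and curr_can_go_elsewhere() are illustrative
names, not kernel API:

#include <stdio.h>

static unsigned long free_cpus = 0xaUL;         /* cpus 1 and 3 are free */

/* counterpart of cpudl_find() after this patch: later_mask must not be NULL,
 * so the free_cpus intersection is always evaluated */
static int cpudl_find_model(unsigned long allowed, unsigned long active,
                            unsigned long *later_mask)
{
        *later_mask = active & allowed & free_cpus;
        if (*later_mask)
                return __builtin_ctzl(*later_mask);
        return -1;                      /* heap-maximum path omitted */
}

/* first check in check_preempt_equal_dl(): is there anywhere curr could go?
 * A scratch mask is always handed in, as the per-cpu local_cpu_mask_dl is
 * in the real code. */
static int curr_can_go_elsewhere(unsigned long curr_allowed,
                                 unsigned long active)
{
        static unsigned long scratch;   /* stand-in for local_cpu_mask_dl */

        return cpudl_find_model(curr_allowed, active, &scratch) != -1;
}

int main(void)
{
        printf("curr can move: %d\n",
               curr_can_go_elsewhere(0xfUL, 0xfUL));    /* -> 1 */
        return 0;
}

With a NULL mask, the old code skipped the free_cpus intersection entirely and
only consulted the heap maximum; always supplying a mask makes the free-cpu
fast path reachable from check_preempt_equal_dl().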

2014-11-20 17:29:06

by Xunlei Pang

Subject: [PATCH v3 3/3] sched/deadline: Change the return meaning of cpudl_find()

cpudl_find() is not a good place to select the best cpu, so leave
that role to its call site, currently find_later_rq(), where the best
cpu can be elected according to the sched_domain topology.

This patch changes the meaning of cpudl_find()'s return value:

- Define macros for the return values of cpudl_find(): CPUDL_FIND_NONE
for "no available cpus" and CPUDL_FIND_CPUMASK for "available cpus in
later_mask"; a return value >= 0 means the only available cpu.

- In the "if" branch, just return CPUDL_FIND_CPUMASK, as we want to
select the best_cpu in find_later_rq().
In the "else if" branch, just return cpudl_maximum(cp); there is no
need to set later_mask, since we will definitely select this cpu as
the best_cpu in find_later_rq().

- Convert all of its call sites to reflect this change of return meaning,
and sync find_later_rq()'s best-cpu election logic with that of the RT
find_lowest_rq().

Additionally, this patch avoids the extra cpumask_set_cpu() operation
in cpudl_find().

Signed-off-by: pang.xunlei <[email protected]>
---
kernel/sched/cpudeadline.c | 16 +++++++---------
kernel/sched/cpudeadline.h | 3 +++
kernel/sched/deadline.c | 17 ++++++++---------
3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index c01b3aa..f17e6af 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -99,28 +99,26 @@ static inline int cpudl_maximum(struct cpudl *cp)
* @p: the task
* @later_mask: a mask to fill in with the selected CPUs (not NULL)
*
- * Returns: int - best CPU (heap maximum if suitable)
+ * Returns: int
+ * CPUDL_FIND_NONE: no available cpus;
+ * CPUDL_FIND_CPUMASK: available cpus in later_mask
+ * >=0: the only one available 0-based cpu
*/
int cpudl_find(struct cpudl *cp, struct task_struct *p,
struct cpumask *later_mask)
{
- int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;

cpumask_and(later_mask, cpu_active_mask, &p->cpus_allowed);
if (cpumask_and(later_mask, later_mask, cp->free_cpus)) {
- best_cpu = cpumask_any(later_mask);
- goto out;
+ return CPUDL_FIND_CPUMASK;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
- best_cpu = cpudl_maximum(cp);
- cpumask_set_cpu(best_cpu, later_mask);
+ return cpudl_maximum(cp);
}

-out:
- WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

- return best_cpu;
+ return CPUDL_FIND_NONE;
}

/*
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index 4a10a65..235f2b9 100755
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -3,6 +3,9 @@

#include <linux/sched.h>

+#define CPUDL_FIND_NONE -2 /* no available cpus */
+#define CPUDL_FIND_CPUMASK -1 /* available cpus in later_mask */
+
#define IDX_INVALID -1

struct cpudl_item {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 03bb3f0..bb37506 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -980,7 +980,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
- cpudl_find(&rq->rd->cpudl, rq->curr, later_mask) == -1)
+ cpudl_find(&rq->rd->cpudl, rq->curr, later_mask) == CPUDL_FIND_NONE)
return;

/*
@@ -988,7 +988,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
- cpudl_find(&rq->rd->cpudl, p, later_mask) != -1)
+ cpudl_find(&rq->rd->cpudl, p, later_mask) != CPUDL_FIND_NONE)
return;

resched_curr(rq);
@@ -1195,8 +1195,10 @@ static int find_later_rq(struct task_struct *task)
*/
best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
task, later_mask);
- if (best_cpu == -1)
+ if (best_cpu == CPUDL_FIND_NONE)
return -1;
+ if (best_cpu != CPUDL_FIND_CPUMASK)
+ return best_cpu;

/*
* If we are here, some target has been found,
@@ -1234,12 +1236,9 @@ static int find_later_rq(struct task_struct *task)
return this_cpu;
}

- /*
- * Last chance: if best_cpu is valid and is
- * in the mask, that becomes our choice.
- */
- if (best_cpu < nr_cpu_ids &&
- cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+ best_cpu = cpumask_first_and(later_mask,
+ sched_domain_span(sd));
+ if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
--
2.1.0
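
A minimal userspace sketch of the new return convention and of how a caller
like find_later_rq() branches on it; CPUDL_FIND_NONE and CPUDL_FIND_CPUMASK
match the macros added above, while everything else (plain bitmasks,
cpudl_find_model(), the deadline_earlier flag) is illustrative only:

#include <stdio.h>

#define CPUDL_FIND_NONE         -2      /* no available cpus */
#define CPUDL_FIND_CPUMASK      -1      /* available cpus in later_mask */

static unsigned long free_cpus;         /* no free cpu in this example */
static int heap_max_cpu = 2;            /* cpu holding the latest deadline */

static int cpudl_find_model(unsigned long allowed, unsigned long active,
                            int deadline_earlier, unsigned long *later_mask)
{
        *later_mask = active & allowed & free_cpus;
        if (*later_mask)
                return CPUDL_FIND_CPUMASK;      /* caller picks from later_mask */
        if ((allowed & (1UL << heap_max_cpu)) && deadline_earlier)
                return heap_max_cpu;            /* the only available cpu */
        return CPUDL_FIND_NONE;
}

/* caller sketch, mirroring the branch added to find_later_rq() */
static int find_later_cpu(unsigned long allowed, unsigned long active)
{
        unsigned long later_mask;
        int cpu = cpudl_find_model(allowed, active, 1, &later_mask);

        if (cpu == CPUDL_FIND_NONE)
                return -1;
        if (cpu != CPUDL_FIND_CPUMASK)
                return cpu;             /* single candidate, no topology walk */

        return __builtin_ctzl(later_mask);      /* sd-aware election omitted */
}

int main(void)
{
        printf("target cpu: %d\n", find_later_cpu(0xfUL, 0xfUL));       /* -> 2 */
        return 0;
}

Returning the heap maximum directly lets the caller skip the topology walk
when there is only one candidate, and drops the cpumask_set_cpu() that the
old code needed just to report it.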