2015-11-06 13:57:55

by Peter Zijlstra

Subject: Re: [PATCH v4] sched: fix incorrect wait time and wait count statistics

On Tue, Oct 27, 2015 at 09:46:53PM -0700, Joonwoo Park wrote:
> @@ -1272,6 +1272,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
> WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
> !p->on_rq);
>
> + /*
> + * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
> + * because schedstat_wait_{start,end} rebase migrating task's wait_start
> + * time relying on p->on_rq.
> + */
> + WARN_ON_ONCE(p->state == TASK_RUNNING &&
> + p->sched_class == &fair_sched_class &&
> + (p->on_rq && !task_on_rq_migrating(p)));
> +

Why do we have to test p->on_rq? Would not ->state == RUNNING imply
that?

> +++ b/kernel/sched/fair.c
> @@ -737,41 +737,69 @@ static void update_curr_fair(struct rq *rq)
> update_curr(cfs_rq_of(&rq->curr->se));
> }
>
> +#ifdef CONFIG_SCHEDSTATS
> static inline void
> update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> + u64 wait_start = rq_clock(rq_of(cfs_rq));
>
> + if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
> + likely(wait_start > se->statistics.wait_start))
> + wait_start -= se->statistics.wait_start;
> +
> + schedstat_set(se->statistics.wait_start, wait_start);
> }
>
> static void
> update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {

Since this is now all under CONFIG_SCHEDSTATS, would it not make sense
to do something like:

u64 now = rq_clock(rq_of(cfs_rq));

to avoid the endless calling of that function?

Also, for that very same reason, would it not make sense to drop the
schedstat_set() usage below? That would greatly enhance readability.
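
I.e. read the clock once and turn the schedstat_set()s into plain
assignments; untested, but roughly:

        /* one clock read for the whole function */
        u64 now = rq_clock(rq_of(cfs_rq));

        se->statistics.wait_max = max(se->statistics.wait_max,
                                      now - se->statistics.wait_start);
        se->statistics.wait_count++;
        se->statistics.wait_sum += now - se->statistics.wait_start;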

> + if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) {
> + /*
> + * Preserve migrating task's wait time so wait_start time stamp
> + * can be adjusted to accumulate wait time prior to migration.
> + */
> + schedstat_set(se->statistics.wait_start,
> + rq_clock(rq_of(cfs_rq)) -
> + se->statistics.wait_start);
> + return;
> + }
> +
> schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
> rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
> schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
> schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
> rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
> +
> if (entity_is_task(se)) {
> trace_sched_stat_wait(task_of(se),
> rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
> }

Is there no means of collapsing the two 'entity_is_task()' branches?

> schedstat_set(se->statistics.wait_start, 0);
> }
> +#else
> +static inline void
> +update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +{
> +}
> +
> +static inline void
> +update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +{
> +}
> +#endif


2015-11-07 02:41:49

by Joonwoo Park

Subject: Re: [PATCH v4] sched: fix incorrect wait time and wait count statistics

On Fri, Nov 06, 2015 at 02:57:49PM +0100, Peter Zijlstra wrote:
> On Tue, Oct 27, 2015 at 09:46:53PM -0700, Joonwoo Park wrote:
> > @@ -1272,6 +1272,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
> > WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
> > !p->on_rq);
> >
> > + /*
> > + * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
> > + * because schedstat_wait_{start,end} rebase migrating task's wait_start
> > + * time relying on p->on_rq.
> > + */
> > + WARN_ON_ONCE(p->state == TASK_RUNNING &&
> > + p->sched_class == &fair_sched_class &&
> > + (p->on_rq && !task_on_rq_migrating(p)));
> > +
>
> Why do we have to test p->on_rq? Would not ->state == RUNNING imply
> that?
>

sched_fork() sets p->state = TASK_RUNNING before changing the task's cpu.
Please let me know if you have a better idea.
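
The p->on_rq check is what keeps the warning quiet for that path:

        /*
         * A freshly forked task reaches set_task_cpu() with
         * p->state == TASK_RUNNING but p->on_rq == 0, so only warn
         * when the task is actually on a runqueue and is not marked
         * TASK_ON_RQ_MIGRATING.
         */
        WARN_ON_ONCE(p->state == TASK_RUNNING &&
                     p->sched_class == &fair_sched_class &&
                     (p->on_rq && !task_on_rq_migrating(p)));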

> > +++ b/kernel/sched/fair.c
> > @@ -737,41 +737,69 @@ static void update_curr_fair(struct rq *rq)
> > update_curr(cfs_rq_of(&rq->curr->se));
> > }
> >
> > +#ifdef CONFIG_SCHEDSTATS
> > static inline void
> > update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
> > {
> > + u64 wait_start = rq_clock(rq_of(cfs_rq));
> >
> > + if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
> > + likely(wait_start > se->statistics.wait_start))
> > + wait_start -= se->statistics.wait_start;
> > +
> > + schedstat_set(se->statistics.wait_start, wait_start);
> > }
> >
> > static void
> > update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
> > {
>
> Since this is now all under CONFIG_SCHEDSTATS, would it not make sense
> to do something like:
>
> u64 now = rq_clock(rq_of(cfs_rq));
>
> to avoid the endless calling of that function?
>
> Also, for that very same reason, would it not make sense to drop the
> schedstat_set() usage below? That would greatly enhance readability.
>

Agreed.

> > + if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) {
> > + /*
> > + * Preserve migrating task's wait time so wait_start time stamp
> > + * can be adjusted to accumulate wait time prior to migration.
> > + */
> > + schedstat_set(se->statistics.wait_start,
> > + rq_clock(rq_of(cfs_rq)) -
> > + se->statistics.wait_start);
> > + return;
> > + }
> > +
> > schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
> > rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
> > schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
> > schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
> > rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
> > +
> > if (entity_is_task(se)) {
> > trace_sched_stat_wait(task_of(se),
> > rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
> > }
>
> Is there no means of collapsing the two 'entity_is_task()' branches?
>

Agreed. Will spin v5 with these cleanups.
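
Roughly, folding the migrating check and the tracepoint into a single
entity_is_task() block (untested):

        struct task_struct *p;
        u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;

        if (entity_is_task(se)) {
                p = task_of(se);
                if (task_on_rq_migrating(p)) {
                        /* stash the wait accrued so far; wait_start is rebased at enqueue */
                        se->statistics.wait_start = delta;
                        return;
                }
                trace_sched_stat_wait(p, delta);
        }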

Thanks,
Joonwoo

> > schedstat_set(se->statistics.wait_start, 0);
> > }
> > +#else
> > +static inline void
> > +update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
> > +{
> > +}
> > +
> > +static inline void
> > +update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
> > +{
> > +}
> > +#endif


2015-11-09 10:32:31

by Peter Zijlstra

Subject: Re: [PATCH v4] sched: fix incorrect wait time and wait count statistics

On Fri, Nov 06, 2015 at 06:41:43PM -0800, Joonwoo Park wrote:
> On Fri, Nov 06, 2015 at 02:57:49PM +0100, Peter Zijlstra wrote:
> > On Tue, Oct 27, 2015 at 09:46:53PM -0700, Joonwoo Park wrote:
> > > @@ -1272,6 +1272,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
> > > WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
> > > !p->on_rq);
> > >
> > > + /*
> > > + * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
> > > + * because schedstat_wait_{start,end} rebase migrating task's wait_start
> > > + * time relying on p->on_rq.
> > > + */
> > > + WARN_ON_ONCE(p->state == TASK_RUNNING &&
> > > + p->sched_class == &fair_sched_class &&
> > > + (p->on_rq && !task_on_rq_migrating(p)));
> > > +
> >
> > Why do we have to test p->on_rq? Would not ->state == RUNNING imply
> > that?
> >
>
> sched_fork() sets p->state = TASK_RUNNING before changing the task's cpu.
> Please let me know if you have a better idea.

Ah, indeed. OK.

2015-11-13 03:39:08

by Joonwoo Park

Subject: Re: [PATCH v5] sched: fix incorrect wait time and wait count statistics

At present the scheduler resets a task's wait start timestamp when the
task migrates to another rq. This misleads the scheduler into reporting
less wait time than actual, by omitting the time spent waiting prior to
migration, and a higher wait count than actual, by counting the
migration as a wait end event. Both effects can be seen via trace or
/proc/<pid>/sched with CONFIG_SCHEDSTATS=y.

Carry forward the migrating task's wait time accrued prior to migration,
and do not count the migration as a wait end event, to fix these
statistics errors.

In order to determine whether a task is migrating, mark task->on_rq with
TASK_ON_RQ_MIGRATING while dequeuing and enqueuing due to migration.
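
For example (illustrative numbers): a task waits 5ms on CPU0, migrates,
and then waits another 3ms on CPU1 before it runs:

        migration dequeue on CPU0:   wait_start = clock0 - wait_start  (5ms of accumulated wait)
        migration enqueue on CPU1:   wait_start = clock1 - 5ms
        wait end on CPU1, 3ms later: delta = (clock1 + 3ms) - wait_start = 8ms

so a single wait of 8ms is reported, including the time spent waiting
before the migration, and the migration itself is not counted as a wait
end event.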

To: Ingo Molnar <[email protected]>
To: Peter Zijlstra <[email protected]>
Cc: [email protected]
Signed-off-by: Joonwoo Park <[email protected]>
---
Changes in v2:
* Set p->on_rq = TASK_ON_RQ_MIGRATING while doing migration dequeue/enqueue
and check whether task's migrating with task_on_rq_migrating().
Changes in v3:
* Fixed "WARNING: CPU: 0 PID: 3 at kernel/sched/fair.c:260 update_stats_wait_end+0x23/0x30()" caught by Intel kernel test robot.
Changes in v4:
* Made __migrate_swap_task() to set p->on_rq = TASK_ON_RQ_MIGRATING.
* Added WARN_ON_ONCE() inside CONFIG_SCHED_DEBUG.
* Added comments.
* Cleanup with ifdefy.
Changes in v5:
* Cleanup update_stats_wait_end().

kernel/sched/core.c | 15 ++++++++++--
kernel/sched/fair.c | 67 +++++++++++++++++++++++++++++++++++++----------------
2 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bcd214e..1ddbabc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1069,8 +1069,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
{
lockdep_assert_held(&rq->lock);

- dequeue_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ dequeue_task(rq, p, 0);
set_task_cpu(p, new_cpu);
raw_spin_unlock(&rq->lock);

@@ -1078,8 +1078,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new

raw_spin_lock(&rq->lock);
BUG_ON(task_cpu(p) != new_cpu);
- p->on_rq = TASK_ON_RQ_QUEUED;
enqueue_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);

return rq;
@@ -1272,6 +1272,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!p->on_rq);

+ /*
+ * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
+ * because schedstat_wait_{start,end} rebase migrating task's wait_start
+ * time relying on p->on_rq.
+ */
+ WARN_ON_ONCE(p->state == TASK_RUNNING &&
+ p->sched_class == &fair_sched_class &&
+ (p->on_rq && !task_on_rq_migrating(p)));
+
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold either p->pi_lock or rq->lock, when changing
@@ -1308,9 +1317,11 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
src_rq = task_rq(p);
dst_rq = cpu_rq(cpu);

+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
} else {
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9a5e60f..fc54ecb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -737,12 +737,56 @@ static void update_curr_fair(struct rq *rq)
update_curr(cfs_rq_of(&rq->curr->se));
}

+#ifdef CONFIG_SCHEDSTATS
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
+ u64 wait_start = rq_clock(rq_of(cfs_rq));
+
+ if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+ likely(wait_start > se->statistics.wait_start))
+ wait_start -= se->statistics.wait_start;
+
+ se->statistics.wait_start = wait_start;
}

+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct task_struct *p;
+ u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+
+ if (entity_is_task(se)) {
+ p = task_of(se);
+ if (task_on_rq_migrating(p)) {
+ /*
+ * Preserve migrating task's wait time so wait_start
+ * time stamp can be adjusted to accumulate wait time
+ * prior to migration.
+ */
+ se->statistics.wait_start = delta;
+ return;
+ }
+ trace_sched_stat_wait(p, delta);
+ }
+
+ se->statistics.wait_max = max(se->statistics.wait_max, delta);
+ se->statistics.wait_count++;
+ se->statistics.wait_sum += delta;
+ se->statistics.wait_start = 0;
+}
+#else
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
+
/*
* Task is being enqueued - update stats:
*/
@@ -756,23 +800,6 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_stats_wait_start(cfs_rq, se);
}

-static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
- schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
- schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- trace_sched_stat_wait(task_of(se),
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
- }
-#endif
- schedstat_set(se->statistics.wait_start, 0);
-}
-
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -5656,8 +5683,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);

- deactivate_task(env->src_rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ deactivate_task(env->src_rq, p, 0);
set_task_cpu(p, env->dst_cpu);
}

@@ -5790,8 +5817,8 @@ static void attach_task(struct rq *rq, struct task_struct *p)
lockdep_assert_held(&rq->lock);

BUG_ON(task_rq(p) != rq);
- p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
}


Subject: [tip:sched/core] sched/core: Fix incorrect wait time and wait count statistics

Commit-ID: 3ea94de15ce9f3a217f6d0a7e9e0f48388902bb7
Gitweb: http://git.kernel.org/tip/3ea94de15ce9f3a217f6d0a7e9e0f48388902bb7
Author: Joonwoo Park <[email protected]>
AuthorDate: Thu, 12 Nov 2015 19:38:54 -0800
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 23 Nov 2015 09:48:17 +0100

sched/core: Fix incorrect wait time and wait count statistics

At present the scheduler resets a task's wait start timestamp when the
task migrates to another rq. This misleads the scheduler into reporting
less wait time than actual, by omitting the time spent waiting prior to
migration, and a higher wait count than actual, by counting the
migration as a wait end event. Both effects can be seen via trace or
/proc/<pid>/sched with CONFIG_SCHEDSTATS=y.

Carry forward the migrating task's wait time accrued prior to migration,
and do not count the migration as a wait end event, to fix these
statistics errors.

In order to determine whether a task is migrating, mark task->on_rq with
TASK_ON_RQ_MIGRATING while dequeuing and enqueuing due to migration.

Signed-off-by: Joonwoo Park <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/sched/core.c | 15 ++++++++++--
kernel/sched/fair.c | 67 +++++++++++++++++++++++++++++++++++++----------------
2 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac..1b7cb5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1071,8 +1071,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
{
lockdep_assert_held(&rq->lock);

- dequeue_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ dequeue_task(rq, p, 0);
set_task_cpu(p, new_cpu);
raw_spin_unlock(&rq->lock);

@@ -1080,8 +1080,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new

raw_spin_lock(&rq->lock);
BUG_ON(task_cpu(p) != new_cpu);
- p->on_rq = TASK_ON_RQ_QUEUED;
enqueue_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);

return rq;
@@ -1274,6 +1274,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!p->on_rq);

+ /*
+ * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
+ * because schedstat_wait_{start,end} rebase migrating task's wait_start
+ * time relying on p->on_rq.
+ */
+ WARN_ON_ONCE(p->state == TASK_RUNNING &&
+ p->sched_class == &fair_sched_class &&
+ (p->on_rq && !task_on_rq_migrating(p)));
+
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold either p->pi_lock or rq->lock, when changing
@@ -1310,9 +1319,11 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
src_rq = task_rq(p);
dst_rq = cpu_rq(cpu);

+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
} else {
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 95b944e..f7017ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -738,12 +738,56 @@ static void update_curr_fair(struct rq *rq)
update_curr(cfs_rq_of(&rq->curr->se));
}

+#ifdef CONFIG_SCHEDSTATS
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
+ u64 wait_start = rq_clock(rq_of(cfs_rq));
+
+ if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+ likely(wait_start > se->statistics.wait_start))
+ wait_start -= se->statistics.wait_start;
+
+ se->statistics.wait_start = wait_start;
}

+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct task_struct *p;
+ u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+
+ if (entity_is_task(se)) {
+ p = task_of(se);
+ if (task_on_rq_migrating(p)) {
+ /*
+ * Preserve migrating task's wait time so wait_start
+ * time stamp can be adjusted to accumulate wait time
+ * prior to migration.
+ */
+ se->statistics.wait_start = delta;
+ return;
+ }
+ trace_sched_stat_wait(p, delta);
+ }
+
+ se->statistics.wait_max = max(se->statistics.wait_max, delta);
+ se->statistics.wait_count++;
+ se->statistics.wait_sum += delta;
+ se->statistics.wait_start = 0;
+}
+#else
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
+
/*
* Task is being enqueued - update stats:
*/
@@ -757,23 +801,6 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_stats_wait_start(cfs_rq, se);
}

-static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
- schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
- schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- trace_sched_stat_wait(task_of(se),
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
- }
-#endif
- schedstat_set(se->statistics.wait_start, 0);
-}
-
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -5745,8 +5772,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);

- deactivate_task(env->src_rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ deactivate_task(env->src_rq, p, 0);
set_task_cpu(p, env->dst_cpu);
}

@@ -5879,8 +5906,8 @@ static void attach_task(struct rq *rq, struct task_struct *p)
lockdep_assert_held(&rq->lock);

BUG_ON(task_rq(p) != rq);
- p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
}