2009-04-01 16:39:47

by Peter Zijlstra

Subject: [PATCH] sched_rt: fix overload bug on rt group scheduling

Fixes an easily triggerable BUG() when setting process affinities.

Make sure to count the number of migratable tasks in the same place:
the root rt_rq. Otherwise the number doesn't make sense and we'll hit
the BUG in set_cpus_allowed_rt().

Also, make sure we only count tasks, not groups (this is probably
already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
for groups, but be more explicit)

Signed-off-by: Peter Zijlstra <[email protected]>
Tested-by: Thomas Gleixner <[email protected]>
CC: [email protected]
---
kernel/sched_rt.c | 16 +++++++++++++++-
1 files changed, 15 insertions(+), 1 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index de4469a..c1ee8dc 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)

#ifdef CONFIG_RT_GROUP_SCHED

+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)

#else /* CONFIG_RT_GROUP_SCHED */

+#define rt_entity_is_task(rt_se) (1)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)

static void update_rt_migration(struct rt_rq *rt_rq)
{
- if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+ if (rt_rq->rt_nr_migratory > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
@@ -86,6 +90,11 @@ static void update_rt_migration(struct rt_rq *rt_rq)

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;

@@ -94,6 +103,11 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;
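
For context, the BUG() the changelog refers to sits in set_cpus_allowed_rt(), which adjusts the migratory count on the root rt_rq (rq->rt). When inc/dec_rt_migration() account that count on a per-group rt_rq instead, the two views disagree and the BUG_ON() can fire. A paraphrased sketch of that path in kernels of this vintage (not the exact upstream source):

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	if (p->se.on_rq && weight != p->rt.nr_cpus_allowed) {
		struct rq *rq = task_rq(p);

		if (p->rt.nr_cpus_allowed <= 1 && weight > 1) {
			rq->rt.rt_nr_migratory++;
		} else if (p->rt.nr_cpus_allowed > 1 && weight <= 1) {
			/* Fires when enqueue accounted the task on a group rt_rq. */
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	/* ... cpumask update elided ... */
}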


2009-04-02 00:56:44

by Gregory Haskins

Subject: Re: [PATCH] sched_rt: fix overload bug on rt group scheduling

Hi Peter,

Peter Zijlstra wrote:
> Fixes an easily triggerable BUG() when setting process affinities.
>
> Make sure to count the number of migratable tasks in the same place:
> the root rt_rq. Otherwise the number doesn't make sense and we'll hit
> the BUG in set_cpus_allowed_rt().
>
> Also, make sure we only count tasks, not groups (this is probably
> already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
> for groups, but be more explicit)
>
> Signed-off-by: Peter Zijlstra <[email protected]>
> Tested-by: Thomas Gleixner <[email protected]>
> CC: [email protected]
> ---
> kernel/sched_rt.c | 16 +++++++++++++++-
> 1 files changed, 15 insertions(+), 1 deletions(-)
>
> diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
> index de4469a..c1ee8dc 100644
> --- a/kernel/sched_rt.c
> +++ b/kernel/sched_rt.c
> @@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
>
> #ifdef CONFIG_RT_GROUP_SCHED
>
> +#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
> +
> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> {
> return rt_rq->rq;
> @@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
>
> #else /* CONFIG_RT_GROUP_SCHED */
>
> +#define rt_entity_is_task(rt_se) (1)
> +
> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> {
> return container_of(rt_rq, struct rq, rt);
> @@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
>
> static void update_rt_migration(struct rt_rq *rt_rq)
> {
> - if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
> + if (rt_rq->rt_nr_migratory > 1) {
>

The rest of the patch is making sense to me, but I am a little concerned
about this change.

The original logic was designed to catch the condition where you might
have a non-migratory task running and a migratory task queued. This
would mean nr_running == 2 and nr_migratory == 1, which is eligible for
overload handling. (Of course, the opposite could be true: the
migratory task is running and the non-migratory one is queued. We cannot
discern the difference here, so we go into overload anyway. This is just
suboptimal but functionally correct.)

What can happen now is that you could have the above condition but we will
not go into overload unless there are at least two migratory tasks
queued. This will undoubtedly allow a potential scheduling latency on
task #2.

I think we really need to qualify overload on both running > 1 and at
least one migratory task. Is there a way to get this state, even if by
other means?
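
As a minimal standalone illustration (plain userspace C, not kernel code) of the case described above, assume two tasks queued on one CPU, only one of them migratory:

#include <stdio.h>

int main(void)
{
	/* One pinned task and one migratory task queued on the same CPU. */
	unsigned long rt_nr_running = 2;
	unsigned long rt_nr_migratory = 1;

	int old_test = rt_nr_migratory && (rt_nr_running > 1);	/* 1 -> overload */
	int new_test = rt_nr_migratory > 1;			/* 0 -> overload missed */

	printf("old=%d new=%d\n", old_test, new_test);
	return 0;
}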

> if (!rt_rq->overloaded) {
> rt_set_overload(rq_of_rt_rq(rt_rq));
> rt_rq->overloaded = 1;
> @@ -86,6 +90,11 @@ static void update_rt_migration(struct rt_rq *rt_rq)
>
> static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> {
> + if (!rt_entity_is_task(rt_se))
> + return;
> +
> + rt_rq = &rq_of_rt_rq(rt_rq)->rt;
> +
> if (rt_se->nr_cpus_allowed > 1)
> rt_rq->rt_nr_migratory++;
>
> @@ -94,6 +103,11 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
>
> static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> {
> + if (!rt_entity_is_task(rt_se))
> + return;
> +
> + rt_rq = &rq_of_rt_rq(rt_rq)->rt;
> +
> if (rt_se->nr_cpus_allowed > 1)
> rt_rq->rt_nr_migratory--;
>
>
>




2009-04-02 06:47:48

by Peter Zijlstra

Subject: Re: [PATCH] sched_rt: fix overload bug on rt group scheduling

On Wed, 2009-04-01 at 20:58 -0400, Gregory Haskins wrote:
> Hi Peter,
>
> Peter Zijlstra wrote:
> > Fixes an easily triggerable BUG() when setting process affinities.
> >
> > Make sure to count the number of migratable tasks in the same place:
> > the root rt_rq. Otherwise the number doesn't make sense and we'll hit
> > the BUG in set_cpus_allowed_rt().
> >
> > Also, make sure we only count tasks, not groups (this is probably
> > already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
> > for groups, but be more explicit)
> >
> > Signed-off-by: Peter Zijlstra <[email protected]>
> > Tested-by: Thomas Gleixner <[email protected]>
> > CC: [email protected]
> > ---
> > kernel/sched_rt.c | 16 +++++++++++++++-
> > 1 files changed, 15 insertions(+), 1 deletions(-)
> >
> > diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
> > index de4469a..c1ee8dc 100644
> > --- a/kernel/sched_rt.c
> > +++ b/kernel/sched_rt.c
> > @@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
> >
> > #ifdef CONFIG_RT_GROUP_SCHED
> >
> > +#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
> > +
> > static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> > {
> > return rt_rq->rq;
> > @@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
> >
> > #else /* CONFIG_RT_GROUP_SCHED */
> >
> > +#define rt_entity_is_task(rt_se) (1)
> > +
> > static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> > {
> > return container_of(rt_rq, struct rq, rt);
> > @@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
> >
> > static void update_rt_migration(struct rt_rq *rt_rq)
> > {
> > - if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
> > + if (rt_rq->rt_nr_migratory > 1) {
> >
>
> The rest of the patch is making sense to me, but I am a little concerned
> about this change.
>
> The original logic was designed to catch the condition where you might
> have a non-migratory task running and a migratory task queued. This
> would mean nr_running == 2 and nr_migratory == 1, which is eligible for
> overload handling. (Of course, the opposite could be true: the
> migratory task is running and the non-migratory one is queued. We cannot
> discern the difference here, so we go into overload anyway. This is just
> suboptimal but functionally correct.)
>
> What can happen now is that you could have the above condition but we will
> not go into overload unless there are at least two migratory tasks
> queued. This will undoubtedly allow a potential scheduling latency on
> task #2.
>
> I think we really need to qualify overload on both running > 1 and at
> least one migratory task. Is there a way to get this state, even if by
> other means?

Ah, yes, I missed that bit. I ripped out the rt_nr_running term because I
1) didn't think of this, and 2) rt_nr_running is accounted per rt_rq, not
per-cpu, so it doesn't match.

Since rt_nr_running is also used in a per-rt_rq setting, changing that
isn't possible, and we'd need to introduce another per-cpu variant if you
want to reinstate this.
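
A sketch of what such a variant could look like (the helper name is illustrative; the -v2 patch below ends up adding essentially this counter as rt_nr_total and open-coding the test in update_rt_migration()):

struct rt_rq {
	/* ... */
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;	/* queued tasks with nr_cpus_allowed > 1 */
	unsigned long rt_nr_total;	/* all rt tasks accounted on this root rt_rq */
	int overloaded;
#endif
	/* ... */
};

/* Overload iff more than one task is queued and at least one of them can move. */
static inline int rt_rq_is_overloaded(struct rt_rq *rt_rq)
{
	return rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1;
}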

2009-04-02 11:19:27

by Gregory Haskins

Subject: Re: [PATCH] sched_rt: fix overload bug on rt group scheduling

Peter Zijlstra wrote:
> On Wed, 2009-04-01 at 20:58 -0400, Gregory Haskins wrote:
>
>> Hi Peter,
>>
>> Peter Zijlstra wrote:
>>
>>> Fixes an easily triggerable BUG() when setting process affinities.
>>>
>>> Make sure to count the number of migratable tasks in the same place:
>>> the root rt_rq. Otherwise the number doesn't make sense and we'll hit
>>> the BUG in set_cpus_allowed_rt().
>>>
>>> Also, make sure we only count tasks, not groups (this is probably
>>> already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
>>> for groups, but be more explicit)
>>>
>>> Signed-off-by: Peter Zijlstra <[email protected]>
>>> Tested-by: Thomas Gleixner <[email protected]>
>>> CC: [email protected]
>>> ---
>>> kernel/sched_rt.c | 16 +++++++++++++++-
>>> 1 files changed, 15 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
>>> index de4469a..c1ee8dc 100644
>>> --- a/kernel/sched_rt.c
>>> +++ b/kernel/sched_rt.c
>>> @@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
>>>
>>> #ifdef CONFIG_RT_GROUP_SCHED
>>>
>>> +#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
>>> +
>>> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
>>> {
>>> return rt_rq->rq;
>>> @@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
>>>
>>> #else /* CONFIG_RT_GROUP_SCHED */
>>>
>>> +#define rt_entity_is_task(rt_se) (1)
>>> +
>>> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
>>> {
>>> return container_of(rt_rq, struct rq, rt);
>>> @@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
>>>
>>> static void update_rt_migration(struct rt_rq *rt_rq)
>>> {
>>> - if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
>>> + if (rt_rq->rt_nr_migratory > 1) {
>>>
>>>
>> The rest of the patch is making sense to me, but I am a little concerned
>> about this change.
>>
>> The original logic was designed to catch the condition where you might
>> have a non-migratory task running and a migratory task queued. This
>> would mean nr_running == 2 and nr_migratory == 1, which is eligible for
>> overload handling. (Of course, the opposite could be true: the
>> migratory task is running and the non-migratory one is queued. We cannot
>> discern the difference here, so we go into overload anyway. This is just
>> suboptimal but functionally correct.)
>>
>> What can happen now is that you could have the above condition but we will
>> not go into overload unless there are at least two migratory tasks
>> queued. This will undoubtedly allow a potential scheduling latency on
>> task #2.
>>
>> I think we really need to qualify overload on both running > 1 and at
>> least one migratory task. Is there a way to get this state, even if by
>> other means?
>>
>
> Ah, yes, I missed that bit. I ripped out the rt_nr_running term because I
> 1) didn't think of this, and 2) rt_nr_running is accounted per rt_rq, not
> per-cpu, so it doesn't match.
>
> Since rt_nr_running is also used in a per-rt_rq setting, changing that
> isn't possible, and we'd need to introduce another per-cpu variant if you
> want to reinstate this.
>
Yeah, I actually don't care if it's literally a nr_running stat
reinstated, or some other way to restore "correctness" ;)

Double bonus if you can solve that problem I mentioned above where I
can't tell if it's really eligible for overload in all cases (but it goes
into overload anyway to be conservative). I had been thinking of doing
something like subtracting from the nr_migratory count when a migratory
task is put on the cpu. But this is kind of messy, because you need to
handle all the places that can manipulate nr_migratory to make sure it
doesn't break.
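
A rough sketch of that idea, purely illustrative (the hook points and function names are hypothetical, and as noted it would have to be wired into every path that already touches nr_migratory):

/*
 * Hypothetical: stop counting a migratory task while it is actually
 * running, so rt_nr_migratory only describes tasks queued and waiting.
 */
static void rt_account_task_oncpu(struct rq *rq, struct task_struct *p)
{
	if (p->rt.nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;
	update_rt_migration(&rq->rt);
}

static void rt_account_task_offcpu(struct rq *rq, struct task_struct *p)
{
	if (p->rt.nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;
	update_rt_migration(&rq->rt);
}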

Thanks Peter!
-Greg





2009-07-08 15:38:30

by Peter Zijlstra

Subject: [PATCH] sched_rt: fix overload bug on rt group scheduling -v2

Greg, how's this?

---
Subject: sched_rt: fix overload bug on rt group scheduling
From: Peter Zijlstra <[email protected]>
Date: Wed, 01 Apr 2009 18:40:15 +0200

Fixes an easily triggerable BUG() when setting process affinities.

Make sure to count the number of migratable tasks in the same place:
the root rt_rq. Otherwise the number doesn't make sense and we'll hit
the BUG in set_cpus_allowed_rt().

Also, make sure we only count tasks, not groups (this is probably
already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
for groups, but be more explicit)

Signed-off-by: Peter Zijlstra <[email protected]>
---
kernel/sched_rt.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)

Index: linux-2.6/kernel/sched_rt.c
===================================================================
--- linux-2.6.orig/kernel/sched_rt.c
+++ linux-2.6/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_tas

#ifdef CONFIG_RT_GROUP_SCHED

+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(

#else /* CONFIG_RT_GROUP_SCHED */

+#define rt_entity_is_task(rt_se) (1)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(str

static void update_rt_migration(struct rt_rq *rt_rq)
{
- if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+ if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
@@ -86,6 +90,12 @@ static void update_rt_migration(struct r

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+ rt_rq->rt_nr_total++;
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;

@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sche

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+ rt_rq->rt_nr_total--;
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;

diff --git a/kernel/sched.c b/kernel/sched.c
index fd3ac58..a07d520 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
+ unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
#endif

2009-07-08 15:54:42

by Gregory Haskins

Subject: Re: [PATCH] sched_rt: fix overload bug on rt group scheduling -v2

Peter Zijlstra wrote:
> Greg, how's this?
>

Quick glance looks reasonable. I'll try to take a closer look this
afternoon.

-Greg




2009-07-10 04:05:49

by Gregory Haskins

Subject: Re: [PATCH] sched_rt: fix overload bug on rt group scheduling -v2

Peter Zijlstra wrote:
> Greg, how's this?
>

Hi Peter,
Sorry for the delay getting back to you.

I can't vouch specifically for the rt-group part, but as far as the
general balancer modifications to the migration code go, it looks good.

> ---
> Subject: sched_rt: fix overload bug on rt group scheduling
> From: Peter Zijlstra <[email protected]>
> Date: Wed, 01 Apr 2009 18:40:15 +0200
>
> Fixes an easily triggerable BUG() when setting process affinities.
>
> Make sure to count the number of migratable tasks in the same place:
> the root rt_rq. Otherwise the number doesn't make sense and we'll hit
> the BUG in set_cpus_allowed_rt().
>
> Also, make sure we only count tasks, not groups (this is probably
> already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
> for groups, but be more explicit)
>
> Signed-off-by: Peter Zijlstra <[email protected]>
>

Acked-by: Gregory Haskins <[email protected]>

> ---
> kernel/sched_rt.c | 18 +++++++++++++++++-
> 1 file changed, 17 insertions(+), 1 deletion(-)
>
> Index: linux-2.6/kernel/sched_rt.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched_rt.c
> +++ linux-2.6/kernel/sched_rt.c
> @@ -10,6 +10,8 @@ static inline struct task_struct *rt_tas
>
> #ifdef CONFIG_RT_GROUP_SCHED
>
> +#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
> +
> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> {
> return rt_rq->rq;
> @@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(
>
> #else /* CONFIG_RT_GROUP_SCHED */
>
> +#define rt_entity_is_task(rt_se) (1)
> +
> static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
> {
> return container_of(rt_rq, struct rq, rt);
> @@ -73,7 +77,7 @@ static inline void rt_clear_overload(str
>
> static void update_rt_migration(struct rt_rq *rt_rq)
> {
> - if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
> + if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
> if (!rt_rq->overloaded) {
> rt_set_overload(rq_of_rt_rq(rt_rq));
> rt_rq->overloaded = 1;
> @@ -86,6 +90,12 @@ static void update_rt_migration(struct r
>
> static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> {
> + if (!rt_entity_is_task(rt_se))
> + return;
> +
> + rt_rq = &rq_of_rt_rq(rt_rq)->rt;
> +
> + rt_rq->rt_nr_total++;
> if (rt_se->nr_cpus_allowed > 1)
> rt_rq->rt_nr_migratory++;
>
> @@ -94,6 +104,12 @@ static void inc_rt_migration(struct sche
>
> static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> {
> + if (!rt_entity_is_task(rt_se))
> + return;
> +
> + rt_rq = &rq_of_rt_rq(rt_rq)->rt;
> +
> + rt_rq->rt_nr_total--;
> if (rt_se->nr_cpus_allowed > 1)
> rt_rq->rt_nr_migratory--;
>
> diff --git a/kernel/sched.c b/kernel/sched.c
> index fd3ac58..a07d520 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -493,6 +493,7 @@ struct rt_rq {
> #endif
> #ifdef CONFIG_SMP
> unsigned long rt_nr_migratory;
> + unsigned long rt_nr_total;
> int overloaded;
> struct plist_head pushable_tasks;
> #endif
>
>




2009-07-10 10:42:08

by Peter Zijlstra

Subject: [tip:sched/urgent] sched_rt: Fix overload bug on rt group scheduling

Commit-ID: a1ba4d8ba9f06a397e97cbd67a93ee306860b40a
Gitweb: http://git.kernel.org/tip/a1ba4d8ba9f06a397e97cbd67a93ee306860b40a
Author: Peter Zijlstra <[email protected]>
AuthorDate: Wed, 1 Apr 2009 18:40:15 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 10 Jul 2009 10:43:29 +0200

sched_rt: Fix overload bug on rt group scheduling

Fixes an easily triggerable BUG() when setting process affinities.

Make sure to count the number of migratable tasks in the same place:
the root rt_rq. Otherwise the number doesn't make sense and we'll hit
the BUG in set_cpus_allowed_rt().

Also, make sure we only count tasks, not groups (this is probably
already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
for groups, but be more explicit)

Tested-by: Thomas Gleixner <[email protected]>
CC: [email protected]
Signed-off-by: Peter Zijlstra <[email protected]>
Acked-by: Gregory Haskins <[email protected]>
LKML-Reference: <1247067476.9777.57.camel@twins>
Signed-off-by: Ingo Molnar <[email protected]>


---
kernel/sched.c | 1 +
kernel/sched_rt.c | 18 +++++++++++++++++-
2 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d..a17f3d9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
+ unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
#endif
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9bf0d2a..3918e01 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)

#ifdef CONFIG_RT_GROUP_SCHED

+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)

#else /* CONFIG_RT_GROUP_SCHED */

+#define rt_entity_is_task(rt_se) (1)
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)

static void update_rt_migration(struct rt_rq *rt_rq)
{
- if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+ if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
@@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+ rt_rq->rt_nr_total++;
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;

@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+ if (!rt_entity_is_task(rt_se))
+ return;
+
+ rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+ rt_rq->rt_nr_total--;
if (rt_se->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;