2020-08-24 12:32:22

by Xunlei Pang

[permalink] [raw]
Subject: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

We've met problems in our production environment where tasks
with a full cpumask (e.g. after being put into a cpuset or set
to full affinity) were occasionally migrated to our isolated cpus.

After some analysis, we found that it is due to the current
select_idle_smt() not considering the sched_domain mask.

Fix it by checking the valid domain mask in select_idle_smt().

Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings()")
Reported-by: Wetp Zhang <[email protected]>
Signed-off-by: Xunlei Pang <[email protected]>
---
kernel/sched/fair.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a68a05..fa942c4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
/*
* Scan the local SMT mask for idle CPUs.
*/
-static int select_idle_smt(struct task_struct *p, int target)
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
int cpu;

@@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
return -1;

for_each_cpu(cpu, cpu_smt_mask(target)) {
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
+ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
continue;
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
return cpu;
@@ -6099,7 +6100,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
return -1;
}

-static inline int select_idle_smt(struct task_struct *p, int target)
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
return -1;
}
@@ -6274,7 +6275,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
if ((unsigned)i < nr_cpumask_bits)
return i;

- i = select_idle_smt(p, target);
+ i = select_idle_smt(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;

--
1.8.3.1


2020-08-24 13:42:02

by Srikar Dronamraju

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

* Xunlei Pang <[email protected]> [2020-08-24 20:30:19]:

> We've met problems that occasionally tasks with full cpumask
> (e.g. by putting it into a cpuset or setting to full affinity)
> were migrated to our isolated cpus in production environment.
>
> After some analysis, we found that it is due to the current
> select_idle_smt() not considering the sched_domain mask.
>
> Fix it by checking the valid domain mask in select_idle_smt().
>
> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
> Reported-by: Wetp Zhang <[email protected]>
> Signed-off-by: Xunlei Pang <[email protected]>
> ---
> kernel/sched/fair.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1a68a05..fa942c4 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
> /*
> * Scan the local SMT mask for idle CPUs.
> */
> -static int select_idle_smt(struct task_struct *p, int target)
> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> {
> int cpu;
>
> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
> return -1;
>
> for_each_cpu(cpu, cpu_smt_mask(target)) {
> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
> continue;

Don't think this is right thing to do. What if this task had set a cpumask
that doesn't cover all the cpus in this sched_domain_span(sd)

cpu_smt_mask(target) would already limit to the sched_domain_span(sd) so I
am not sure how this can help?


--
Thanks and Regards
Srikar Dronamraju

2020-08-25 02:12:34

by Xunlei Pang

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

On 2020/8/24 PM9:38, Srikar Dronamraju wrote:
> * Xunlei Pang <[email protected]> [2020-08-24 20:30:19]:
>
>> We've met problems that occasionally tasks with full cpumask
>> (e.g. by putting it into a cpuset or setting to full affinity)
>> were migrated to our isolated cpus in production environment.
>>
>> After some analysis, we found that it is due to the current
>> select_idle_smt() not considering the sched_domain mask.
>>
>> Fix it by checking the valid domain mask in select_idle_smt().
>>
>> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
>> Reported-by: Wetp Zhang <[email protected]>
>> Signed-off-by: Xunlei Pang <[email protected]>
>> ---
>> kernel/sched/fair.c | 9 +++++----
>> 1 file changed, 5 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 1a68a05..fa942c4 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
>> /*
>> * Scan the local SMT mask for idle CPUs.
>> */
>> -static int select_idle_smt(struct task_struct *p, int target)
>> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
>> {
>> int cpu;
>>
>> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
>> return -1;
>>
>> for_each_cpu(cpu, cpu_smt_mask(target)) {
>> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
>> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
>> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
>> continue;
>
> Don't think this is right thing to do. What if this task had set a cpumask
> that doesn't cover all the cpus in this sched_domain_span(sd)

It doesn't matter, without this patch, it selects an idle cpu from:
"cpu_smt_mask(target) and p->cpus_ptr"

with this patch, it selects an idle cpu from:
"cpu_smt_mask(target) and p->cpus_ptr and sched_domain_span(sd)"

>
> cpu_smt_mask(target) would already limit to the sched_domain_span(sd) so I
> am not sure how this can help?
>
>

Here is an example:
CPU0 and CPU16 are hyper-thread pair, CPU16 is domain isolated. So its
sd_llc doesn't contain CPU16, and cpu_smt_mask(0) is 0 and 16.

Then, when @target is 0, select_idle_smt() may return the isolated (and
idle) CPU16 without this patch.

2020-08-25 03:23:38

by Srikar Dronamraju

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

* xunlei <[email protected]> [2020-08-25 10:11:24]:

> On 2020/8/24 PM9:38, Srikar Dronamraju wrote:
> > * Xunlei Pang <[email protected]> [2020-08-24 20:30:19]:
> >
> >> We've met problems that occasionally tasks with full cpumask
> >> (e.g. by putting it into a cpuset or setting to full affinity)
> >> were migrated to our isolated cpus in production environment.
> >>
> >> After some analysis, we found that it is due to the current
> >> select_idle_smt() not considering the sched_domain mask.
> >>
> >> Fix it by checking the valid domain mask in select_idle_smt().
> >>
> >> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
> >> Reported-by: Wetp Zhang <[email protected]>
> >> Signed-off-by: Xunlei Pang <[email protected]>
> >> ---
> >> kernel/sched/fair.c | 9 +++++----
> >> 1 file changed, 5 insertions(+), 4 deletions(-)
> >>
> >> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> >> index 1a68a05..fa942c4 100644
> >> --- a/kernel/sched/fair.c
> >> +++ b/kernel/sched/fair.c
> >> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
> >> /*
> >> * Scan the local SMT mask for idle CPUs.
> >> */
> >> -static int select_idle_smt(struct task_struct *p, int target)
> >> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> >> {
> >> int cpu;
> >>
> >> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
> >> return -1;
> >>
> >> for_each_cpu(cpu, cpu_smt_mask(target)) {
> >> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
> >> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
> >> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
> >> continue;
> >
> > Don't think this is right thing to do. What if this task had set a cpumask
> > that doesn't cover all the cpus in this sched_domain_span(sd)

ah, right I missed the 'or' part.
>
> It doesn't matter, without this patch, it selects an idle cpu from:
> "cpu_smt_mask(target) and p->cpus_ptr"
>
> with this patch, it selects an idle cpu from:
> "cpu_smt_mask(target) and p->cpus_ptr and sched_domain_span(sd)"
>
> >
> > cpu_smt_mask(target) would already limit to the sched_domain_span(sd) so I
> > am not sure how this can help?
> >
> >
>
> Here is an example:
> CPU0 and CPU16 are hyper-thread pair, CPU16 is domain isolated. So its
> sd_llc doesn't contain CPU16, and cpu_smt_mask(0) is 0 and 16.
>
> Then we have @target is 0, select_idle_smt() may return the isolated(and
> idle) CPU16 without this patch.

Okay.

--
Thanks and Regards
Srikar Dronamraju

2020-08-25 08:56:31

by Jiang Biao

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

On Mon, 24 Aug 2020 at 20:31, Xunlei Pang <[email protected]> wrote:
>
> We've met problems that occasionally tasks with full cpumask
> (e.g. by putting it into a cpuset or setting to full affinity)
> were migrated to our isolated cpus in production environment.
>
> After some analysis, we found that it is due to the current
> select_idle_smt() not considering the sched_domain mask.
>
> Fix it by checking the valid domain mask in select_idle_smt().
>
> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
> Reported-by: Wetp Zhang <[email protected]>
> Signed-off-by: Xunlei Pang <[email protected]>
> ---
> kernel/sched/fair.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1a68a05..fa942c4 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
> /*
> * Scan the local SMT mask for idle CPUs.
> */
> -static int select_idle_smt(struct task_struct *p, int target)
> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> {
> int cpu;
>
> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
> return -1;
>
> for_each_cpu(cpu, cpu_smt_mask(target)) {
> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
Maybe the following change could be better, :)
for_each_cpu_and(cpu, cpu_smt_mask(target), sched_domain_span(sd))
keep a similar style with select_idle_core/cpu, and could reduce loops.

Just an option.
Reviewed-by: Jiang Biao <[email protected]>

2020-08-25 12:18:43

by Xunlei Pang

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

On 2020/8/25 下午2:37, Jiang Biao wrote:
> On Mon, 24 Aug 2020 at 20:31, Xunlei Pang <[email protected]> wrote:
>>
>> We've met problems that occasionally tasks with full cpumask
>> (e.g. by putting it into a cpuset or setting to full affinity)
>> were migrated to our isolated cpus in production environment.
>>
>> After some analysis, we found that it is due to the current
>> select_idle_smt() not considering the sched_domain mask.
>>
>> Fix it by checking the valid domain mask in select_idle_smt().
>>
>> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
>> Reported-by: Wetp Zhang <[email protected]>
>> Signed-off-by: Xunlei Pang <[email protected]>
>> ---
>> kernel/sched/fair.c | 9 +++++----
>> 1 file changed, 5 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 1a68a05..fa942c4 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
>> /*
>> * Scan the local SMT mask for idle CPUs.
>> */
>> -static int select_idle_smt(struct task_struct *p, int target)
>> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
>> {
>> int cpu;
>>
>> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
>> return -1;
>>
>> for_each_cpu(cpu, cpu_smt_mask(target)) {
>> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
>> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
>> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
> Maybe the following change could be better, :)
> for_each_cpu_and(cpu, cpu_smt_mask(target), sched_domain_span(sd))
> keep a similar style with select_idle_core/cpu, and could reduce loops.
>

I thought about that, but given that the smt mask is usually small, the
original code may run a bit faster?

> Just an option.
> Reviewed-by: Jiang Biao <[email protected]>
>

Thanks :-)

2020-08-25 13:11:23

by Jiang Biao

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

On Tue, 25 Aug 2020 at 17:28, xunlei <[email protected]> wrote:
>
> On 2020/8/25 下午2:37, Jiang Biao wrote:
> > On Mon, 24 Aug 2020 at 20:31, Xunlei Pang <[email protected]> wrote:
> >>
> >> We've met problems that occasionally tasks with full cpumask
> >> (e.g. by putting it into a cpuset or setting to full affinity)
> >> were migrated to our isolated cpus in production environment.
> >>
> >> After some analysis, we found that it is due to the current
> >> select_idle_smt() not considering the sched_domain mask.
> >>
> >> Fix it by checking the valid domain mask in select_idle_smt().
> >>
> >> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
> >> Reported-by: Wetp Zhang <[email protected]>
> >> Signed-off-by: Xunlei Pang <[email protected]>
> >> ---
> >> kernel/sched/fair.c | 9 +++++----
> >> 1 file changed, 5 insertions(+), 4 deletions(-)
> >>
> >> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> >> index 1a68a05..fa942c4 100644
> >> --- a/kernel/sched/fair.c
> >> +++ b/kernel/sched/fair.c
> >> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
> >> /*
> >> * Scan the local SMT mask for idle CPUs.
> >> */
> >> -static int select_idle_smt(struct task_struct *p, int target)
> >> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> >> {
> >> int cpu;
> >>
> >> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
> >> return -1;
> >>
> >> for_each_cpu(cpu, cpu_smt_mask(target)) {
> >> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
> >> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
> >> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
> > Maybe the following change could be better, :)
> > for_each_cpu_and(cpu, cpu_smt_mask(target), sched_domain_span(sd))
> > keep a similar style with select_idle_core/cpu, and could reduce loops.
> >
>
> I thought that, but given that smt mask is usually small, the original
> code may run a bit faster?
Not sure. :)
It's OK for me.

Regards,
Jiang

2020-08-28 02:57:45

by Xunlei Pang

[permalink] [raw]
Subject: Re: [PATCH] sched/fair: Fix wrong cpu selecting from isolated domain

On 2020/8/24 PM8:30, Xunlei Pang wrote:
> We've met problems that occasionally tasks with full cpumask
> (e.g. by putting it into a cpuset or setting to full affinity)
> were migrated to our isolated cpus in production environment.
>
> After some analysis, we found that it is due to the current
> select_idle_smt() not considering the sched_domain mask.
>
> Fix it by checking the valid domain mask in select_idle_smt().
>
> Fixes: 10e2f1acd010 ("sched/core: Rewrite and improve select_idle_siblings())
> Reported-by: Wetp Zhang <[email protected]>
> Signed-off-by: Xunlei Pang <[email protected]>
> ---
> kernel/sched/fair.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1a68a05..fa942c4 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
> /*
> * Scan the local SMT mask for idle CPUs.
> */
> -static int select_idle_smt(struct task_struct *p, int target)
> +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> {
> int cpu;
>
> @@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
> return -1;
>
> for_each_cpu(cpu, cpu_smt_mask(target)) {
> - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
> + if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
> + !cpumask_test_cpu(cpu, sched_domain_span(sd)))
> continue;
> if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
> return cpu;
> @@ -6099,7 +6100,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
> return -1;
> }
>
> -static inline int select_idle_smt(struct task_struct *p, int target)
> +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
> {
> return -1;
> }
> @@ -6274,7 +6275,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
> if ((unsigned)i < nr_cpumask_bits)
> return i;
>
> - i = select_idle_smt(p, target);
> + i = select_idle_smt(p, sd, target);
> if ((unsigned)i < nr_cpumask_bits)
> return i;
>
>

Hi Peter, any other comments?