2019-06-27 01:37:49

by Subhra Mazumdar

Subject: [PATCH v3 6/7] x86/smpboot: introduce per-cpu variable for HT siblings

Introduce a per-cpu variable to keep the number of HT siblings of a cpu.
This will be used for quick lookup in select_idle_cpu to determine the
limits of search. This patch does it only for x86.

Signed-off-by: subhra mazumdar <[email protected]>
---
 arch/x86/include/asm/smp.h      |  1 +
 arch/x86/include/asm/topology.h |  1 +
 arch/x86/kernel/smpboot.c       | 17 ++++++++++++++++-
 include/linux/topology.h        |  4 ++++
 4 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index da545df..1e90cbd 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -22,6 +22,7 @@ extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpumask_weight_sibling);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 453cf38..dd19c71 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -111,6 +111,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
#ifdef CONFIG_SMP
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_weight(cpu) (per_cpu(cpumask_weight_sibling, cpu))

extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 362dd89..20bf676 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -85,6 +85,10 @@
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

+/* representing number of HT siblings of each CPU */
+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpumask_weight_sibling);
+EXPORT_PER_CPU_SYMBOL(cpumask_weight_sibling);
+
/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -520,6 +524,8 @@ void set_cpu_sibling_map(int cpu)

if (!has_mp) {
cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
+ per_cpu(cpumask_weight_sibling, cpu) =
+ cpumask_weight(topology_sibling_cpumask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
c->booted_cores = 1;
@@ -529,8 +535,12 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);

- if ((i == cpu) || (has_smt && match_smt(c, o)))
+ if ((i == cpu) || (has_smt && match_smt(c, o))) {
link_mask(topology_sibling_cpumask, cpu, i);
+ threads = cpumask_weight(topology_sibling_cpumask(cpu));
+ per_cpu(cpumask_weight_sibling, cpu) = threads;
+ per_cpu(cpumask_weight_sibling, i) = threads;
+ }

if ((i == cpu) || (has_mp && match_llc(c, o)))
link_mask(cpu_llc_shared_mask, cpu, i);
@@ -1173,6 +1183,8 @@ static __init void disable_smp(void)
else
physid_set_mask_of_physid(0, &phys_cpu_present_map);
cpumask_set_cpu(0, topology_sibling_cpumask(0));
+ per_cpu(cpumask_weight_sibling, 0) =
+ cpumask_weight(topology_sibling_cpumask(0));
cpumask_set_cpu(0, topology_core_cpumask(0));
}

@@ -1482,6 +1494,8 @@ static void remove_siblinginfo(int cpu)

for_each_cpu(sibling, topology_core_cpumask(cpu)) {
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ per_cpu(cpumask_weight_sibling, sibling) =
+ cpumask_weight(topology_sibling_cpumask(sibling));
/*/
* last thread sibling in this cpu core going down
*/
@@ -1495,6 +1509,7 @@ static void remove_siblinginfo(int cpu)
cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
cpumask_clear(cpu_llc_shared_mask(cpu));
cpumask_clear(topology_sibling_cpumask(cpu));
+ per_cpu(cpumask_weight_sibling, cpu) = 0;
cpumask_clear(topology_core_cpumask(cpu));
c->cpu_core_id = 0;
c->booted_cores = 0;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index cb0775e..a85aea1 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -190,6 +190,10 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_sibling_weight
+#define topology_sibling_weight(cpu) \
+ cpumask_weight(topology_sibling_cpumask(cpu))
+#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
--
2.9.3


2019-06-27 06:52:40

by Thomas Gleixner

Subject: Re: [PATCH v3 6/7] x86/smpboot: introduce per-cpu variable for HT siblings

On Wed, 26 Jun 2019, subhra mazumdar wrote:

> Introduce a per-cpu variable to keep the number of HT siblings of a cpu.
> This will be used for quick lookup in select_idle_cpu to determine the
> limits of search.

Why? The number of siblings is constant at least today unless you play
silly cpu hotplug games. A bit more justification for adding yet another
random storage would be appreciated.

> This patch does it only for x86.

# grep 'This patch' Documentation/process/submitting-patches.rst

IOW, we all know already that this is a patch and from the subject prefix
and the diffstat it's pretty obvious that this is x86 only.

So instead of documenting the obvious, please add proper context to justify
the change.

> +/* representing number of HT siblings of each CPU */
> +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpumask_weight_sibling);
> +EXPORT_PER_CPU_SYMBOL(cpumask_weight_sibling);

Why does this need an export? No module has any reason to access this.

> /* representing HT and core siblings of each logical CPU */
> DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
> EXPORT_PER_CPU_SYMBOL(cpu_core_map);
> @@ -520,6 +524,8 @@ void set_cpu_sibling_map(int cpu)
>
> if (!has_mp) {
> cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
> + per_cpu(cpumask_weight_sibling, cpu) =
> + cpumask_weight(topology_sibling_cpumask(cpu));
> cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
> cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
> c->booted_cores = 1;
> @@ -529,8 +535,12 @@ void set_cpu_sibling_map(int cpu)
> for_each_cpu(i, cpu_sibling_setup_mask) {
> o = &cpu_data(i);
>
> - if ((i == cpu) || (has_smt && match_smt(c, o)))
> + if ((i == cpu) || (has_smt && match_smt(c, o))) {
> link_mask(topology_sibling_cpumask, cpu, i);
> + threads = cpumask_weight(topology_sibling_cpumask(cpu));
> + per_cpu(cpumask_weight_sibling, cpu) = threads;
> + per_cpu(cpumask_weight_sibling, i) = threads;

This only works for SMT=2, but fails to update the rest for SMT=4.

> @@ -1482,6 +1494,8 @@ static void remove_siblinginfo(int cpu)
>
> for_each_cpu(sibling, topology_core_cpumask(cpu)) {
> cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
> + per_cpu(cpumask_weight_sibling, sibling) =
> + cpumask_weight(topology_sibling_cpumask(sibling));

While remove does the right thing.

Thanks,

tglx

2019-06-27 06:56:58

by Thomas Gleixner

Subject: Re: [PATCH v3 6/7] x86/smpboot: introduce per-cpu variable for HT siblings

On Thu, 27 Jun 2019, Thomas Gleixner wrote:

> On Wed, 26 Jun 2019, subhra mazumdar wrote:
>
> > Introduce a per-cpu variable to keep the number of HT siblings of a cpu.
> > This will be used for quick lookup in select_idle_cpu to determine the
> > limits of search.
>
> Why? The number of siblings is constant at least today unless you play
> silly cpu hotplug games. A bit more justification for adding yet another
> random storage would be appreciated.
>
> > This patch does it only for x86.
>
> # grep 'This patch' Documentation/process/submitting-patches.rst
>
> IOW, we all know already that this is a patch and from the subject prefix
> and the diffstat it's pretty obvious that this is x86 only.
>
> So instead of documenting the obvious, please add proper context to justify
> the change.

Aside of that the right ordering is to introduce the default fallback in a
separate patch, which explains the reasoning and then in the next one add
the x86 optimized version.

Thanks,

tglx

2019-06-28 01:09:36

by Subhra Mazumdar

Subject: Re: [PATCH v3 6/7] x86/smpboot: introduce per-cpu variable for HT siblings


On 6/26/19 11:51 PM, Thomas Gleixner wrote:
> On Wed, 26 Jun 2019, subhra mazumdar wrote:
>
>> Introduce a per-cpu variable to keep the number of HT siblings of a cpu.
>> This will be used for quick lookup in select_idle_cpu to determine the
>> limits of search.
> Why? The number of siblings is constant at least today unless you play
> silly cpu hotplug games. A bit more justification for adding yet another
> random storage would be appreciated.
Using cpumask_weight() every time in select_idle_cpu to compute the number
of SMT siblings can be costly, since cpumask_weight() may not be O(1) on
systems with a large number of CPUs (e.g. 8 sockets, each with many cores).
Beyond 512 CPUs the bitmask spans multiple cache lines, and touching
multiple cache lines in the scheduler fast path can cost more than this
optimization saves. Even within a single cache line it loops over longs.
We want to touch O(1) cache lines and do O(1) operations, hence the value
is pre-computed in a per-CPU variable.
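
For illustration, the weight computation boils down to the loop below
(a simplified sketch of the generic bitmap helper, not the exact kernel
code); the per-CPU variable lets select_idle_cpu skip this entirely:

/*
 * Simplified sketch: the popcount of every long in the mask has to be
 * summed, so the cost grows with the number of mask bits and, past 512
 * CPUs, with the number of cache lines the mask occupies.
 */
static unsigned int bitmap_weight_sketch(const unsigned long *bits,
					 unsigned int nbits)
{
	unsigned int i, w = 0;

	for (i = 0; i < nbits / BITS_PER_LONG; i++)
		w += hweight_long(bits[i]);

	if (nbits % BITS_PER_LONG)
		w += hweight_long(bits[i] & BITMAP_LAST_WORD_MASK(nbits));

	return w;
}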
>
>> This patch does it only for x86.
> # grep 'This patch' Documentation/process/submitting-patches.rst
>
> IOW, we all know already that this is a patch and from the subject prefix
> and the diffstat it's pretty obvious that this is x86 only.
>
> So instead of documenting the obvious, please add proper context to justify
> the change.
OK. The extra per-CPU optimization was done only for x86 since that is what
we cared about most and wanted to future-proof. I will add it for the other
architectures.
>
>> +/* representing number of HT siblings of each CPU */
>> +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpumask_weight_sibling);
>> +EXPORT_PER_CPU_SYMBOL(cpumask_weight_sibling);
> Why does this need an export? No module has any reason to access this.
I will remove it.
>
>> /* representing HT and core siblings of each logical CPU */
>> DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
>> EXPORT_PER_CPU_SYMBOL(cpu_core_map);
>> @@ -520,6 +524,8 @@ void set_cpu_sibling_map(int cpu)
>>
>> if (!has_mp) {
>> cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
>> + per_cpu(cpumask_weight_sibling, cpu) =
>> + cpumask_weight(topology_sibling_cpumask(cpu));
>> cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
>> cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
>> c->booted_cores = 1;
>> @@ -529,8 +535,12 @@ void set_cpu_sibling_map(int cpu)
>> for_each_cpu(i, cpu_sibling_setup_mask) {
>> o = &cpu_data(i);
>>
>> - if ((i == cpu) || (has_smt && match_smt(c, o)))
>> + if ((i == cpu) || (has_smt && match_smt(c, o))) {
>> link_mask(topology_sibling_cpumask, cpu, i);
>> + threads = cpumask_weight(topology_sibling_cpumask(cpu));
>> + per_cpu(cpumask_weight_sibling, cpu) = threads;
>> + per_cpu(cpumask_weight_sibling, i) = threads;
> This only works for SMT=2, but fails to update the rest for SMT=4.

I guess I assumed that x86 would always be SMT2; will fix this.
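
Something along these lines should cover SMT4/SMT8 as well (untested
sketch, refreshing the cached weight of every CPU already linked into
the sibling mask rather than only the pair being linked):

	if ((i == cpu) || (has_smt && match_smt(c, o))) {
		int sibling;

		link_mask(topology_sibling_cpumask, cpu, i);
		threads = cpumask_weight(topology_sibling_cpumask(cpu));
		/* keep the cached weight of all linked siblings in sync */
		for_each_cpu(sibling, topology_sibling_cpumask(cpu))
			per_cpu(cpumask_weight_sibling, sibling) = threads;
	}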

Thanks,
Subhra

>
>> @@ -1482,6 +1494,8 @@ static void remove_siblinginfo(int cpu)
>>
>> for_each_cpu(sibling, topology_core_cpumask(cpu)) {
>> cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
>> + per_cpu(cpumask_weight_sibling, sibling) =
>> + cpumask_weight(topology_sibling_cpumask(sibling));
> While remove does the right thing.
>
> Thanks,
>
> tglx

2019-06-28 01:12:53

by Subhra Mazumdar

Subject: Re: [PATCH v3 6/7] x86/smpboot: introduce per-cpu variable for HT siblings


On 6/26/19 11:54 PM, Thomas Gleixner wrote:
> On Thu, 27 Jun 2019, Thomas Gleixner wrote:
>
>> On Wed, 26 Jun 2019, subhra mazumdar wrote:
>>
>>> Introduce a per-cpu variable to keep the number of HT siblings of a cpu.
>>> This will be used for quick lookup in select_idle_cpu to determine the
>>> limits of search.
>> Why? The number of siblings is constant at least today unless you play
>> silly cpu hotplug games. A bit more justification for adding yet another
>> random storage would be appreciated.
>>
>>> This patch does it only for x86.
>> # grep 'This patch' Documentation/process/submitting-patches.rst
>>
>> IOW, we all know already that this is a patch and from the subject prefix
>> and the diffstat it's pretty obvious that this is x86 only.
>>
>> So instead of documenting the obvious, please add proper context to justify
>> the change.
> Aside of that the right ordering is to introduce the default fallback in a
> separate patch, which explains the reasoning and then in the next one add
> the x86 optimized version.
OK. I will also add the extra optimization for other architectures.
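Will reorder the series so a separate patch first introduces just the
generic fallback (the define this version already adds in
include/linux/topology.h), roughly:

#ifndef topology_sibling_weight
#define topology_sibling_weight(cpu) \
	cpumask_weight(topology_sibling_cpumask(cpu))
#endif

and then add the x86 per-cpu version on top of it.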

Thanks,
Subhra
>
> Thanks,
>
> tglx