2022-03-17 03:41:18

by 王擎

Subject: [PATCH] sched: dynamically configure sd_flags when cache topology is described in DT

From: Wang Qing <[email protected]>

The device tree can describe the cache topology of the system through the
"next-level-cache" property. When such a description is present, prefer
configuring SD_SHARE_PKG_RESOURCES for a sched domain from the DT cache
topology instead of relying on the architecture's default value.

The new helper cpus_share_self_cache() returns 1 when the CPUs of a span
exactly match the set of CPUs sharing one of the described cache levels,
0 when they do not, and -1 when the DT provides no cache information for
the span, in which case the default flags are left unchanged.

Signed-off-by: Wang Qing <[email protected]>
---
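Notes: the kind of DT cache description this patch consumes is sketched
below. Node names, labels and the two-level layout are illustrative only,
not taken from a real platform:

	cpu0: cpu@0 {
		...
		next-level-cache = <&L2_0>;
	};

	L2_0: l2-cache0 {
		compatible = "cache";
		cache-level = <2>;
		next-level-cache = <&L3>;
	};

	L3: l3-cache {
		compatible = "cache";
		cache-level = <3>;
	};

With such a description, init_cpu_cache_topology() records the chain of
cache nodes for each possible CPU, and cpus_share_self_cache() reports
whether a given cpumask is exactly the set of CPUs sharing one of those
nodes.
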
arch/arm/kernel/smp.c | 1 +
arch/arm64/kernel/smp.c | 1 +
arch/riscv/kernel/smpboot.c | 1 +
drivers/base/arch_topology.c | 59 +++++++++++++++++++++++++++++++++++++++++++
include/linux/arch_topology.h | 2 ++
kernel/sched/topology.c | 10 ++++++++
6 files changed, 74 insertions(+)

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 73fc645..62bbd9a
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -508,6 +508,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
unsigned int ncores = num_possible_cpus();

init_cpu_topology();
+ init_cpu_cache_topology();

smp_store_cpu_info(smp_processor_id());

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 27df5c1..94cf649
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -723,6 +723,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
unsigned int this_cpu;

init_cpu_topology();
+ init_cpu_cache_topology();

this_cpu = smp_processor_id();
store_cpu_topology(this_cpu);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 622f226..4f5a8b7
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -41,6 +41,7 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
init_cpu_topology();
+ init_cpu_cache_topology();
#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
apply_boot_alternatives();
#endif
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 9761541..127f540
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -609,6 +609,65 @@ static int __init parse_dt_topology(void)
#endif

/*
+ * cpu cache topology table
+ */
+#define MAX_CACHE_LEVEL 7
+struct device_node *cache_topology[NR_CPUS][MAX_CACHE_LEVEL];
+
+void init_cpu_cache_topology(void)
+{
+	struct device_node *node_cpu, *node_cache;
+	int cpu, level;
+
+	for_each_possible_cpu(cpu) {
+		node_cpu = of_get_cpu_node(cpu, NULL);
+		if (!node_cpu)
+			continue;
+
+		level = 0;
+		node_cache = node_cpu;
+		while (level < MAX_CACHE_LEVEL) {
+			node_cache = of_parse_phandle(node_cache, "next-level-cache", 0);
+			if (!node_cache)
+				break;
+
+			cache_topology[cpu][level++] = node_cache;
+		}
+		of_node_put(node_cpu);
+	}
+}
+
+int cpus_share_self_cache(const struct cpumask *cpu_map)
+{
+	int cache_level, cpu_id;
+	int first, last;
+	int id = cpumask_first(cpu_map);
+	int size = cpumask_weight(cpu_map);
+
+	for (cache_level = 0; cache_level < MAX_CACHE_LEVEL; cache_level++) {
+		if (!cache_topology[id][cache_level])
+			return -1;	/* no cache description in DT */
+
+		first = -1;
+		last = id;
+		for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
+			if (cache_topology[id][cache_level] == cache_topology[cpu_id][cache_level]) {
+				if (cpu_id < id || cpu_id >= id + size)
+					return 0;	/* shared with a CPU outside cpu_map */
+
+				first = (first == -1) ? cpu_id : first;
+				last = cpu_id;
+			}
+		}
+
+		if (first == id && last == id + size - 1)
+			return 1;	/* cpu_map is exactly the set of sharers */
+	}
+
+	return 0;
+}
+
+/*
* cpu topology table
*/
struct cpu_topology cpu_topology[NR_CPUS];
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index cce6136b..862e584
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -82,6 +82,8 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
+void init_cpu_cache_topology(void);
+int cpus_share_self_cache(const struct cpumask *cpu_map);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
const struct cpumask *cpu_clustergroup_mask(int cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a70..8264e2d
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1527,6 +1527,7 @@ sd_init(struct sched_domain_topology_level *tl,
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
int sd_id, sd_weight, sd_flags = 0;
struct cpumask *sd_span;
+ int ret;

#ifdef CONFIG_NUMA
/*
@@ -1539,6 +1540,15 @@ sd_init(struct sched_domain_topology_level *tl,

if (tl->sd_flags)
sd_flags = (*tl->sd_flags)();
+
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+ ret = cpus_share_self_cache(cpu_map);
+ if (ret == 1)
+ sd_flags |= SD_SHARE_PKG_RESOURCES;
+ else if (ret == 0)
+ sd_flags &= ~SD_SHARE_PKG_RESOURCES;
+#endif
+
if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
"wrong sd_flags in topology description\n"))
sd_flags &= TOPOLOGY_SD_FLAGS;
--
2.7.4