From: Rafael J. Wysocki <[email protected]>

Add arch_rebuild_sched_domains() for rebuilding scheduling domains and
updating topology on x86 and make the ITMT code use it.

First of all, this reduces code duplication somewhat and eliminates
the need to use an extern variable, but it will also lay the groundwork
for future work related to CPU capacity scaling.

Signed-off-by: Rafael J. Wysocki <[email protected]>
---
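Not part of the patch, just an illustration of the intended usage: with this
change, x86 code that needs a full sched domain rebuild calls the new helper
instead of setting the extern flag and calling rebuild_sched_domains() by
hand.  The function name example_topology_change() below is made up.

/* Illustration only, not part of the patch. */
#include <asm/topology.h>

static void example_topology_change(void)
{
        /*
         * arch_rebuild_sched_domains() sets x86_topology_update (now
         * static in smpboot.c) and calls rebuild_sched_domains(), so
         * callers no longer touch the flag directly.
         */
        arch_rebuild_sched_domains();
}
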
arch/x86/include/asm/topology.h | 6 ++++--
arch/x86/kernel/itmt.c | 12 ++++--------
arch/x86/kernel/smpboot.c | 10 +++++++++-
3 files changed, 17 insertions(+), 11 deletions(-)
Index: linux-pm/arch/x86/include/asm/topology.h
===================================================================
--- linux-pm.orig/arch/x86/include/asm/topology.h
+++ linux-pm/arch/x86/include/asm/topology.h
@@ -235,8 +235,6 @@ struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
-extern bool x86_topology_update;
-
#ifdef CONFIG_SCHED_MC_PRIO
#include <asm/percpu.h>
@@ -284,9 +282,13 @@ static inline long arch_scale_freq_capac
extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
+
+void arch_rebuild_sched_domains(void);
#else
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
+
+static inline void arch_rebuild_sched_domains(void) { }
#endif
extern void arch_scale_freq_tick(void);
Index: linux-pm/arch/x86/kernel/itmt.c
===================================================================
--- linux-pm.orig/arch/x86/kernel/itmt.c
+++ linux-pm/arch/x86/kernel/itmt.c
@@ -54,10 +54,8 @@ static int sched_itmt_update_handler(str
old_sysctl = sysctl_sched_itmt_enabled;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
- x86_topology_update = true;
- rebuild_sched_domains();
- }
+ if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled)
+ arch_rebuild_sched_domains();
mutex_unlock(&itmt_update_mutex);
@@ -114,8 +112,7 @@ int sched_set_itmt_support(void)
sysctl_sched_itmt_enabled = 1;
- x86_topology_update = true;
- rebuild_sched_domains();
+ arch_rebuild_sched_domains();
mutex_unlock(&itmt_update_mutex);
@@ -150,8 +147,7 @@ void sched_clear_itmt_support(void)
if (sysctl_sched_itmt_enabled) {
/* disable sched_itmt if we are no longer ITMT capable */
sysctl_sched_itmt_enabled = 0;
- x86_topology_update = true;
- rebuild_sched_domains();
+ arch_rebuild_sched_domains();
}
mutex_unlock(&itmt_update_mutex);
Index: linux-pm/arch/x86/kernel/smpboot.c
===================================================================
--- linux-pm.orig/arch/x86/kernel/smpboot.c
+++ linux-pm/arch/x86/kernel/smpboot.c
@@ -39,6 +39,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
@@ -125,7 +126,7 @@ static DEFINE_PER_CPU_ALIGNED(struct mwa
int __read_mostly __max_smt_threads = 1;
/* Flag to indicate if a complete sched domain rebuild is required */
-bool x86_topology_update;
+static bool x86_topology_update;
int arch_update_cpu_topology(void)
{
@@ -135,6 +136,13 @@ int arch_update_cpu_topology(void)
return retval;
}
+#ifdef CONFIG_X86_64
+void arch_rebuild_sched_domains(void) {
+ x86_topology_update = true;
+ rebuild_sched_domains();
+}
+#endif
+
static unsigned int smpboot_warm_reset_vector_count;
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
On Thursday, April 25, 2024 9:04:48 PM CEST Rafael J. Wysocki wrote:
> From: Rafael J. Wysocki <[email protected]>
>
> Add arch_rebuild_sched_domains() for rebuilding scheduling domains and
> updating topology on x86 and make the ITMT code use it.
>
> First of all, this reduces code duplication somewhat and eliminates
> the need to use an extern variable, but it will also lay the groundwork
> for future work related to CPU capacity scaling.
>
> Signed-off-by: Rafael J. Wysocki <[email protected]>
This obviously is a duplicate of patch [1/3], sorry about this. My bad.
I'll send the proper patch [2/3] in a reply to this message.
From: Rafael J. Wysocki <[email protected]>

In order to be able to compute the sizes of tasks consistently across all
CPUs in a hybrid system, it is necessary to provide CPU capacity scaling
information to the scheduler via arch_scale_cpu_capacity().

Add support for it via the arch_cpu_scale per-CPU variable that can be set
by whoever has sufficient information on the CPU capacities.

By default, arch_cpu_scale is equal to SCHED_CAPACITY_SCALE for all
CPUs, so this change by itself is not expected to alter the current
behavior of the kernel.

Signed-off-by: Rafael J. Wysocki <[email protected]>
---
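Not part of the patch, just a rough sketch of how code that knows the per-CPU
performance levels (for example, obtained from firmware or a driver) might use
the new interface together with arch_rebuild_sched_domains() from the previous
patch.  The function and parameter names below are made up and the scaling
convention is only illustrative.

/* Illustration only, not part of the patch. */
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <asm/topology.h>

/* example_perf[cpu]: some per-CPU performance level; max_perf: the largest one. */
static void example_set_capacities(const unsigned long *example_perf,
                                   unsigned long max_perf)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                /* Scale each CPU's capacity relative to the fastest CPU. */
                unsigned long cap = example_perf[cpu] * SCHED_CAPACITY_SCALE /
                                    max_perf;

                arch_set_cpu_capacity(cpu, cap);
        }

        /* Have the scheduler pick up the new per-CPU capacities. */
        arch_rebuild_sched_domains();
}

The scheduler then reads the values back through arch_scale_cpu_capacity(cpu),
which on x86 now returns the arch_cpu_scale per-CPU variable set above.
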
arch/x86/include/asm/topology.h | 15 +++++++++++++++
arch/x86/kernel/smpboot.c | 3 +++
2 files changed, 18 insertions(+)
Index: linux-pm/arch/x86/include/asm/topology.h
===================================================================
--- linux-pm.orig/arch/x86/include/asm/topology.h
+++ linux-pm/arch/x86/include/asm/topology.h
@@ -280,11 +280,26 @@ static inline long arch_scale_freq_capac
}
#define arch_scale_freq_capacity arch_scale_freq_capacity
+DECLARE_PER_CPU(unsigned long, arch_cpu_scale);
+
+static inline unsigned long arch_scale_cpu_capacity(int cpu)
+{
+ return READ_ONCE(per_cpu(arch_cpu_scale, cpu));
+}
+#define arch_scale_cpu_capacity arch_scale_cpu_capacity
+
+static inline void arch_set_cpu_capacity(int cpu, unsigned long cap)
+{
+ WRITE_ONCE(per_cpu(arch_cpu_scale, cpu), cap);
+}
+
extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
void arch_rebuild_sched_domains(void);
#else
+static inline void arch_set_cpu_capacity(int cpu, unsigned long cap) { }
+
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
Index: linux-pm/arch/x86/kernel/smpboot.c
===================================================================
--- linux-pm.orig/arch/x86/kernel/smpboot.c
+++ linux-pm/arch/x86/kernel/smpboot.c
@@ -141,6 +141,9 @@ void arch_rebuild_sched_domains(void) {
x86_topology_update = true;
rebuild_sched_domains();
}
+
+/* CPU capacity scaling support */
+DEFINE_PER_CPU(unsigned long, arch_cpu_scale) = SCHED_CAPACITY_SCALE;
#endif
static unsigned int smpboot_warm_reset_vector_count;