During suspend, we disable all the non boot cpus. And during resume we bring
them all back again. So no need to do alternatives_smp_switch() in between.
This speeds up both suspend and resume paths.
Signed-off-by: Suresh Siddha <[email protected]>
---
arch/x86/kernel/smpboot.c | 7 ++++++-
include/linux/cpu.h | 2 ++
kernel/cpu.c | 3 +++
3 files changed, 11 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f0a0624..0b04ca3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1349,7 +1349,12 @@ void native_cpu_die(unsigned int cpu)
if (system_state == SYSTEM_RUNNING)
pr_info("CPU %u is now offline\n", cpu);
- if (1 == num_online_cpus())
+ /*
+ * Don't do the smp alternatives switch during
+ * suspend. We will be back in the SMP mode after
+ * resume.
+ */
+ if (1 == num_online_cpus() && !pm_sleep_smp)
alternatives_smp_switch(0);
return;
}
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4823af6..8cab04c 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -169,11 +169,13 @@ static inline void cpu_hotplug_driver_unlock(void)
#ifdef CONFIG_PM_SLEEP_SMP
extern int suspend_cpu_hotplug;
+extern int pm_sleep_smp;
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
#define suspend_cpu_hotplug 0
+#define pm_sleep_smp 0
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8615aa6..2eed810 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -381,6 +381,7 @@ out:
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
+int pm_sleep_smp;
int disable_nonboot_cpus(void)
{
@@ -393,6 +394,7 @@ int disable_nonboot_cpus(void)
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
+ pm_sleep_smp = 1;
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
@@ -454,6 +456,7 @@ void __ref enable_nonboot_cpus(void)
cpumask_clear(frozen_cpus);
out:
+ pm_sleep_smp = 0;
cpu_maps_update_done();
}
On Fri, 19 Nov 2010 16:09:24 -0800, Suresh Siddha <[email protected]> wrote:
> During suspend, we disable all the non boot cpus. And during resume we bring
> them all back again. So no need to do alternatives_smp_switch() in between.
>
> This speeds up both suspend and resume paths.
>
Do you have typical before/after values? Is the change on the order of
microseconds, hundreds of milliseconds, or somewhere in between? It
might be good to note this in the changelog.
- Ben
On Fri, Nov 19, 2010 at 04:09:24PM -0800, Suresh Siddha wrote:
>During suspend, we disable all the non boot cpus. And during resume we bring
>them all back again. So no need to do alternatives_smp_switch() in between.
>
>This speeds up both suspend and resume paths.
>
>Signed-off-by: Suresh Siddha <[email protected]>
This is a cool idea!
The patch looks good for me, just note that kexec
also calls disable_nonboot_cpus(), so the name 'pm_sleep_smp'
is a bit confusing. ;)
Reviewed-by: WANG Cong <[email protected]>
Thanks!
--
Live like a child, think like the god.
Hi.
On 20/11/10 11:09, Suresh Siddha wrote:
> During suspend, we disable all the non boot cpus. And during resume we bring
> them all back again. So no need to do alternatives_smp_switch() in between.
>
> This speeds up both suspend and resume paths.
>
> Signed-off-by: Suresh Siddha <[email protected]>
> ---
>
> arch/x86/kernel/smpboot.c | 7 ++++++-
> include/linux/cpu.h | 2 ++
> kernel/cpu.c | 3 +++
> 3 files changed, 11 insertions(+), 1 deletions(-)
>
> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
> index f0a0624..0b04ca3 100644
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -1349,7 +1349,12 @@ void native_cpu_die(unsigned int cpu)
> if (system_state == SYSTEM_RUNNING)
> pr_info("CPU %u is now offline\n", cpu);
>
> - if (1 == num_online_cpus())
> + /*
> + * Don't do the smp alternatives switch during
> + * suspend. We will be back in the SMP mode after
> + * resume.
> + */
> + if (1 == num_online_cpus() && !pm_sleep_smp)
> alternatives_smp_switch(0);
> return;
> }
> diff --git a/include/linux/cpu.h b/include/linux/cpu.h
> index 4823af6..8cab04c 100644
> --- a/include/linux/cpu.h
> +++ b/include/linux/cpu.h
> @@ -169,11 +169,13 @@ static inline void cpu_hotplug_driver_unlock(void)
>
> #ifdef CONFIG_PM_SLEEP_SMP
> extern int suspend_cpu_hotplug;
> +extern int pm_sleep_smp;
>
> extern int disable_nonboot_cpus(void);
> extern void enable_nonboot_cpus(void);
> #else /* !CONFIG_PM_SLEEP_SMP */
> #define suspend_cpu_hotplug 0
> +#define pm_sleep_smp 0
>
> static inline int disable_nonboot_cpus(void) { return 0; }
> static inline void enable_nonboot_cpus(void) {}
> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index 8615aa6..2eed810 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -381,6 +381,7 @@ out:
>
> #ifdef CONFIG_PM_SLEEP_SMP
> static cpumask_var_t frozen_cpus;
> +int pm_sleep_smp;
>
> int disable_nonboot_cpus(void)
> {
> @@ -393,6 +394,7 @@ int disable_nonboot_cpus(void)
> * with the userspace trying to use the CPU hotplug at the same time
> */
> cpumask_clear(frozen_cpus);
> + pm_sleep_smp = 1;
>
> printk("Disabling non-boot CPUs ...\n");
> for_each_online_cpu(cpu) {
> @@ -454,6 +456,7 @@ void __ref enable_nonboot_cpus(void)
>
> cpumask_clear(frozen_cpus);
> out:
> + pm_sleep_smp = 0;
> cpu_maps_update_done();
> }
We have a few other things that want to modify their behaviour
according to whether we're doing the atomic copy/restore. Perhaps it
would be an idea to just use a single flag, perhaps a value for
system_state?
Nigel
On Sun, Nov 21, 2010 at 08:27:41PM +1100, Nigel Cunningham wrote:
> Hi.
>
> We have a few other things that want to modify their behaviour
> according to whether we're doing the atomic copy/restore. Perhaps it
> would be an idea to just use a single flag, perhaps a value for
> system_state?
I agree, it would be nicer if we don't introduce a special flag
just for that. Other than that, I like all sane code that speeds up
suspend/resume :). I've attached before/after dmesg excerpts on my
system with the patch ontop of v2.6.37-rc2-181-gb86db47. We end up
saving 11601 µsecs according to CONFIG_PRINTK_TIME but hey, the code is
simple enough :).
Tested-by: Borislav Petkov <[email protected]>
Thanks.
--
Regards/Gruss,
Boris.
On 11/21/2010 01:27 AM, Nigel Cunningham wrote:
> We have a few other things that want to modify their behaviour
> according to whether we're doing the atomic copy/restore. Perhaps it
> would be an idea to just use a single flag, perhaps a value for
> system_state?
system_state is pretty much considered harmful...
-hpa
--
H. Peter Anvin, Intel Open Source Technology Center
I work for Intel. I don't speak on their behalf.
On Sun, 2010-11-21 at 01:27 -0800, Nigel Cunningham wrote:
> We have a few other things that want to modify their behaviour
> according to whether we're doing the atomic copy/restore.
What are these other things and shouldn't they be using the smp_mode in
arch/x86/kernel/alternative.c ? Perhaps we need an API to export this
for others to use?
> Perhaps it
> would be an idea to just use a single flag, perhaps a value for
> system_state?
I initially thought of system_state but as Peter mentioned, I was
worried that the system_state modification in this path might break some
suspend/resume code flow.
Anyways appended a slightly modified patch that uses similar flow like
the existing arch_enable_nonboot_cpus_{begin, end}
thanks,
suresh
---
From: Suresh Siddha <[email protected]>
Subject: x86: avoid unnecessary smp alternatives switch during suspend/resume
During suspend, we disable all the non boot cpus. And during resume we bring
them all back again. So no need to do alternatives_smp_switch() in between.
On my core 2 based laptop, this speeds up the suspend path by 15msec and the
resume path by 5 msec (suspend/resume speed up differences can be attributed
to the different P-states that the cpu is in during suspend/resume).
Signed-off-by: Suresh Siddha <[email protected]>
---
arch/x86/include/asm/alternative.h | 1 +
arch/x86/kernel/alternative.c | 3 ++-
arch/x86/kernel/smpboot.c | 14 ++++++++++++++
kernel/cpu.c | 11 +++++++++++
4 files changed, 28 insertions(+), 1 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 76561d2..01171f6 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -66,6 +66,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5079f24..9f98eb4 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -353,6 +353,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
mutex_unlock(&smp_alt);
}
+bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
struct smp_alt_module *mod;
@@ -368,7 +369,7 @@ void alternatives_smp_switch(int smp)
printk("lockdep: fixing up alternatives.\n");
#endif
- if (noreplace_smp || smp_alt_once)
+ if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
return;
BUG_ON(!smp && (num_online_cpus() > 1));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f0a0624..589db1d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1158,6 +1158,20 @@ out:
preempt_enable();
}
+void arch_disable_nonboot_cpus_begin(void)
+{
+ /*
+ * Avoid the smp alternatives switch during the disable_nonboot_cpus().
+ * In the suspend path, we will be back in the SMP mode shortly anyways.
+ */
+ skip_smp_alternatives = true;
+}
+
+void arch_disable_nonboot_cpus_end(void)
+{
+ skip_smp_alternatives = false;
+}
+
void arch_enable_nonboot_cpus_begin(void)
{
set_mtrr_aps_delayed_init();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cb7a1ef..156cc55 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -384,6 +384,14 @@ out:
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
+void __weak arch_disable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_disable_nonboot_cpus_end(void)
+{
+}
+
int disable_nonboot_cpus(void)
{
int cpu, first_cpu, error = 0;
@@ -395,6 +403,7 @@ int disable_nonboot_cpus(void)
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
+ arch_disable_nonboot_cpus_begin();
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
@@ -410,6 +419,8 @@ int disable_nonboot_cpus(void)
}
}
+ arch_disable_nonboot_cpus_end();
+
if (!error) {
BUG_ON(num_online_cpus() > 1);
/* Make sure the CPUs won't be enabled by someone else */
Commit-ID: 3fb82d56ad003e804923185316236f26b30dfdd5
Gitweb: http://git.kernel.org/tip/3fb82d56ad003e804923185316236f26b30dfdd5
Author: Suresh Siddha <[email protected]>
AuthorDate: Tue, 23 Nov 2010 16:11:40 -0800
Committer: H. Peter Anvin <[email protected]>
CommitDate: Mon, 13 Dec 2010 16:23:56 -0800
x86, suspend: Avoid unnecessary smp alternatives switch during suspend/resume
During suspend, we disable all the non boot cpus. And during resume we bring
them all back again. So no need to do alternatives_smp_switch() in between.
On my core 2 based laptop, this speeds up the suspend path by 15msec and the
resume path by 5 msec (suspend/resume speed up differences can be attributed
to the different P-states that the cpu is in during suspend/resume).
Signed-off-by: Suresh Siddha <[email protected]>
LKML-Reference: <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
arch/x86/include/asm/alternative.h | 1 +
arch/x86/kernel/alternative.c | 3 ++-
arch/x86/kernel/smpboot.c | 14 ++++++++++++++
kernel/cpu.c | 11 +++++++++++
4 files changed, 28 insertions(+), 1 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 76561d2..01171f6 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -66,6 +66,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5079f24..9f98eb4 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -353,6 +353,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
mutex_unlock(&smp_alt);
}
+bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
struct smp_alt_module *mod;
@@ -368,7 +369,7 @@ void alternatives_smp_switch(int smp)
printk("lockdep: fixing up alternatives.\n");
#endif
- if (noreplace_smp || smp_alt_once)
+ if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
return;
BUG_ON(!smp && (num_online_cpus() > 1));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 083e99d..837c81e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1166,6 +1166,20 @@ out:
preempt_enable();
}
+void arch_disable_nonboot_cpus_begin(void)
+{
+ /*
+ * Avoid the smp alternatives switch during the disable_nonboot_cpus().
+ * In the suspend path, we will be back in the SMP mode shortly anyways.
+ */
+ skip_smp_alternatives = true;
+}
+
+void arch_disable_nonboot_cpus_end(void)
+{
+ skip_smp_alternatives = false;
+}
+
void arch_enable_nonboot_cpus_begin(void)
{
set_mtrr_aps_delayed_init();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f6e726f..8ccc182 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -386,6 +386,14 @@ out:
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
+void __weak arch_disable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_disable_nonboot_cpus_end(void)
+{
+}
+
int disable_nonboot_cpus(void)
{
int cpu, first_cpu, error = 0;
@@ -397,6 +405,7 @@ int disable_nonboot_cpus(void)
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
+ arch_disable_nonboot_cpus_begin();
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
@@ -412,6 +421,8 @@ int disable_nonboot_cpus(void)
}
}
+ arch_disable_nonboot_cpus_end();
+
if (!error) {
BUG_ON(num_online_cpus() > 1);
/* Make sure the CPUs won't be enabled by someone else */
On Tuesday, December 14, 2010, tip-bot for Suresh Siddha wrote:
> Commit-ID: 3fb82d56ad003e804923185316236f26b30dfdd5
> Gitweb: http://git.kernel.org/tip/3fb82d56ad003e804923185316236f26b30dfdd5
> Author: Suresh Siddha <[email protected]>
> AuthorDate: Tue, 23 Nov 2010 16:11:40 -0800
> Committer: H. Peter Anvin <[email protected]>
> CommitDate: Mon, 13 Dec 2010 16:23:56 -0800
>
> x86, suspend: Avoid unnecessary smp alternatives switch during suspend/resume
>
> During suspend, we disable all the non boot cpus. And during resume we bring
> them all back again. So no need to do alternatives_smp_switch() in between.
>
> On my core 2 based laptop, this speeds up the suspend path by 15msec and the
> resume path by 5 msec (suspend/resume speed up differences can be attributed
> to the different P-states that the cpu is in during suspend/resume).
>
> Signed-off-by: Suresh Siddha <[email protected]>
> LKML-Reference: <[email protected]>
> Cc: Rafael J. Wysocki <[email protected]>
> Signed-off-by: H. Peter Anvin <[email protected]>
OK, I guess there's no nicer way to implement that.
Acked-by: Rafael J. Wysocki <[email protected]>
> ---
> arch/x86/include/asm/alternative.h | 1 +
> arch/x86/kernel/alternative.c | 3 ++-
> arch/x86/kernel/smpboot.c | 14 ++++++++++++++
> kernel/cpu.c | 11 +++++++++++
> 4 files changed, 28 insertions(+), 1 deletions(-)
>
> diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
> index 76561d2..01171f6 100644
> --- a/arch/x86/include/asm/alternative.h
> +++ b/arch/x86/include/asm/alternative.h
> @@ -66,6 +66,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
> extern void alternatives_smp_module_del(struct module *mod);
> extern void alternatives_smp_switch(int smp);
> extern int alternatives_text_reserved(void *start, void *end);
> +extern bool skip_smp_alternatives;
> #else
> static inline void alternatives_smp_module_add(struct module *mod, char *name,
> void *locks, void *locks_end,
> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
> index 5079f24..9f98eb4 100644
> --- a/arch/x86/kernel/alternative.c
> +++ b/arch/x86/kernel/alternative.c
> @@ -353,6 +353,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
> mutex_unlock(&smp_alt);
> }
>
> +bool skip_smp_alternatives;
> void alternatives_smp_switch(int smp)
> {
> struct smp_alt_module *mod;
> @@ -368,7 +369,7 @@ void alternatives_smp_switch(int smp)
> printk("lockdep: fixing up alternatives.\n");
> #endif
>
> - if (noreplace_smp || smp_alt_once)
> + if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
> return;
> BUG_ON(!smp && (num_online_cpus() > 1));
>
> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
> index 083e99d..837c81e 100644
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -1166,6 +1166,20 @@ out:
> preempt_enable();
> }
>
> +void arch_disable_nonboot_cpus_begin(void)
> +{
> + /*
> + * Avoid the smp alternatives switch during the disable_nonboot_cpus().
> + * In the suspend path, we will be back in the SMP mode shortly anyways.
> + */
> + skip_smp_alternatives = true;
> +}
> +
> +void arch_disable_nonboot_cpus_end(void)
> +{
> + skip_smp_alternatives = false;
> +}
> +
> void arch_enable_nonboot_cpus_begin(void)
> {
> set_mtrr_aps_delayed_init();
> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index f6e726f..8ccc182 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -386,6 +386,14 @@ out:
> #ifdef CONFIG_PM_SLEEP_SMP
> static cpumask_var_t frozen_cpus;
>
> +void __weak arch_disable_nonboot_cpus_begin(void)
> +{
> +}
> +
> +void __weak arch_disable_nonboot_cpus_end(void)
> +{
> +}
> +
> int disable_nonboot_cpus(void)
> {
> int cpu, first_cpu, error = 0;
> @@ -397,6 +405,7 @@ int disable_nonboot_cpus(void)
> * with the userspace trying to use the CPU hotplug at the same time
> */
> cpumask_clear(frozen_cpus);
> + arch_disable_nonboot_cpus_begin();
>
> printk("Disabling non-boot CPUs ...\n");
> for_each_online_cpu(cpu) {
> @@ -412,6 +421,8 @@ int disable_nonboot_cpus(void)
> }
> }
>
> + arch_disable_nonboot_cpus_end();
> +
> if (!error) {
> BUG_ON(num_online_cpus() > 1);
> /* Make sure the CPUs won't be enabled by someone else */
>
>