From: David Daney <[email protected]>
This is a follow-on patch set to f21afc25f9 (smp.h: Use
local_irq_{save,restore}() in !SMP version of on_each_cpu().). There
are no problems known to me that are fixed, but it does make things more
consistent.
These are based on linux-next next-20130802 and the corresponding mm
patch series. And specifically akpm's
"include/linux/smp.h:on_each_cpu(): switch back to a C function"
patch.
The third patch (smp.h: Move !SMP version of on_each_cpu()
out-of-line) does seem to make the kernel slightly larger on some
architectures, so it is possible that it could be dropped if that
concerns people.
David Daney (3):
smp: Quit unconditionally enabling irq in on_each_cpu_mask and
on_each_cpu_cond
up.c: Use local_irq_{save,restore}() in smp_call_function_single.
smp.h: Move !SMP version of on_each_cpu() out-of-line
include/linux/smp.h | 83 ++++++++++++++---------------------------------------
kernel/up.c | 58 +++++++++++++++++++++++++++++++++++--
2 files changed, 76 insertions(+), 65 deletions(-)
--
1.7.11.7
From: David Daney <[email protected]>
The SMP version of this function doesn't unconditionally enable irqs,
so neither should this !SMP version. There are no known problems
caused by this, but we make the change for consistency's sake.
Signed-off-by: David Daney <[email protected]>
---
kernel/up.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/up.c b/kernel/up.c
index 144e572..b1cf036 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -10,11 +10,13 @@
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
{
+ unsigned long flags;
+
WARN_ON(cpu != 0);
- local_irq_disable();
- (func)(info);
- local_irq_enable();
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
return 0;
}
--
1.7.11.7
From: David Daney <[email protected]>
As in f21afc25f9ed (smp.h: Use local_irq_{save,restore}() in !SMP
version of on_each_cpu().), we don't want to enable irqs if they are
not already enabled. There are currently no known problematic
callers of these functions, but since it is a known failure pattern,
we preemptively fix them.
Since they are not trivial functions, make them non-inline by moving
them to up.c. This also makes it so we don't have to fix #include
dependencies for preempt_{disable,enable}.
Signed-off-by: David Daney <[email protected]>
---
include/linux/smp.h | 62 ++++++++++++++---------------------------------------
kernel/up.c | 39 +++++++++++++++++++++++++++++++++
2 files changed, 55 insertions(+), 46 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c848876..3724a90 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -29,6 +29,22 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -101,22 +117,6 @@ static inline void call_function_init(void) { }
int on_each_cpu(smp_call_func_t func, void *info, int wait);
/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
-
-/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
@@ -151,36 +151,6 @@ static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
return 0;
}
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
- do { \
- if (cpumask_test_cpu(0, (mask))) { \
- local_irq_disable(); \
- (func)(info); \
- local_irq_enable(); \
- } \
- } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
- do { \
- void *__info = (info); \
- preempt_disable(); \
- if ((cond_func)(0, __info)) { \
- local_irq_disable(); \
- (func)(__info); \
- local_irq_enable(); \
- } \
- preempt_enable(); \
- } while (0)
-
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e..144e572 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ unsigned long flags;
+
+ if (cpumask_test_cpu(0, mask)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ if (cond_func(0, info)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
--
1.7.11.7
From: David Daney <[email protected]>
All of the other non-trivial !SMP versions of functions in smp.h are
out-of-line in up.c. Move on_each_cpu() there as well.
This allows us to get rid of the #include <linux/irqflags.h>. The
drawback is that this makes both the x86_64 and i386 defconfig !SMP
kernels about 200 bytes larger each.
Signed-off-by: David Daney <[email protected]>
---
include/linux/smp.h | 21 +++++----------------
kernel/up.c | 11 +++++++++++
2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3724a90..cfb7ca0 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,7 +11,6 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-#include <linux/irqflags.h>
extern void cpu_idle(void);
@@ -30,6 +29,11 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
* Call a function on processors specified by mask, which might include
* the local one.
*/
@@ -112,11 +116,6 @@ static inline void call_function_init(void) { }
#endif
/*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
@@ -141,16 +140,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- return 0;
-}
-
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
diff --git a/kernel/up.c b/kernel/up.c
index b1cf036..630d72b 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,6 +22,17 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
/*
* Note we still need to test the mask even for UP
* because we actually can get an empty mask from
--
1.7.11.7
On 08/02/2013 02:09 PM, David Daney wrote:
> From: David Daney <[email protected]>
>
> All of the other non-trivial !SMP versions of functions in smp.h are
> out-of-line in up.c. Move on_each_cpu() there as well.
>
> This allows us to get rid of the #include <linux/irqflags.h>. The
> drawback is that this makes both the x86_64 and i386 defconfig !SMP
> kernels about 200 bytes larger each.
I looked at the cause of the size difference and there are several main
parts to it.
1) kallsym data (28 bytes).
2) with the patch generating a function pointer to pass to on_each_cpu
vs. direct call (7 bytes * 22 call sites) and generating the ignored
third parameter (5 bytes * 22 call sites); without the patch
local_irq_save/local_irq_restore(5 bytes * 22 call sites) for a total
of 7 * 22 == 154 bytes.
3) Other (21 bytes).
>
> Signed-off-by: David Daney <[email protected]> ---
> include/linux/smp.h | 21 +++++---------------- kernel/up.c |
> 11 +++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-)
>