Use cpu ops to deal with the per cpu data instead of a local_t. Reduces memory
requirements and cache footprint, and decreases cycle counts.
The this_cpu_xx operations are also used for !SMP mode. Otherwise we could
not drop the use of __module_ref_addr() which would make per cpu data handling
complicated. this_cpu_xx operations have their own fallback for !SMP.
After this patch has been applied, the last remaining user of local_t is the
tracing ring buffer.
Signed-off-by: Christoph Lameter <[email protected]>
---
include/linux/module.h | 36 ++++++++++++------------------------
kernel/module.c | 30 ++++++++++++++++--------------
kernel/trace/ring_buffer.c | 1 +
3 files changed, 29 insertions(+), 38 deletions(-)
Index: linux-2.6/include/linux/module.h
===================================================================
--- linux-2.6.orig/include/linux/module.h 2009-11-13 09:34:39.000000000 -0600
+++ linux-2.6/include/linux/module.h 2009-12-14 15:26:58.000000000 -0600
@@ -16,8 +16,7 @@
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/tracepoint.h>
-
-#include <asm/local.h>
+#include <linux/percpu.h>
#include <asm/module.h>
#include <trace/events/module.h>
@@ -361,11 +360,9 @@ struct module
/* Destruction function. */
void (*exit)(void);
-#ifdef CONFIG_SMP
- char *refptr;
-#else
- local_t ref;
-#endif
+ struct module_ref {
+ int count;
+ } *refptr;
#endif
#ifdef CONFIG_CONSTRUCTORS
@@ -452,25 +449,16 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
-static inline local_t *__module_ref_addr(struct module *mod, int cpu)
-{
-#ifdef CONFIG_SMP
- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
-#else
- return &mod->ref;
-#endif
-}
-
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_inc(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
- put_cpu();
+ __this_cpu_read(module->refptr->count));
+ preempt_enable();
}
}
@@ -479,15 +467,15 @@ static inline int try_module_get(struct
int ret = 1;
if (module) {
- unsigned int cpu = get_cpu();
if (likely(module_is_live(module))) {
- local_inc(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
+ preempt_enable();
}
else
ret = 0;
- put_cpu();
}
return ret;
}
Index: linux-2.6/kernel/module.c
===================================================================
--- linux-2.6.orig/kernel/module.c 2009-12-14 14:55:49.000000000 -0600
+++ linux-2.6/kernel/module.c 2009-12-14 15:26:58.000000000 -0600
@@ -474,9 +474,10 @@ static void module_unload_init(struct mo
INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu)
- local_set(__module_ref_addr(mod, cpu), 0);
+ per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
/* Hold reference count during initialization. */
- local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+ __this_cpu_write(mod->refptr->count, 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
@@ -555,6 +556,7 @@ static void module_unload_free(struct mo
kfree(use);
sysfs_remove_link(i->holders_dir, mod->name);
/* There can be at most one match. */
+ free_percpu(i->refptr);
break;
}
}
@@ -619,7 +621,7 @@ unsigned int module_refcount(struct modu
int cpu;
for_each_possible_cpu(cpu)
- total += local_read(__module_ref_addr(mod, cpu));
+ total += per_cpu_ptr(mod->refptr, cpu)->count;
return total;
}
EXPORT_SYMBOL(module_refcount);
@@ -796,14 +798,15 @@ static struct module_attribute refcnt =
void module_put(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_dec(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_dec(module->refptr->count);
+
trace_module_put(module, _RET_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
- put_cpu();
+ preempt_enable();
}
}
EXPORT_SYMBOL(module_put);
@@ -1380,9 +1383,9 @@ static void free_module(struct module *m
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
if (mod->refptr)
- percpu_modfree(mod->refptr);
+ free_percpu(mod->refptr);
#endif
/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2148,9 +2151,8 @@ static noinline struct module *load_modu
mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
- mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+ mod->refptr = alloc_percpu(struct module_ref);
if (!mod->refptr) {
err = -ENOMEM;
goto free_init;
@@ -2376,8 +2378,8 @@ static noinline struct module *load_modu
kobject_put(&mod->mkobj.kobj);
free_unload:
module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+ free_percpu(mod->refptr);
free_init:
#endif
module_free(mod, mod->module_init);
Index: linux-2.6/kernel/trace/ring_buffer.c
===================================================================
--- linux-2.6.orig/kernel/trace/ring_buffer.c 2009-12-07 08:59:46.000000000 -0600
+++ linux-2.6/kernel/trace/ring_buffer.c 2009-12-14 15:26:58.000000000 -0600
@@ -12,6 +12,7 @@
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
--
(Rusty Russell cc'd.)
On 12/15/2009 07:03 AM, Christoph Lameter wrote:
> @@ -479,15 +467,15 @@ static inline int try_module_get(struct
> int ret = 1;
>
> if (module) {
> - unsigned int cpu = get_cpu();
> if (likely(module_is_live(module))) {
> - local_inc(__module_ref_addr(module, cpu));
> + preempt_disable();
> + __this_cpu_inc(module->refptr->count);
> trace_module_get(module, _THIS_IP_,
> - local_read(__module_ref_addr(module, cpu)));
> + __this_cpu_read(module->refptr->count));
> + preempt_enable();
> }
> else
> ret = 0;
> - put_cpu();
I think you need preemption disabled while checking whether
module_is_live(). The state is protected by stop_machine or
synchronize_sched().
Thanks.
--
tejun
On Tue, 15 Dec 2009 02:33:50 pm Tejun Heo wrote:
> (Rusty Russell cc'd.)
>
> On 12/15/2009 07:03 AM, Christoph Lameter wrote:
> > @@ -479,15 +467,15 @@ static inline int try_module_get(struct
> > int ret = 1;
> >
> > if (module) {
> > - unsigned int cpu = get_cpu();
> > if (likely(module_is_live(module))) {
> > - local_inc(__module_ref_addr(module, cpu));
> > + preempt_disable();
> > + __this_cpu_inc(module->refptr->count);
> > trace_module_get(module, _THIS_IP_,
> > - local_read(__module_ref_addr(module, cpu)));
> > + __this_cpu_read(module->refptr->count));
> > + preempt_enable();
> > }
> > else
> > ret = 0;
> > - put_cpu();
>
> I think you need preemption disabled while checking whether
> module_is_live(). The state is protected by stop_machine or
> synchronize_sched().
Yes.
Thanks,
Rusty.
Fixup patch:
Subject: Extend preempt_disable section around module_is_live()
Signed-off-by: Christoph Lameter <[email protected]>
---
include/linux/module.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
Index: linux-2.6/include/linux/module.h
===================================================================
--- linux-2.6.orig/include/linux/module.h 2009-12-16 08:57:37.000000000 -0600
+++ linux-2.6/include/linux/module.h 2009-12-16 08:58:09.000000000 -0600
@@ -467,15 +467,17 @@ static inline int try_module_get(struct
int ret = 1;
if (module) {
+ preempt_disable();
+
if (likely(module_is_live(module))) {
- preempt_disable();
__this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
__this_cpu_read(module->refptr->count));
- preempt_enable();
}
else
ret = 0;
+
+ preempt_enable();
}
return ret;
}
Hello,
On 12/17/2009 01:10 AM, Christoph Lameter wrote:
> Fixup patch:
>
> Subject: Extend preempt_disable section around module_is_live()
>
> Signed-off-by: Christoph Lameter <[email protected]>
Looks good to me but as the patch currently isn't in any tree, it'll
probably be better to merge it into the original patch. Rusty, do you
want to take this one? If not, I can route it through percpu tree.
Thanks.
--
tejun
On Thu, 17 Dec 2009 10:55:13 am Tejun Heo wrote:
> Hello,
>
> On 12/17/2009 01:10 AM, Christoph Lameter wrote:
> > Fixup patch:
> >
> > Subject: Extend preempt_disable section around module_is_live()
> >
> > Signed-off-by: Christoph Lameter <[email protected]>
>
> Looks good to me but as the patch currently isn't in any tree, it'll
> probably be better to merge it into the original patch. Rusty, do you
> want to take this one? If not, I can route it through percpu tree.
Easiest to go with your stuff I think:
Acked-by: Rusty Russell <[email protected]>
Thanks,
Rusty.