mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-20 13:41:30 +00:00
module: Use this_cpu_xx to dynamically allocate counters

Use cpu ops to deal with the per cpu data instead of a local_t. Reduces memory requirements, cache footprint and decreases cycle counts.

The this_cpu_xx operations are also used for !SMP mode. Otherwise we could not drop the use of __module_ref_addr(), which would make per cpu data handling complicated. this_cpu_xx operations have their own fallback for !SMP.

V8-V9:
- Leave include asm/module.h since ringbuffer.c depends on it. Nothing else does though. Another patch will deal with that.
- Remove spurious free.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
38b7827fcd
commit
e1783a240f
2 changed files with 29 additions and 36 deletions
|
@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
|
|||
|
||||
INIT_LIST_HEAD(&mod->modules_which_use_me);
|
||||
for_each_possible_cpu(cpu)
|
||||
local_set(__module_ref_addr(mod, cpu), 0);
|
||||
per_cpu_ptr(mod->refptr, cpu)->count = 0;
|
||||
|
||||
/* Hold reference count during initialization. */
|
||||
local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
|
||||
__this_cpu_write(mod->refptr->count, 1);
|
||||
/* Backwards compatibility macros put refcount during init. */
|
||||
mod->waiter = current;
|
||||
}
|
||||
|
@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
|
|||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
total += local_read(__module_ref_addr(mod, cpu));
|
||||
total += per_cpu_ptr(mod->refptr, cpu)->count;
|
||||
return total;
|
||||
}
|
||||
EXPORT_SYMBOL(module_refcount);
|
||||
|
@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
|
|||
void module_put(struct module *module)
|
||||
{
|
||||
if (module) {
|
||||
unsigned int cpu = get_cpu();
|
||||
local_dec(__module_ref_addr(module, cpu));
|
||||
preempt_disable();
|
||||
__this_cpu_dec(module->refptr->count);
|
||||
|
||||
trace_module_put(module, _RET_IP_,
|
||||
local_read(__module_ref_addr(module, cpu)));
|
||||
__this_cpu_read(module->refptr->count));
|
||||
/* Maybe they're waiting for us to drop reference? */
|
||||
if (unlikely(!module_is_live(module)))
|
||||
wake_up_process(module->waiter);
|
||||
put_cpu();
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(module_put);
|
||||
|
@ -1394,9 +1396,9 @@ static void free_module(struct module *mod)
|
|||
kfree(mod->args);
|
||||
if (mod->percpu)
|
||||
percpu_modfree(mod->percpu);
|
||||
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
|
||||
#if defined(CONFIG_MODULE_UNLOAD)
|
||||
if (mod->refptr)
|
||||
percpu_modfree(mod->refptr);
|
||||
free_percpu(mod->refptr);
|
||||
#endif
|
||||
/* Free lock-classes: */
|
||||
lockdep_free_key_range(mod->module_core, mod->core_size);
|
||||
|
@ -2159,9 +2161,8 @@ static noinline struct module *load_module(void __user *umod,
|
|||
mod = (void *)sechdrs[modindex].sh_addr;
|
||||
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
|
||||
|
||||
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
|
||||
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
|
||||
mod->name);
|
||||
#if defined(CONFIG_MODULE_UNLOAD)
|
||||
mod->refptr = alloc_percpu(struct module_ref);
|
||||
if (!mod->refptr) {
|
||||
err = -ENOMEM;
|
||||
goto free_init;
|
||||
|
@ -2393,8 +2394,8 @@ static noinline struct module *load_module(void __user *umod,
|
|||
kobject_put(&mod->mkobj.kobj);
|
||||
free_unload:
|
||||
module_unload_free(mod);
|
||||
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
|
||||
percpu_modfree(mod->refptr);
|
||||
#if defined(CONFIG_MODULE_UNLOAD)
|
||||
free_percpu(mod->refptr);
|
||||
free_init:
|
||||
#endif
|
||||
module_free(mod, mod->module_init);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue