WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness. On a 64-bit architecture it's nearly 100 bytes in size. This reduces that by half for the non-delayable type of event.

Signed-Off-By: David Howells <dhowells@redhat.com>
commit 52bad64d95
parent 0f9005a6f7
22 changed files with 96 additions and 73 deletions
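For context, a rough sketch of the layout change the commit message describes: the timer moves out of work_struct and into a wrapper type, delayed_work, that only delayable users need. The types below are stand-ins with illustrative fields, not the exact definitions from include/linux/workqueue.h and include/linux/timer.h.

/* Stand-in types so the sketch is self-contained. */
struct list_head  { struct list_head *next, *prev; };	/* stand-in */
struct timer_list { unsigned long expires; };		/* stand-in */

/* Non-delayable work item: no embedded timer any more, so it is
 * roughly half the old size on a 64-bit architecture. */
struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *data);	/* old-style handler taking a data pointer */
	void *data;
};

/* Delayable work item: the timer_list removed from work_struct now
 * lives here, next to an embedded work_struct. */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};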
mm/slab.c
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
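For other callers the conversion follows the same pattern as the slab hunk above: declare a struct delayed_work instead of a struct work_struct, initialise it with the three-argument INIT_DELAYED_WORK of this era, and reach the embedded work_struct through the .work member. The sketch below is a hypothetical driver-side usage example, not code from this commit; poll_work and poll_handler are made-up names.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical periodic handler, old-style (void *) argument. */
static void poll_handler(void *data)
{
	/* do the periodic work here, optionally rescheduling poll_work */
}

/* Delayable item: declared as delayed_work after this commit. */
static struct delayed_work poll_work;

static int poll_start(void *dev)
{
	/* three-argument initialiser, as used for cache_reap above */
	INIT_DELAYED_WORK(&poll_work, poll_handler, dev);

	/* run roughly once a second; the pending flag lives in poll_work.work */
	return schedule_delayed_work(&poll_work, HZ);
}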