Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict in kernel/sched.c as per Tejun Heo.
commit ada3fa1505
80 changed files with 1912 additions and 1230 deletions
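The common thread of the 46 commits is that the dynamic percpu allocator becomes the default on every architecture, so modules and subsystems can allocate per-CPU data at runtime instead of carving it out of a fixed reserve. As a reader's aid, here is a minimal sketch of that allocator's user-facing side: alloc_percpu(), per_cpu_ptr(), get_cpu()/put_cpu(), and free_percpu() are the real kernel interfaces, while struct hit_stats and the function names are invented for illustration.

```c
#include <linux/percpu.h>
#include <linux/module.h>

/* Hypothetical per-CPU statistics; only the percpu calls are real API. */
struct hit_stats {
	unsigned long hits;
	unsigned long misses;
};

static struct hit_stats *stats;

static int __init stats_init(void)
{
	/* One struct hit_stats instance per possible CPU. */
	stats = alloc_percpu(struct hit_stats);
	return stats ? 0 : -ENOMEM;
}

static void record_hit(void)
{
	/* get_cpu() disables preemption so this CPU's copy stays ours. */
	struct hit_stats *s = per_cpu_ptr(stats, get_cpu());

	s->hits++;
	put_cpu();
}

static void __exit stats_exit(void)
{
	free_percpu(stats);
}
```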
kernel/module.c

```diff
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 static void *percpu_modalloc(unsigned long size, unsigned long align,
 			     const char *name)
@@ -394,7 +394,7 @@ static void percpu_modfree(void *freeme)
 	free_percpu(freeme);
 }
 
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -540,7 +540,7 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 				 Elf_Shdr *sechdrs,
```
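These three hunks flip the sense of the configuration guard: the dynamic allocator used to be opt-in via CONFIG_HAVE_DYNAMIC_PER_CPU_AREA, and is now the default, with only architectures that still select CONFIG_HAVE_LEGACY_PER_CPU_AREA keeping the old block allocator. The branch the new guard selects is essentially a thin wrapper over the core allocator; the sketch below paraphrases it, with __alloc_percpu() and free_percpu() being the real core API and failure reporting trimmed for brevity.

```c
/* Paraphrase of the dynamic-allocator branch that
 * !CONFIG_HAVE_LEGACY_PER_CPU_AREA now selects by default: module
 * per-CPU areas come straight from the core percpu allocator. */
static void *percpu_modalloc(unsigned long size, unsigned long align,
			     const char *name)
{
	if (align > PAGE_SIZE) {
		/* the percpu allocator honors at most page alignment */
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}
	return __alloc_percpu(size, align);
}

static void percpu_modfree(void *freeme)
{
	free_percpu(freeme);
}
```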
kernel/perf_counter.c

```diff
@@ -106,16 +106,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 void __weak perf_counter_print_debug(void) { }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-	__get_cpu_var(disable_count)++;
+	__get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-	return !--__get_cpu_var(disable_count);
+	return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
```
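The rename gives an over-generic percpu symbol a perf_ prefix; the percpu rework in this merge moves static per-CPU variables toward ordinary, globally unique symbol names, so bland names like disable_count invite clashes. Beyond the rename, the pair of functions is a classic nesting counter: disables may nest, and __perf_enable() reports true only when the outermost disable is undone. Below is a self-contained userspace model of the pattern, with a plain int standing in for the per-CPU variable and illustrative names throughout.

```c
#include <stdbool.h>
#include <stdio.h>

static int disable_count;	/* one instance per CPU in the kernel */

static void my_disable(void)
{
	disable_count++;
}

static bool my_enable(void)
{
	/* True only when this call drops the count back to zero. */
	return !--disable_count;
}

int main(void)
{
	my_disable();
	my_disable();
	printf("%d\n", my_enable());	/* 0: one disable still pending */
	printf("%d\n", my_enable());	/* 1: fully re-enabled */
	return 0;
}
```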
kernel/sched.c

```diff
@@ -295,12 +295,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
```
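This is the hunk the merge message flags as the trivial conflict. DEFINE_PER_CPU_SHARED_ALIGNED replaces the open-coded ____cacheline_aligned_in_smp tag: the macro both aligns the object and groups such variables in their own percpu section, so hot scheduler structures do not share a cacheline with unrelated neighbors. The userspace demo below shows the underlying layout effect; the 64-byte line size and struct names are assumptions for illustration.

```c
#include <stdio.h>

#define CACHELINE 64	/* assumed cacheline size for illustration */

/* Padded to its own cacheline, like the SHARED_ALIGNED form. */
struct padded_counter {
	unsigned long count;
} __attribute__((aligned(CACHELINE)));

/* Unpadded: adjacent instances land on the same cacheline. */
struct packed_counter {
	unsigned long count;
};

int main(void)
{
	printf("padded stride: %zu bytes\n", sizeof(struct padded_counter));
	printf("packed stride: %zu bytes\n", sizeof(struct packed_counter));
	return 0;
}
```

On SMP, two CPUs updating adjacent packed counters would bounce the shared line between their caches (false sharing); the padded layout gives each counter a line of its own, which is exactly what the scheduler wants for per-CPU runqueue data.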
kernel/trace/trace_events.c

```diff
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
```
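Same rename, same reason: a percpu variable named test_event_disable is too generic once percpu symbols share the global namespace. The surrounding code is also worth a gloss. The function tracer hooks kernel functions, including ones this handler itself calls, so it guards against re-entry with a per-CPU counter: only the first nesting level on a CPU records the event. Here is a runnable userspace model of that guard; a plain int stands in for the per-CPU atomic_t (the kernel uses atomic ops because the hook can fire from interrupt context), and all names are illustrative.

```c
#include <stdio.h>

static int event_disable;	/* per-CPU atomic_t in the kernel */

static void traced_call(int depth)
{
	/* atomic_inc_return() equivalent: claim this nesting level. */
	if (++event_disable != 1)
		goto out;	/* re-entered: skip the real work */

	printf("recording event at depth %d\n", depth);
	if (depth < 3)
		traced_call(depth + 1);	/* simulate re-entry */
out:
	--event_disable;	/* atomic_dec() equivalent */
}

int main(void)
{
	traced_call(0);		/* only depth 0 records an event */
	return 0;
}
```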