mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-06 14:31:46 +00:00
perf/x86/amd: Cleanup Fam10h NB event constraints
Avoid allocating the AMD NB event constraints data structure when not needed. This gets rid of x86_max_cores usage and avoids allocating this on AMD Core Perfctr supporting hardware (which has separate MSRs for NB events). Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: aherrmann@suse.com Cc: Rui Huang <ray.huang@amd.com> Cc: Borislav Petkov <bp@alien8.de> Cc: jencce.kernel@gmail.com Link: http://lkml.kernel.org/r/20160320124629.GY6375@twins.programming.kicks-ass.net Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
ee6825c80e
commit
32b62f4468
2 changed files with 23 additions and 3 deletions
|
@@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu)
|
||||||
|
|
||||||
WARN_ON_ONCE(cpuc->amd_nb);
|
WARN_ON_ONCE(cpuc->amd_nb);
|
||||||
|
|
||||||
if (boot_cpu_data.x86_max_cores < 2)
|
if (!x86_pmu.amd_nb_constraints)
|
||||||
return NOTIFY_OK;
|
return NOTIFY_OK;
|
||||||
|
|
||||||
cpuc->amd_nb = amd_alloc_nb(cpu);
|
cpuc->amd_nb = amd_alloc_nb(cpu);
|
||||||
|
@@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu)
|
||||||
|
|
||||||
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
|
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
|
||||||
|
|
||||||
if (boot_cpu_data.x86_max_cores < 2)
|
if (!x86_pmu.amd_nb_constraints)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
nb_id = amd_get_nb_id(cpu);
|
nb_id = amd_get_nb_id(cpu);
|
||||||
|
@@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuhw;
|
struct cpu_hw_events *cpuhw;
|
||||||
|
|
||||||
if (boot_cpu_data.x86_max_cores < 2)
|
if (!x86_pmu.amd_nb_constraints)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
cpuhw = &per_cpu(cpu_hw_events, cpu);
|
cpuhw = &per_cpu(cpu_hw_events, cpu);
|
||||||
|
@@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = {
|
||||||
.cpu_prepare = amd_pmu_cpu_prepare,
|
.cpu_prepare = amd_pmu_cpu_prepare,
|
||||||
.cpu_starting = amd_pmu_cpu_starting,
|
.cpu_starting = amd_pmu_cpu_starting,
|
||||||
.cpu_dead = amd_pmu_cpu_dead,
|
.cpu_dead = amd_pmu_cpu_dead,
|
||||||
|
|
||||||
|
.amd_nb_constraints = 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int __init amd_core_pmu_init(void)
|
static int __init amd_core_pmu_init(void)
|
||||||
|
@@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void)
|
||||||
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
|
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
|
||||||
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
|
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
|
||||||
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
|
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
|
||||||
|
/*
|
||||||
|
* AMD Core perfctr has separate MSRs for the NB events, see
|
||||||
|
* the amd/uncore.c driver.
|
||||||
|
*/
|
||||||
|
x86_pmu.amd_nb_constraints = 0;
|
||||||
|
|
||||||
pr_cont("core perfctr, ");
|
pr_cont("core perfctr, ");
|
||||||
return 0;
|
return 0;
|
||||||
|
@@ -693,6 +700,14 @@ __init int amd_pmu_init(void)
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
if (num_possible_cpus() == 1) {
|
||||||
|
/*
|
||||||
|
* No point in allocating data structures to serialize
|
||||||
|
* against other CPUs, when there is only the one CPU.
|
||||||
|
*/
|
||||||
|
x86_pmu.amd_nb_constraints = 0;
|
||||||
|
}
|
||||||
|
|
||||||
/* Events are common for all AMDs */
|
/* Events are common for all AMDs */
|
||||||
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
|
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
|
||||||
sizeof(hw_cache_event_ids));
|
sizeof(hw_cache_event_ids));
|
||||||
|
|
|
@@ -607,6 +607,11 @@ struct x86_pmu {
|
||||||
*/
|
*/
|
||||||
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
|
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* AMD bits
|
||||||
|
*/
|
||||||
|
unsigned int amd_nb_constraints : 1;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Extra registers for events
|
* Extra registers for events
|
||||||
*/
|
*/
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue