perf, x86: Use unlocked bitops
There is no concurrency on these variables, so don't use LOCK'ed ops.

As to the intel_pmu_handle_irq() status bit clearing, nobody uses that, so remove it altogether.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.240023029@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent aff3d91a91
commit 34538ee77b
3 changed files with 5 additions and 6 deletions
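For readers unfamiliar with the distinction the patch relies on: the kernel's set_bit()/clear_bit() are atomic read-modify-write operations (a LOCK-prefixed instruction on x86), while the double-underscore variants __set_bit()/__clear_bit() are plain, non-atomic stores that are only safe when nothing else can touch the bitmap concurrently, which is exactly the case for the on-stack used_mask and the per-CPU state touched here. Below is a minimal standalone sketch of the difference using GCC/Clang atomic builtins as a stand-in; the helper names are hypothetical, not the kernel API.

/*
 * Standalone analogue of locked vs. unlocked bitops.  The helpers below
 * are illustrative only; the real kernel implementations live in
 * arch/x86/include/asm/bitops.h.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Analogue of set_bit(): an atomic RMW, i.e. a LOCK-prefixed op on x86. */
static void locked_set_bit(unsigned int nr, unsigned long *addr)
{
	__atomic_fetch_or(&addr[nr / BITS_PER_LONG],
			  1UL << (nr % BITS_PER_LONG), __ATOMIC_SEQ_CST);
}

/* Analogue of __set_bit(): a plain read-modify-write, no bus locking. */
static void unlocked_set_bit(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long used_mask[2] = { 0, 0 };

	/* used_mask is private to this function, so the cheap variant suffices. */
	unlocked_set_bit(3, used_mask);
	locked_set_bit(67, used_mask);

	printf("%#lx %#lx\n", used_mask[0], used_mask[1]);
	return 0;
}

Functionally both variants set the same bits; the difference is that the locked form serializes the cache line on every update, which is wasted work for data only one context ever writes.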
arch/x86/kernel/cpu/perf_event.c

@@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (test_bit(hwc->idx, used_mask))
 			break;
 
-		set_bit(hwc->idx, used_mask);
+		__set_bit(hwc->idx, used_mask);
 		if (assign)
 			assign[i] = hwc->idx;
 	}
@@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (j == X86_PMC_IDX_MAX)
 			break;
 
-		set_bit(j, used_mask);
+		__set_bit(j, used_mask);
 
 		if (assign)
 			assign[i] = j;
@@ -842,7 +842,7 @@ void hw_perf_enable(void)
 			 * clear active_mask and events[] yet it preserves
 			 * idx
 			 */
-			set_bit(hwc->idx, cpuc->active_mask);
+			__set_bit(hwc->idx, cpuc->active_mask);
 			cpuc->events[hwc->idx] = event;
 
 			x86_pmu.enable(event);
@@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event)
 	 * Must be done before we disable, otherwise the nmi handler
 	 * could reenable again:
 	 */
-	clear_bit(idx, cpuc->active_mask);
+	__clear_bit(idx, cpuc->active_mask);
 	x86_pmu.disable(event);
 
 	/*
arch/x86/kernel/cpu/perf_event_amd.c

@@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	 * initialize all possible NB constraints
 	 */
 	for (i = 0; i < x86_pmu.num_events; i++) {
-		set_bit(i, nb->event_constraints[i].idxmsk);
+		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
 	return nb;
arch/x86/kernel/cpu/perf_event_intel.c

@@ -765,7 +765,6 @@ again:
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
-		clear_bit(bit, (unsigned long *) &status);
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
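The clear_bit() deleted in the Intel hunk only scrubbed bits out of the handler's local copy of the overflow status, which nothing reads afterwards. A small userspace sketch of a for_each_set_bit()-style walk (the helper below is a hypothetical stand-in, not the kernel macro) shows why: the iterator already visits each set bit exactly once on its own scratch copy, so clearing bits as a side effect in the loop body buys nothing.

/*
 * Hypothetical stand-in for a for_each_set_bit()-style loop over a single
 * word: find the lowest set bit, handle it, strip it from a scratch copy.
 */
#include <stdio.h>

static void walk_set_bits(unsigned long status)
{
	while (status) {
		int bit = __builtin_ctzl(status);	/* index of lowest set bit */

		printf("overflowed counter %d\n", bit);

		status &= status - 1;			/* drop that bit from the copy */
	}
}

int main(void)
{
	walk_set_bits(0x45UL);	/* bits 0, 2 and 6 set */
	return 0;
}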