perf_events: Undo some recursion damage

Make perf_swevent_get_recursion_context return a context number
and disable preemption.

This can then be used to remove the IRQ disable from the trace
path and to index the per-cpu buffer.
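
A minimal sketch of the new calling convention, mirroring what
__perf_sw_event() does after this patch (the event emission itself
is elided):

	int rctx;

	/* Pins the CPU via get_cpu_var() and returns the context
	 * index, or -1 if this context is already in a swevent. */
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	/* ... emit the software event ... */

	/* Decrements the recursion count and drops the CPU reference,
	 * re-enabling preemption. */
	perf_swevent_put_recursion_context(rctx);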

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091123103819.993226816@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 4ed7c92d68 (parent f67218c3e9)
Author:    Peter Zijlstra, 2009-11-23 11:37:29 +01:00
Committer: Ingo Molnar
5 changed files with 61 additions and 63 deletions

kernel/perf_event.c:

@@ -3869,45 +3869,50 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 	}
 }
 
-/*
- * Must be called with preemption disabled
- */
-int perf_swevent_get_recursion_context(int **recursion)
+int perf_swevent_get_recursion_context(void)
 {
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	int rctx;
 
 	if (in_nmi())
-		*recursion = &cpuctx->recursion[3];
+		rctx = 3;
 	else if (in_irq())
-		*recursion = &cpuctx->recursion[2];
+		rctx = 2;
 	else if (in_softirq())
-		*recursion = &cpuctx->recursion[1];
+		rctx = 1;
 	else
-		*recursion = &cpuctx->recursion[0];
+		rctx = 0;
 
-	if (**recursion)
+	if (cpuctx->recursion[rctx]) {
+		put_cpu_var(perf_cpu_context);
 		return -1;
+	}
 
-	(**recursion)++;
+	cpuctx->recursion[rctx]++;
 	barrier();
 
-	return 0;
+	return rctx;
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void perf_swevent_put_recursion_context(int *recursion)
+void perf_swevent_put_recursion_context(int rctx)
 {
-	(*recursion)--;
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	barrier();
+	cpuctx->recursion[rctx]--;
+	put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
-static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
-			       u64 nr, int nmi,
-			       struct perf_sample_data *data,
-			       struct pt_regs *regs)
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+			     u64 nr, int nmi,
+			     struct perf_sample_data *data,
+			     struct pt_regs *regs)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	rcu_read_lock();
 	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
 				 nr, nmi, data, regs);
@@ -3921,34 +3926,22 @@ static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	rcu_read_unlock();
 }
 
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-			     u64 nr, int nmi,
-			     struct perf_sample_data *data,
-			     struct pt_regs *regs)
-{
-	int *recursion;
-
-	preempt_disable();
-
-	if (perf_swevent_get_recursion_context(&recursion))
-		goto out;
-
-	__do_perf_sw_event(type, event_id, nr, nmi, data, regs);
-
-	perf_swevent_put_recursion_context(recursion);
-out:
-	preempt_enable();
-}
-
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 			    struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data;
+	int rctx;
+
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
+		return;
 
 	data.addr = addr;
 	data.raw  = NULL;
 
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+
+	perf_swevent_put_recursion_context(rctx);
 }
 
 static void perf_swevent_read(struct perf_event *event)
@@ -4172,7 +4165,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	regs = task_pt_regs(current);
 
 	/* Trace events already protected against recursion */
-	__do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 				&data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
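
A hedged sketch of the changelog's buffer-indexing idea; the names
trace_buf, TRACE_BUF_SIZE and trace_something() are hypothetical and
not part of this commit:

	/* Hypothetical: one per-cpu buffer per recursion level (task,
	 * softirq, irq, NMI). Nested contexts never share a slot, so
	 * the buffer can be filled without disabling IRQs. */
	#define TRACE_BUF_SIZE 1024
	static DEFINE_PER_CPU(char [4][TRACE_BUF_SIZE], trace_buf);

	void trace_something(void)
	{
		char *buf;
		int rctx;

		rctx = perf_swevent_get_recursion_context();
		if (rctx < 0)
			return;

		/* Preemption is off, so the per-cpu slot is stable. */
		buf = __get_cpu_var(trace_buf)[rctx];
		/* ... fill buf and hand it to perf ... */

		perf_swevent_put_recursion_context(rctx);
	}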