tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event has reserved data on one of the buffers, and the latency tracer swaps the global buffer with the max buffer, the event may commit its data to the wrong buffer. This patch changes the trace recording API to receive the buffer that was used to reserve a commit. That same buffer can then be passed in to the commit.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
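To make the race concrete, the following is a minimal userspace sketch, not kernel code: the struct names mirror the kernel's, but rb_reserve and rb_commit are hypothetical stand-ins for the real reserve/commit calls.

/*
 * Simplified userspace illustration of the race this commit closes.
 * rb_reserve/rb_commit are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

struct ring_buffer { const char *name; };
struct trace_array { struct ring_buffer *buffer; };

static struct ring_buffer global_rb = { "global" };
static struct ring_buffer max_rb    = { "max" };

/* Reserve space on a buffer; the caller must commit to this same buffer. */
static struct ring_buffer *rb_reserve(struct ring_buffer *rb)
{
	printf("reserved space on %s buffer\n", rb->name);
	return rb;
}

static void rb_commit(struct ring_buffer *rb)
{
	printf("committed to %s buffer\n", rb->name);
}

int main(void)
{
	struct trace_array tr = { .buffer = &global_rb };

	/* An event reserves space; tr.buffer is read at this point. */
	struct ring_buffer *buffer = rb_reserve(tr.buffer);

	/* A latency tracer swaps buffers between reserve and commit. */
	tr.buffer = &max_rb;

	/* Old pattern: re-read tr.buffer and commit to the wrong buffer. */
	rb_commit(tr.buffer);	/* prints "max": data was reserved on "global" */

	/* New pattern: commit to the buffer the reserve actually used. */
	rb_commit(buffer);	/* prints "global": matches the reservation */

	return 0;
}

Capturing tr->buffer once at reserve time and handing that same pointer to the commit is exactly what the hunks below do with the new local buffer variable.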
parent f633903af2
commit e77405ad80
12 changed files with 163 additions and 109 deletions
kernel/trace/trace_functions_graph.c
@@ -173,19 +173,20 @@ static int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -236,19 +237,20 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)