mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
tracing: Rename trace_buffer to array_buffer
As we are working to remove the generic "ring_buffer" name that is used by both tracing and perf, the ring_buffer name for tracing will be renamed to trace_buffer, and perf's ring buffer will be renamed to perf_buffer. As there already exists a trace_buffer that is used by the trace_arrays, it needs to be first renamed to array_buffer. Link: https://lore.kernel.org/r/20191213153553.GE20583@krava Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
56de4e8f91
commit
1c5eb4481e
18 changed files with 195 additions and 195 deletions
|
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
|
|||
{
|
||||
struct trace_event_call *call = &event_funcgraph_entry;
|
||||
struct ring_buffer_event *event;
|
||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||
struct ring_buffer *buffer = tr->array_buffer.buffer;
|
||||
struct ftrace_graph_ent_entry *entry;
|
||||
|
||||
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
|
||||
|
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
|
|||
|
||||
local_irq_save(flags);
|
||||
cpu = raw_smp_processor_id();
|
||||
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
|
||||
data = per_cpu_ptr(tr->array_buffer.data, cpu);
|
||||
disabled = atomic_inc_return(&data->disabled);
|
||||
if (likely(disabled == 1)) {
|
||||
pc = preempt_count();
|
||||
|
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
|
|||
{
|
||||
struct trace_event_call *call = &event_funcgraph_exit;
|
||||
struct ring_buffer_event *event;
|
||||
struct ring_buffer *buffer = tr->trace_buffer.buffer;
|
||||
struct ring_buffer *buffer = tr->array_buffer.buffer;
|
||||
struct ftrace_graph_ret_entry *entry;
|
||||
|
||||
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
|
||||
|
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
|
|||
|
||||
local_irq_save(flags);
|
||||
cpu = raw_smp_processor_id();
|
||||
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
|
||||
data = per_cpu_ptr(tr->array_buffer.data, cpu);
|
||||
disabled = atomic_inc_return(&data->disabled);
|
||||
if (likely(disabled == 1)) {
|
||||
pc = preempt_count();
|
||||
|
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
|
|||
* We need to consume the current entry to see
|
||||
* the next one.
|
||||
*/
|
||||
ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
|
||||
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
|
||||
NULL, NULL);
|
||||
event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
|
||||
event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
|
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
|
|||
{
|
||||
unsigned long long usecs;
|
||||
|
||||
usecs = iter->ts - iter->trace_buffer->time_start;
|
||||
usecs = iter->ts - iter->array_buffer->time_start;
|
||||
do_div(usecs, NSEC_PER_USEC);
|
||||
|
||||
trace_seq_printf(s, "%9llu us | ", usecs);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue