Mirror of https://github.com/Fishwaldo/linux-bl808.git (synced 2025-06-17 20:25:19 +00:00)
tracing: Have dynamic size event stack traces
Currently the stack trace per event in ftrace is only 8 frames. This can be quite limiting and sometimes useless, especially when the "ignore frames" count is wrong and the event processing itself also uses up stack frames.

Change this to be dynamic by adding a percpu buffer that we can write a large stack trace into and then copy into the ring buffer.

Interrupts and NMIs that come in while another event is being processed will only get to use the 8-frame stack. That should be enough, as the task they interrupted will have the full stack trace anyway.

Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
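As a rough illustration of the mechanism the message describes, here is a minimal sketch (not the patch itself); the identifiers below, such as ftrace_stack_buf, ftrace_stack_nesting and MAX_STACK_ENTRIES, are placeholders rather than the names used in the actual change:

/*
 * Illustrative sketch only: a large per-CPU scratch buffer for kernel
 * stack traces, plus a nesting counter so that an interrupt or NMI
 * arriving while the buffer is in use falls back to the old fixed
 * 8-entry stack.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>

#define MAX_STACK_ENTRIES	128	/* large scratch stack (placeholder size) */
#define SHORT_STACK_ENTRIES	8	/* old fixed-size fallback */

struct ftrace_stack_buf {
	unsigned long calls[MAX_STACK_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack_buf, ftrace_stack_buf);
static DEFINE_PER_CPU(int, ftrace_stack_nesting);

static void record_stack_trace(void)
{
	struct ftrace_stack_buf *buf;
	int max_frames;

	preempt_disable();

	/*
	 * The first (non-nested) user on this CPU reserves the large
	 * scratch buffer; an interrupt or NMI that nests inside event
	 * processing falls back to the small 8-frame stack, which is
	 * fine because the interrupted context records the full trace.
	 */
	if (this_cpu_inc_return(ftrace_stack_nesting) == 1) {
		buf = this_cpu_ptr(&ftrace_stack_buf);
		max_frames = MAX_STACK_ENTRIES;
	} else {
		buf = NULL;		/* use the small on-event stack instead */
		max_frames = SHORT_STACK_ENTRIES;
	}

	/*
	 * ... save up to max_frames return addresses (into buf->calls when
	 * buf is set), then copy only the frames actually used into the
	 * ring buffer as a dynamically sized event ...
	 */
	(void)buf;
	(void)max_frames;

	this_cpu_dec(ftrace_stack_nesting);
	preempt_enable();
}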
parent 259032bfe3
commit 4a9bd3f134
4 changed files with 89 additions and 20 deletions
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 {
 	struct stack_entry *field;
 	struct trace_seq *s = &iter->seq;
-	int i;
+	unsigned long *p;
+	unsigned long *end;
 
 	trace_assign_type(field, iter->ent);
+	end = (unsigned long *)((long)iter->ent + iter->ent_size);
 
 	if (!trace_seq_puts(s, "<stack trace>\n"))
 		goto partial;
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
-			break;
+
+	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
 		if (!trace_seq_puts(s, " => "))
 			goto partial;
 
-		if (!seq_print_ip_sym(s, field->caller[i], flags))
+		if (!seq_print_ip_sym(s, *p, flags))
 			goto partial;
 		if (!trace_seq_puts(s, "\n"))
 			goto partial;
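The hunk above is only the output side: the printer stops treating the trace as a fixed 8-entry array and instead walks the caller array until it hits the ULONG_MAX terminator or the end of the record derived from the dynamic entry size. To make that loop bound explicit, here is a small standalone userspace sketch of the same termination logic (hypothetical names, not kernel code):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the ftrace event header; the real event embeds struct trace_entry. */
struct stack_entry {
	int		type;
	unsigned long	caller[];	/* variable number of return addresses */
};

static void print_stack(const struct stack_entry *field, size_t ent_size)
{
	/* End of the record, computed from the dynamic entry size. */
	const unsigned long *end =
		(const unsigned long *)((const char *)field + ent_size);
	const unsigned long *p;

	puts("<stack trace>");
	/* Stop at the ULONG_MAX sentinel or at the end of the record. */
	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++)
		printf(" => %#lx\n", *p);
}

int main(void)
{
	/* A fake 3-frame trace followed by the ULONG_MAX terminator. */
	unsigned long frames[] = { 0xc0ffee10, 0xc0ffee20, 0xc0ffee30, ULONG_MAX };
	size_t ent_size = sizeof(struct stack_entry) + sizeof(frames);
	struct stack_entry *field = malloc(ent_size);

	if (!field)
		return 1;
	field->type = 0;
	memcpy(field->caller, frames, sizeof(frames));
	print_stack(field, ent_size);
	free(field);
	return 0;
}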