Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-22 06:32:08 +00:00)
tracing: Allow arch-specific stack tracer
A stack frame may be used in a different way depending on cpu architecture. Thus it is not always appropriate to slurp the stack contents, as the current check_stack() does, in order to calculate a stack index (height) at a given function call, at least not on arm64. In addition, there is a possibility that we will mistakenly detect a stale stack frame which has not been overwritten.

This patch makes check_stack() a weak function so that an arch-specific version can be implemented later.

Link: http://lkml.kernel.org/r/1446182741-31019-5-git-send-email-takahiro.akashi@linaro.org
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
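For context on the mechanism: in the kernel, __weak expands to __attribute__((weak)), which emits a symbol the linker silently discards whenever a strong definition of the same name is linked in. That is what lets an architecture supply its own check_stack() with no #ifdef in the common code. A minimal two-file illustration of the pattern (plain userspace C, not kernel code):

/* common.c -- weak default, analogous to check_stack() in trace_stack.c */
#include <stdio.h>

__attribute__((weak)) void check_stack(void)
{
	printf("generic check_stack\n");
}

int main(void)
{
	check_stack();
	return 0;
}

/* arch.c -- optional strong override, analogous to arch-specific code.
 *   gcc common.c          -> prints "generic check_stack"
 *   gcc common.c arch.c   -> prints "arch-specific check_stack"
 */
#include <stdio.h>

void check_stack(void)
{
	printf("arch-specific check_stack\n");
}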
parent 2ee8a74f2a
commit bb99d8ccec
2 changed files with 54 additions and 37 deletions
include/linux/ftrace.h
@@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
+
+#define STACK_TRACE_ENTRIES 500
+
+struct stack_trace;
+
+extern unsigned stack_trace_index[];
+extern struct stack_trace stack_trace_max;
+extern unsigned long stack_trace_max_size;
+extern arch_spinlock_t max_stack_lock;
+
 extern int stack_tracer_enabled;
+void stack_trace_print(void);
 int
 stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
kernel/trace/trace_stack.c
@@ -16,24 +16,22 @@
 
 #include "trace.h"
 
-#define STACK_TRACE_ENTRIES 500
-
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
 /*
  * Reserve one entry for the passed in ip. This will allow
  * us to remove most or all of the stack size overhead
  * added by the stack tracer itself.
  */
-static struct stack_trace max_stack_trace = {
+struct stack_trace stack_trace_max = {
 	.max_entries		= STACK_TRACE_ENTRIES - 1,
 	.entries		= &stack_dump_trace[0],
 };
 
-static unsigned long max_stack_size;
-static arch_spinlock_t max_stack_lock =
+unsigned long stack_trace_max_size;
+arch_spinlock_t max_stack_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static DEFINE_PER_CPU(int, trace_active);
@@ -42,30 +40,38 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void print_max_stack(void)
+void stack_trace_print(void)
 {
 	long i;
 	int size;
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 			"        -----    ----   --------\n",
-			max_stack_trace.nr_entries);
+			stack_trace_max.nr_entries);
 
-	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+	for (i = 0; i < stack_trace_max.nr_entries; i++) {
 		if (stack_dump_trace[i] == ULONG_MAX)
 			break;
-		if (i+1 == max_stack_trace.nr_entries ||
+		if (i+1 == stack_trace_max.nr_entries ||
 				stack_dump_trace[i+1] == ULONG_MAX)
-			size = stack_dump_index[i];
+			size = stack_trace_index[i];
 		else
-			size = stack_dump_index[i] - stack_dump_index[i+1];
+			size = stack_trace_index[i] - stack_trace_index[i+1];
 
-		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
+		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
 				size, (void *)stack_dump_trace[i]);
 	}
 }
 
-static inline void
+/*
+ * When arch-specific code overides this function, the following
+ * data should be filled up, assuming max_stack_lock is held to
+ * prevent concurrent updates.
+ *     stack_trace_index[]
+ *     stack_trace_max
+ *     stack_trace_max_size
+ */
+void __weak
 check_stack(unsigned long ip, unsigned long *stack)
 {
 	unsigned long this_size, flags; unsigned long *p, *top, *start;
@@ -78,7 +84,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 	/* Remove the frame of the tracer */
 	this_size -= frame_size;
 
-	if (this_size <= max_stack_size)
+	if (this_size <= stack_trace_max_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
@@ -93,18 +99,18 @@ check_stack(unsigned long ip, unsigned long *stack)
 	this_size -= tracer_frame;
 
 	/* a race could have already updated it */
-	if (this_size <= max_stack_size)
+	if (this_size <= stack_trace_max_size)
 		goto out;
 
-	max_stack_size = this_size;
+	stack_trace_max_size = this_size;
 
-	max_stack_trace.nr_entries = 0;
-	max_stack_trace.skip = 3;
+	stack_trace_max.nr_entries = 0;
+	stack_trace_max.skip = 3;
 
-	save_stack_trace(&max_stack_trace);
+	save_stack_trace(&stack_trace_max);
 
 	/* Skip over the overhead of the stack tracer itself */
-	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+	for (i = 0; i < stack_trace_max.nr_entries; i++) {
 		if (stack_dump_trace[i] == ip)
 			break;
 	}
@@ -124,18 +130,18 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * loop will only happen once. This code only takes place
 	 * on a new max, so it is far from a fast path.
 	 */
-	while (i < max_stack_trace.nr_entries) {
+	while (i < stack_trace_max.nr_entries) {
 		int found = 0;
 
-		stack_dump_index[x] = this_size;
+		stack_trace_index[x] = this_size;
 		p = start;
 
-		for (; p < top && i < max_stack_trace.nr_entries; p++) {
+		for (; p < top && i < stack_trace_max.nr_entries; p++) {
 			if (stack_dump_trace[i] == ULONG_MAX)
 				break;
 			if (*p == stack_dump_trace[i]) {
 				stack_dump_trace[x] = stack_dump_trace[i++];
-				this_size = stack_dump_index[x++] =
+				this_size = stack_trace_index[x++] =
 					(top - p) * sizeof(unsigned long);
 				found = 1;
 				/* Start the search from here */
@@ -150,7 +156,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 				if (unlikely(!tracer_frame)) {
 					tracer_frame = (p - stack) *
 						sizeof(unsigned long);
-					max_stack_size -= tracer_frame;
+					stack_trace_max_size -= tracer_frame;
 				}
 			}
 		}
@@ -159,12 +165,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}
 
-	max_stack_trace.nr_entries = x;
+	stack_trace_max.nr_entries = x;
 	for (; x < i; x++)
 		stack_dump_trace[x] = ULONG_MAX;
 
 	if (task_stack_end_corrupted(current)) {
-		print_max_stack();
+		stack_trace_print();
 		BUG();
 	}
 
@@ -262,7 +268,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
 	long n = *pos - 1;
 
-	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
 		return NULL;
 
 	m->private = (void *)n;
@@ -332,9 +338,9 @@ static int t_show(struct seq_file *m, void *v)
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   max_stack_trace.nr_entries);
+			   stack_trace_max.nr_entries);
 
-		if (!stack_tracer_enabled && !max_stack_size)
+		if (!stack_tracer_enabled && !stack_trace_max_size)
 			print_disabled(m);
 
 		return 0;
@@ -342,17 +348,17 @@ static int t_show(struct seq_file *m, void *v)
 
 	i = *(long *)v;
 
-	if (i >= max_stack_trace.nr_entries ||
+	if (i >= stack_trace_max.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
 		return 0;
 
-	if (i+1 == max_stack_trace.nr_entries ||
+	if (i+1 == stack_trace_max.nr_entries ||
 	    stack_dump_trace[i+1] == ULONG_MAX)
-		size = stack_dump_index[i];
+		size = stack_trace_index[i];
 	else
-		size = stack_dump_index[i] - stack_dump_index[i+1];
+		size = stack_trace_index[i] - stack_trace_index[i+1];
 
-	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
+	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);
 
 	trace_lookup_stack(m, i);
 
@@ -442,7 +448,7 @@ static __init int stack_trace_init(void)
 		return 0;
 
 	trace_create_file("stack_max_size", 0644, d_tracer,
-			&max_stack_size, &stack_max_size_fops);
+			&stack_trace_max_size, &stack_max_size_fops);
 
 	trace_create_file("stack_trace", 0444, d_tracer,
 			NULL, &stack_trace_fops);
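The comment added above check_stack() doubles as the contract for any architecture that overrides it: under max_stack_lock, the override must fill in stack_trace_index[], stack_trace_max and stack_trace_max_size itself. A rough skeleton of what such an override could look like, with the arch-specific frame walk elided (this is a sketch of the intended usage, not the arm64 implementation this series later adds):

/* Hypothetical arch-side file; this strong definition makes the linker
 * drop the __weak default in kernel/trace/trace_stack.c. */
void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;

	/* arch-specific: how many bytes of stack are in use at 'ip'? */
	this_size = 0 /* ... */;

	if (this_size <= stack_trace_max_size)
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* recheck under the lock; a racing CPU may have raised the max */
	if (this_size > stack_trace_max_size) {
		stack_trace_max_size = this_size;

		stack_trace_max.nr_entries = 0;
		stack_trace_max.skip = 0;
		save_stack_trace(&stack_trace_max);

		/* arch-specific: walk the frames and record each entry's
		 * remaining depth in stack_trace_index[] */
	}

	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

With such a strong definition in place, the sysctl, ftrace hook and debugfs plumbing in trace_stack.c keep working unchanged against the now-global stack_trace_max* data.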