tile: improve stack backtrace
This commit fixes a number of issues with the tile backtrace code.

- Don't try to identify userspace shared object or executable paths if we
  are doing a backtrace from an interrupt; it's not legal, and also
  unlikely to be interesting. Likewise, don't try to do it for other
  address spaces, since d_path() assumes it is being called in "current"
  context.

- Move "in_backtrace" from thread_struct to thread_info. This way we can
  access it even if our stack thread_info has been clobbered, which makes
  backtracing more robust.

- Avoid using "current" directly when testing for is_sigreturn(). Since
  "current" may be corrupt, we're better off using kbt->task explicitly to
  look up the vdso_base for the current task. Conveniently, this simplifies
  the internal APIs (we only need one is_sigreturn() function now).

- Avoid a bogus "Odd fault" warning when pc/sp/ex1 are all zero, as is true
  for kernel threads above the last frame.

- Hook into Tejun Heo's dump_stack() framework in lib/dump_stack.c.

- Write the last entry in save_stack_trace() as ULONG_MAX, not zero, since
  ftrace (at least) relies on finding that marker (see the sketch after
  this message).

- Implement save_stack_trace_regs() and save_stack_trace_user(), and set
  CONFIG_USER_STACKTRACE_SUPPORT.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
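A note on the end-of-trace marker: generic consumers of struct stack_trace, ftrace among them, stop at ULONG_MAX rather than trusting nr_entries alone, which is why the commit writes that value instead of zero. A minimal consumer sketch against the struct stack_trace API of this kernel generation; the helper name and the pr_info() output are illustrative assumptions, not part of this commit:

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	/* Walk a trace, honoring the ULONG_MAX end-of-trace marker. */
	static void demo_print_trace(const struct stack_trace *trace)
	{
		unsigned int i;

		for (i = 0; i < trace->nr_entries; i++) {
			if (trace->entries[i] == ULONG_MAX)
				break;	/* the marker this commit now writes */
			pr_info("  %pS\n", (void *)trace->entries[i]);
		}
	}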
parent e8200baa2f
commit 47ad7b9bbe
9 changed files with 107 additions and 82 deletions
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/hardirq.h>
 #include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
@@ -109,7 +110,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		if (kbt->verbose)
 			pr_err("  <%s while in user mode>\n", fault);
 	} else {
-		if (kbt->verbose)
+		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
 			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
 			       p->pc, p->sp, p->ex1);
 		return NULL;
@@ -119,10 +120,12 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	return p;
 }
 
-/* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(unsigned long pc)
+/* Is the iterator pointing to a sigreturn trampoline? */
+static int is_sigreturn(struct KBacktraceIterator *kbt)
 {
-	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
+	return kbt->task->mm &&
+	       (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
+			       (ulong)&__vdso_rt_sigreturn));
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
@@ -131,7 +134,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
+	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
 	    b->sp % sizeof(long) == 0) {
 		int retval;
 		pagefault_disable();
@@ -151,11 +154,6 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 	return NULL;
 }
 
-static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
-{
-	return is_sigreturn(kbt->it.pc);
-}
-
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
@@ -178,7 +176,7 @@ static int KBacktraceIterator_next_item_inclusive(
 {
 	for (;;) {
 		do {
-			if (!KBacktraceIterator_is_sigreturn(kbt))
+			if (!is_sigreturn(kbt))
 				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
 
@@ -357,51 +355,50 @@ static void describe_addr(struct KBacktraceIterator *kbt,
  */
 static bool start_backtrace(void)
 {
-	if (current->thread.in_backtrace) {
+	if (current_thread_info()->in_backtrace) {
 		pr_err("Backtrace requested while in backtrace!\n");
 		return false;
 	}
-	current->thread.in_backtrace = true;
+	current_thread_info()->in_backtrace = true;
 	return true;
 }
 
 static void end_backtrace(void)
 {
-	current->thread.in_backtrace = false;
+	current_thread_info()->in_backtrace = false;
 }
 
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
+void tile_show_stack(struct KBacktraceIterator *kbt)
 {
 	int i;
 	int have_mmap_sem = 0;
 
 	if (!start_backtrace())
 		return;
-	if (headers) {
-		/*
-		 * Add a blank line since if we are called from panic(),
-		 * then bust_spinlocks() spit out a space in front of us
-		 * and it will mess up our KERN_ERR.
-		 */
-		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
-		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-		       raw_smp_processor_id(), get_cycles());
-	}
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
 		char namebuf[KSYM_NAME_LEN+100];
 		unsigned long address = kbt->it.pc;
 
-		/* Try to acquire the mmap_sem as we pass into userspace. */
-		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+		/*
+		 * Try to acquire the mmap_sem as we pass into userspace.
+		 * If we're in an interrupt context, don't even try, since
+		 * it's not safe to call e.g. d_path() from an interrupt,
+		 * since it uses spin locks without disabling interrupts.
+		 * Note we test "kbt->task == current", not "kbt->is_current",
+		 * since we're checking that "current" will work in d_path().
+		 */
+		if (kbt->task == current && address < PAGE_OFFSET &&
+		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
 			have_mmap_sem =
 				down_read_trylock(&kbt->task->mm->mmap_sem);
+		}
 
 		describe_addr(kbt, address, have_mmap_sem,
 			      namebuf, sizeof(namebuf));
@@ -416,24 +413,12 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	}
 	if (kbt->end == KBT_LOOP)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
-	if (headers)
-		pr_err("Stack dump complete\n");
 	if (have_mmap_sem)
 		up_read(&kbt->task->mm->mmap_sem);
 	end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
 
-
-/* This is called from show_regs() and _dump_stack() */
-void dump_stack_regs(struct pt_regs *regs)
-{
-	struct KBacktraceIterator kbt;
-	KBacktraceIterator_init(&kbt, NULL, regs);
-	tile_show_stack(&kbt, 1);
-}
-EXPORT_SYMBOL(dump_stack_regs);
-
 static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 				       ulong pc, ulong lr, ulong sp, ulong r52)
 {
@@ -445,11 +430,15 @@ static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 	return regs;
 }
 
-/* This is called from dump_stack() and just converts to pt_regs */
+/* Deprecated function currently only used by kernel_double_fault(). */
 void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
+	struct KBacktraceIterator kbt;
 	struct pt_regs regs;
-	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
+
+	regs_to_pt_regs(&regs, pc, lr, sp, r52);
+	KBacktraceIterator_init(&kbt, NULL, &regs);
+	tile_show_stack(&kbt);
 }
 
 /* This is called from KBacktraceIterator_init_current() */
@@ -461,22 +450,30 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 			regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched/core.c, with esp == NULL */
+/*
+ * Called from sched_show_task() with task != NULL, or dump_stack()
+ * with task == NULL. The esp argument is always NULL.
+ */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
-	if (task == NULL || task == current)
+	if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		KBacktraceIterator_next(&kbt); /* don't show first frame */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
-	tile_show_stack(&kbt, 0);
+	}
+	tile_show_stack(&kbt);
 }
 
 #ifdef CONFIG_STACKTRACE
 
 /* Support generic Linux stack API too */
 
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+static void save_stack_trace_common(struct task_struct *task,
+				    struct pt_regs *regs,
+				    bool user,
+				    struct stack_trace *trace)
 {
 	struct KBacktraceIterator kbt;
 	int skip = trace->skip;
@@ -484,31 +481,57 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 
 	if (!start_backtrace())
 		goto done;
-	if (task == NULL || task == current)
+	if (regs != NULL) {
+		KBacktraceIterator_init(&kbt, NULL, regs);
+	} else if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		skip++; /* don't show KBacktraceIterator_init_current */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
+	}
 	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
 		if (skip) {
 			--skip;
 			continue;
 		}
-		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
+		if (i >= trace->max_entries ||
+		    (!user && kbt.it.pc < PAGE_OFFSET))
 			break;
 		trace->entries[i++] = kbt.it.pc;
 	}
 	end_backtrace();
 done:
+	if (i < trace->max_entries)
+		trace->entries[i++] = ULONG_MAX;
 	trace->nr_entries = i;
 }
 
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	save_stack_trace_common(task, NULL, false, trace);
+}
 EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	save_stack_trace_tsk(NULL, trace);
+	save_stack_trace_common(NULL, NULL, false, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	save_stack_trace_common(NULL, regs, false, trace);
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/* Trace user stack if we are not a kernel thread. */
+	if (current->mm)
+		save_stack_trace_common(NULL, task_pt_regs(current),
+					true, trace);
+	else if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 #endif
 
 /* In entry.S */
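For reference, a hedged sketch of how a caller might exercise the two new entry points; the buffer depth and the demo function are illustrative assumptions, not code from this commit:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	static unsigned long demo_entries[16];	/* illustrative depth */

	static void demo_traces(struct pt_regs *regs)
	{
		struct stack_trace trace = {
			.entries	= demo_entries,
			.max_entries	= ARRAY_SIZE(demo_entries),
			.skip		= 0,
		};

		/* Kernel trace starting from an explicit register set. */
		save_stack_trace_regs(regs, &trace);

		/*
		 * User-space trace of the current task; for kernel threads
		 * (no mm) only the ULONG_MAX marker is stored. Available
		 * now that tile sets CONFIG_USER_STACKTRACE_SUPPORT.
		 */
		trace.nr_entries = 0;
		save_stack_trace_user(&trace);
	}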