tracing: Remove the last struct stack_trace usage
Simplify the stack retrieval code by using the storage array based interface.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094803.340000461@linutronix.de
parent ee6dd0db4d
commit 9f50c91b11
1 changed file with 16 additions and 21 deletions
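The whole patch is one interface swap repeated through kernel/trace/trace_stack.c. As a rough kernel-internal sketch of that swap (not buildable outside a kernel tree; the demo_* names and the 64-entry buffer are illustrative and not part of the patch), compare the old struct stack_trace / save_stack_trace() pair with the storage array based stack_trace_save():

#include <linux/stacktrace.h>
#include <linux/kernel.h>

#define DEMO_NR_ENTRIES 64

static unsigned long demo_entries[DEMO_NR_ENTRIES];

/* Old interface (being removed by this series): fill in a struct
 * stack_trace and read the count back from trace.nr_entries.
 */
static unsigned int demo_save_old(void)
{
	struct stack_trace trace = {
		.entries	= demo_entries,
		.max_entries	= DEMO_NR_ENTRIES,
		.skip		= 0,
	};

	save_stack_trace(&trace);
	return trace.nr_entries;
}

/* New interface: pass the storage array directly and get the count back. */
static unsigned int demo_save_new(void)
{
	return stack_trace_save(demo_entries, DEMO_NR_ENTRIES, 0);
}

In the patch below, check_stack() keeps the existing stack_dump_trace[] array and simply stores the returned count in the new stack_trace_nr_entries variable, which then replaces every stack_trace_max.nr_entries reference.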
kernel/trace/trace_stack.c
@@ -23,11 +23,7 @@
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
 static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
-struct stack_trace stack_trace_max = {
-	.max_entries		= STACK_TRACE_ENTRIES,
-	.entries		= &stack_dump_trace[0],
-};
-
+static unsigned int stack_trace_nr_entries;
 static unsigned long stack_trace_max_size;
 static arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -44,10 +40,10 @@ static void print_max_stack(void)
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 		 "        -----    ----   --------\n",
-		 stack_trace_max.nr_entries);
+		 stack_trace_nr_entries);
 
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
-		if (i + 1 == stack_trace_max.nr_entries)
+	for (i = 0; i < stack_trace_nr_entries; i++) {
+		if (i + 1 == stack_trace_nr_entries)
 			size = stack_trace_index[i];
 		else
 			size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -93,13 +89,12 @@ static void check_stack(unsigned long ip, unsigned long *stack)
 
 	stack_trace_max_size = this_size;
 
-	stack_trace_max.nr_entries = 0;
-	stack_trace_max.skip = 0;
-
-	save_stack_trace(&stack_trace_max);
+	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
+					       ARRAY_SIZE(stack_dump_trace) - 1,
+					       0);
 
 	/* Skip over the overhead of the stack tracer itself */
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
+	for (i = 0; i < stack_trace_nr_entries; i++) {
 		if (stack_dump_trace[i] == ip)
 			break;
 	}
@@ -108,7 +103,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
 	 * Some archs may not have the passed in ip in the dump.
 	 * If that happens, we need to show everything.
 	 */
-	if (i == stack_trace_max.nr_entries)
+	if (i == stack_trace_nr_entries)
 		i = 0;
 
 	/*
@@ -126,13 +121,13 @@ static void check_stack(unsigned long ip, unsigned long *stack)
 	 * loop will only happen once. This code only takes place
 	 * on a new max, so it is far from a fast path.
 	 */
-	while (i < stack_trace_max.nr_entries) {
+	while (i < stack_trace_nr_entries) {
 		int found = 0;
 
 		stack_trace_index[x] = this_size;
 		p = start;
 
-		for (; p < top && i < stack_trace_max.nr_entries; p++) {
+		for (; p < top && i < stack_trace_nr_entries; p++) {
 			/*
 			 * The READ_ONCE_NOCHECK is used to let KASAN know that
 			 * this is not a stack-out-of-bounds error.
@@ -163,7 +158,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
 			i++;
 	}
 
-	stack_trace_max.nr_entries = x;
+	stack_trace_nr_entries = x;
 
 	if (task_stack_end_corrupted(current)) {
 		print_max_stack();
@@ -265,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
 	long n = *pos - 1;
 
-	if (n >= stack_trace_max.nr_entries)
+	if (n >= stack_trace_nr_entries)
 		return NULL;
 
 	m->private = (void *)n;
@@ -329,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   stack_trace_max.nr_entries);
+			   stack_trace_nr_entries);
 
 		if (!stack_tracer_enabled && !stack_trace_max_size)
 			print_disabled(m);
@@ -339,10 +334,10 @@ static int t_show(struct seq_file *m, void *v)
 
 	i = *(long *)v;
 
-	if (i >= stack_trace_max.nr_entries)
+	if (i >= stack_trace_nr_entries)
 		return 0;
 
-	if (i + 1 == stack_trace_max.nr_entries)
+	if (i + 1 == stack_trace_nr_entries)
 		size = stack_trace_index[i];
 	else
 		size = stack_trace_index[i] - stack_trace_index[i+1];