Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-19 13:11:14 +00:00
pstore/ftrace: Convert to its own enable/disable debugfs knob
With this patch we no longer reuse the function tracer infrastructure; instead, we register our own tracer back-end via a debugfs knob. It's a bit more code, but that is the only downside. On the bright side we have:

- The ability to make the persistent_ram module removable (when needed, we can move the ftrace_ops struct into a module). Note that persistent_ram is still not removable for other reasons, but with this patch it's one thing less to worry about;

- The pstore part is more isolated from the generic function tracer. We already tried registering our own tracer in available_tracers, but that way we lose the ability to see the traces while we record them to pstore. This solution is somewhere in the middle: we register only the "internal ftracer" back-end, not the "front-end";

- When only pstore tracing is enabled, the kernel writes solely to the pstore buffer, omitting the function tracer buffer (which, of course, can still be enabled via 'echo function > current_tracer').

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
This commit is contained in:
parent b4a871bce6
commit 65f8c95e46

7 changed files with 105 additions and 26 deletions
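The hunks below show only the removal side of the change, in kernel/trace/trace_functions.c: the function tracer loses its func_pstore option and the explicit pstore_ftrace_call() hook. The new back-end registration lives on the pstore side and is not part of the hunks shown here. As a minimal sketch of the shape such a debugfs-knob back-end takes (the identifiers pstore_ftrace_ops, pstore_ftrace_knob_write, pstore_knob_fops and the record_ftrace file name are illustrative assumptions, not quoted from the commit):

/*
 * Sketch of a pstore ftrace back-end enabled via its own debugfs knob.
 * All identifiers here are illustrative assumptions, not the commit's code.
 */
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* The actual recording into the persistent ram zone is elided. */
static void notrace pstore_ftrace_call(unsigned long ip,
				       unsigned long parent_ip)
{
	/* write (ip, parent_ip) into the pstore buffer */
}

/* A bare back-end: an ftrace_ops, not a tracer in available_tracers. */
static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func	= pstore_ftrace_call,
};

static ssize_t pstore_ftrace_knob_write(struct file *f,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	u8 on;
	ssize_t ret;

	ret = kstrtou8_from_user(buf, count, 2, &on);
	if (ret)
		return ret;

	/* Flipping the knob (un)registers the back-end directly. */
	ret = on ? register_ftrace_function(&pstore_ftrace_ops)
		 : unregister_ftrace_function(&pstore_ftrace_ops);

	return ret ? ret : count;
}

static const struct file_operations pstore_knob_fops = {
	.open	= simple_open,
	.write	= pstore_ftrace_knob_write,
};

/* Called from pstore init: creates e.g. <debugfs>/pstore/record_ftrace. */
void pstore_ftrace_register_debugfs(void)
{
	struct dentry *dir = debugfs_create_dir("pstore", NULL);

	debugfs_create_file("record_ftrace", 0600, dir, NULL,
			    &pstore_knob_fops);
}

The point of the design is visible in the write handler: toggling the knob registers or unregisters a bare ftrace_ops back-end, so no tracer front-end (and no entry in available_tracers) is involved, and the regular function tracer remains untouched.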
kernel/trace/trace_functions.c

@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,10 +74,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK = 0x1,
-	TRACE_FUNC_OPT_PSTORE = 0x2,
 };
 
 static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-		/*
-		 * So far tracing doesn't support multiple buffers, so
-		 * we make an explicit call for now.
-		 */
-		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
@@ -176,9 +168,6 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
 	{ } /* Always set a last empty entry */
 };
@@ -231,8 +220,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 			register_ftrace_function(&trace_ops);
 		}
 
-		break;
-	case TRACE_FUNC_OPT_PSTORE:
 		break;
 	default:
 		return -EINVAL;
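Assuming the knob sketched above, enabling persistent function tracing after this change no longer goes through trace_options; something like the following would start and stop recording to pstore, while the ring-buffer tracer stays independent:

  mount -t debugfs none /sys/kernel/debug
  echo 1 > /sys/kernel/debug/pstore/record_ftrace          # record to pstore only
  echo function > /sys/kernel/debug/tracing/current_tracer # optionally also trace to the ring buffer
  echo 0 > /sys/kernel/debug/pstore/record_ftrace          # stop recording

(The record_ftrace path is an assumption carried over from the sketch; the current_tracer line is quoted from the commit message.)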