ftrace: only have ftrace_kill atomic
When an anomaly is detected, we need a way to completely disable ftrace. Right now we have two functions: ftrace_kill and ftrace_kill_atomic. ftrace_kill tries to do it in a "nice" way by converting everything back to nops. The "nice" way is dangerous in itself, so this patch removes it and keeps only the "atomic" version, which is all that is needed.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
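For orientation, the kill path that survives this patch, pieced together from the hunks below (the few lines that fall between the two kernel/trace/ftrace.c hunks are elided), is roughly this lock-free sketch:

	/*
	 * Sketch of the remaining ftrace_kill(), reconstructed from the hunks
	 * below; lines between the two kernel/trace/ftrace.c hunks are elided.
	 * It takes no locks and schedules no work, which is why it is safe to
	 * call from atomic and panic context.
	 */
	void ftrace_kill(void)
	{
		ftrace_disabled = 1;		/* gate off any further code modification */
		ftrace_enabled = 0;		/* stop invoking the tracer */
		/* ... */
		clear_ftrace_function();	/* point the trace hook back at ftrace_stub */
	}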
parent ab9a0918cb
commit 81adbdc029
3 changed files with 4 additions and 43 deletions
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -40,7 +40,7 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
 # define register_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
-static inline void ftrace_kill_atomic(void) { }
+static inline void ftrace_kill(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -117,7 +117,6 @@ static inline void ftrace_release(void *start, unsigned long size) { }
 
 /* totally disable ftrace - can not re-enable after this */
 void ftrace_kill(void);
-void ftrace_kill_atomic(void);
 
 static inline void tracer_disable(void)
 {
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1549,22 +1549,6 @@ int ftrace_force_update(void)
 	return ret;
 }
 
-static void ftrace_force_shutdown(void)
-{
-	struct task_struct *task;
-	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
-
-	mutex_lock(&ftraced_lock);
-	task = ftraced_task;
-	ftraced_task = NULL;
-	ftraced_suspend = -1;
-	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
-
-	if (task)
-		kthread_stop(task);
-}
-
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -1795,17 +1779,16 @@ core_initcall(ftrace_dynamic_init);
 # define ftrace_shutdown() do { } while (0)
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
-# define ftrace_force_shutdown() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
- * ftrace_kill_atomic - kill ftrace from critical sections
+ * ftrace_kill - kill ftrace
  *
  * This function should be used by panic code. It stops ftrace
  * but in a not so nice way. If you need to simply kill ftrace
  * from a non-atomic section, use ftrace_kill.
  */
-void ftrace_kill_atomic(void)
+void ftrace_kill(void)
 {
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
@@ -1815,27 +1798,6 @@ void ftrace_kill_atomic(void)
 	clear_ftrace_function();
 }
 
-/**
- * ftrace_kill - totally shutdown ftrace
- *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
- */
-void ftrace_kill(void)
-{
-	mutex_lock(&ftrace_sysctl_lock);
-	ftrace_disabled = 1;
-	ftrace_enabled = 0;
-
-	clear_ftrace_function();
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	/* Try to totally disable ftrace */
-	ftrace_force_shutdown();
-}
-
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3097,7 +3097,7 @@ void ftrace_dump(void)
 	dump_ran = 1;
 
 	/* No turning back! */
-	ftrace_kill_atomic();
+	ftrace_kill();
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&global_trace.data[cpu]->disabled);
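As the last hunk shows, ftrace_dump() already uses the renamed entry point on its "no turning back" path. A minimal, purely illustrative sketch of how any other anomaly check is meant to react after this patch (detected_bad_mcount_site() and ftrace_anomaly_check() are made-up names; only ftrace_kill() is real):

	/*
	 * Illustration only: detected_bad_mcount_site() is a hypothetical check,
	 * not kernel code.  ftrace_kill() is a one-way switch (tracing can not be
	 * re-enabled afterwards) and, after this patch, it is safe to call even
	 * with interrupts disabled or from panic context.
	 */
	static void ftrace_anomaly_check(void)
	{
		if (detected_bad_mcount_site())
			ftrace_kill();
	}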