mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-06 06:21:31 +00:00
ftrace: Add context level recursion bit checking
Currently for recursion checking in the function tracer, ftrace tests a task_struct bit to determine if the function tracer had recursed or not. If it has, then it will return without going further. But this leads to races. If an interrupt came in after the bit was set, the functions being traced would see that bit set and think that the function tracer recursed on itself, and would return. Instead add a bit for each context (normal, softirq, irq and nmi). A check of which context the task is in is made before testing the associated bit. Now if an interrupt preempts the function tracer after the previous context has been set, the interrupt functions can still be traced. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
0a016409e4
commit
c29f122cd7
2 changed files with 42 additions and 10 deletions
|
@ -156,14 +156,27 @@ static void
|
|||
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
|
||||
struct ftrace_ops *op, struct pt_regs *regs)
|
||||
{
|
||||
if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
|
||||
int bit;
|
||||
|
||||
if (in_interrupt()) {
|
||||
if (in_nmi())
|
||||
bit = TRACE_GLOBAL_NMI_BIT;
|
||||
|
||||
else if (in_irq())
|
||||
bit = TRACE_GLOBAL_IRQ_BIT;
|
||||
else
|
||||
bit = TRACE_GLOBAL_SIRQ_BIT;
|
||||
} else
|
||||
bit = TRACE_GLOBAL_BIT;
|
||||
|
||||
if (unlikely(trace_recursion_test(bit)))
|
||||
return;
|
||||
|
||||
trace_recursion_set(TRACE_GLOBAL_BIT);
|
||||
trace_recursion_set(bit);
|
||||
do_for_each_ftrace_op(op, ftrace_global_list) {
|
||||
op->func(ip, parent_ip, op, regs);
|
||||
} while_for_each_ftrace_op(op);
|
||||
trace_recursion_clear(TRACE_GLOBAL_BIT);
|
||||
trace_recursion_clear(bit);
|
||||
}
|
||||
|
||||
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
|
||||
|
@ -4132,14 +4145,27 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
|
|||
struct ftrace_ops *ignored, struct pt_regs *regs)
|
||||
{
|
||||
struct ftrace_ops *op;
|
||||
unsigned int bit;
|
||||
|
||||
if (function_trace_stop)
|
||||
return;
|
||||
|
||||
if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
|
||||
return;
|
||||
if (in_interrupt()) {
|
||||
if (in_nmi())
|
||||
bit = TRACE_INTERNAL_NMI_BIT;
|
||||
|
||||
else if (in_irq())
|
||||
bit = TRACE_INTERNAL_IRQ_BIT;
|
||||
else
|
||||
bit = TRACE_INTERNAL_SIRQ_BIT;
|
||||
} else
|
||||
bit = TRACE_INTERNAL_BIT;
|
||||
|
||||
if (unlikely(trace_recursion_test(bit)))
|
||||
return;
|
||||
|
||||
trace_recursion_set(bit);
|
||||
|
||||
trace_recursion_set(TRACE_INTERNAL_BIT);
|
||||
/*
|
||||
* Some of the ops may be dynamically allocated,
|
||||
* they must be freed after a synchronize_sched().
|
||||
|
@ -4150,7 +4176,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
|
|||
op->func(ip, parent_ip, op, regs);
|
||||
} while_for_each_ftrace_op(op);
|
||||
preempt_enable_notrace();
|
||||
trace_recursion_clear(TRACE_INTERNAL_BIT);
|
||||
trace_recursion_clear(bit);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue