Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-28 01:21:58 +00:00)
Merge tag 'trace-v4.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull more tracing updates from Steven Rostedt:
 "Three more changes.

  - I forgot that I had another selftest to stress test the ftrace
    instance creation. It was actually supposed to go into the 4.6
    merge window, but I never committed it. I almost forgot about it
    again, but noticed it was missing from your tree.

  - Soumya PN sent me a cleanup patch to not disable interrupts when
    taking the tasklist_lock for read, as it's unnecessary because that
    lock is never taken for write in irq context.

  - Newer gcc's can cause the jump in the function_graph code to the
    global ftrace_stub label to be a short jump instead of a long one.
    As that jump is dynamically converted to jump to the trace code to
    do function graph tracing, and that conversion expects a long jump,
    it can corrupt ftrace_stub itself (it's directly after that call).
    One way to prevent gcc from using a short jump is to declare
    ftrace_stub as a weak function, which we do here to keep gcc from
    optimizing too much"

* tag 'trace-v4.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace/x86: Set ftrace_stub to weak to prevent gcc from using short jumps to it
  ftrace: Don't disable irqs when taking the tasklist_lock read_lock
  ftracetest: Add instance created, delete, read and enable event test
commit 7639dad93a
3 changed files with 147 additions and 4 deletions
@@ -5737,7 +5737,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 {
 	int i;
 	int ret = 0;
-	unsigned long flags;
 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
 	struct task_struct *g, *t;
 
@@ -5753,7 +5752,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 	}
 
-	read_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	do_each_thread(g, t) {
 		if (start == end) {
 			ret = -EAGAIN;
@@ -5771,7 +5770,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 	} while_each_thread(g, t);
 
 unlock:
-	read_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 free:
 	for (i = start; i < end; i++)
 		kfree(ret_stack_list[i]);
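For context on why the change above is safe, here is a hedged illustration (not kernel code) that relies on the fact stated in the commit message: tasklist_lock is never taken for write in irq context. A read_lock() only needs the _irqsave variant when an interrupt handler on the same CPU might take the same rwlock for write, because that writer would spin forever against the read lock already held. The lock and function below are hypothetical.

	#include <linux/spinlock.h>

	/* Hypothetical rwlock whose writers only ever run in process context. */
	static DEFINE_RWLOCK(example_lock);

	static void walk_under_read_lock(void)
	{
		/*
		 * Plain read_lock() is enough: no irq handler ever takes
		 * example_lock for write, so an interrupt arriving while we
		 * hold the read lock cannot deadlock on it.
		 */
		read_lock(&example_lock);
		/* ... walk a list protected by example_lock ... */
		read_unlock(&example_lock);
	}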