Mirror of https://github.com/Fishwaldo/linux-bl808.git
tracing: Disable preemption when using the filter buffer
In case trace_event_buffer_lock_reserve() is called with preemption enabled, the algorithm that defines the usage of the per cpu filter buffer may fail if the task schedules to another CPU after determining which buffer it will use.

Disable preemption when using the filter buffer. And because that same buffer must be used throughout the call, keep preemption disabled until the filter buffer is released. This will also keep the semantics between the use case of when the filter buffer is used, and when the ring buffer itself is used, as that case also disables preemption until the ring buffer is released.

Link: https://lkml.kernel.org/r/20211130024318.880190623@goodmis.org

[ Fixed warning of assignment in if statement
  Reported-by: kernel test robot <lkp@intel.com> ]
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent e07a1d5762
commit 6c536d76cf
2 changed files with 36 additions and 27 deletions
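The rule the patch enforces is a pairing: whichever path takes the per-CPU trace_buffered_event buffer must keep preemption disabled until that same buffer is committed or discarded, so the task cannot migrate to a different CPU (and hence a different per-CPU buffer) in between. Below is a minimal kernel-style sketch of that pairing, condensed from the patched reserve/release paths; my_filter_reserve() and my_filter_release() are illustrative names, not functions from the patch, and the error handling is simplified.

	/* Sketch only: simplified reserve/release pairing for the per-CPU filter buffer. */
	static struct ring_buffer_event *my_filter_reserve(void)
	{
		struct ring_buffer_event *entry;

		/* Pin the task to this CPU before choosing its buffer. */
		preempt_disable_notrace();

		entry = __this_cpu_read(trace_buffered_event);
		if (entry && this_cpu_inc_return(trace_buffered_event_cnt) == 1)
			return entry;	/* preemption stays disabled until release */

		/* Buffer unavailable or already in use: undo and fall back. */
		if (entry)
			this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return NULL;		/* caller uses the ring buffer path instead */
	}

	static void my_filter_release(void)
	{
		/* Release the temp buffer taken in my_filter_reserve() ... */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ... and only then allow migration again. */
		preempt_enable_notrace();
	}

The actual patch applies the same pairing inside trace_event_buffer_lock_reserve(), __buffer_unlock_commit() and __trace_event_discard_commit(), as the diff below shows.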
kernel/trace/trace.c:

@@ -980,6 +980,8 @@ __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *ev
 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
 		/* Release the temp buffer */
 		this_cpu_dec(trace_buffered_event_cnt);
+		/* ring_buffer_unlock_commit() enables preemption */
+		preempt_enable_notrace();
 	} else
 		ring_buffer_unlock_commit(buffer, event);
 }
@@ -2745,8 +2747,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 	*current_rb = tr->array_buffer.buffer;

 	if (!tr->no_filter_buffering_ref &&
-	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
-	    (entry = __this_cpu_read(trace_buffered_event))) {
+	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
+		preempt_disable_notrace();
 		/*
 		 * Filtering is on, so try to use the per cpu buffer first.
 		 * This buffer will simulate a ring_buffer_event,
@@ -2764,6 +2766,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 		 * is still quicker than no copy on match, but having
 		 * to discard out of the ring buffer on a failed match.
 		 */
+		if ((entry = __this_cpu_read(trace_buffered_event))) {
 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);

 			val = this_cpu_inc_return(trace_buffered_event_cnt);
@@ -2788,10 +2791,14 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 			if (val == 1 && likely(len <= max_len)) {
 				trace_event_setup(entry, type, trace_ctx);
 				entry->array[0] = len;
+				/* Return with preemption disabled */
 				return entry;
 			}
 			this_cpu_dec(trace_buffered_event_cnt);
 		}
+		/* __trace_buffer_lock_reserve() disables preemption */
+		preempt_enable_notrace();
+	}

 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 					    trace_ctx);

kernel/trace/trace.h:

@@ -1337,10 +1337,12 @@ __trace_event_discard_commit(struct trace_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
 	if (this_cpu_read(trace_buffered_event) == event) {
-		/* Simply release the temp buffer */
+		/* Simply release the temp buffer and enable preemption */
 		this_cpu_dec(trace_buffered_event_cnt);
+		preempt_enable_notrace();
 		return;
 	}
+	/* ring_buffer_discard_commit() enables preemption */
 	ring_buffer_discard_commit(buffer, event);
 }