mmiotrace: count events lost due to not recording
Impact: enhances lost events counting in mmiotrace

The tracing framework, or the ring buffer facility it uses, has a switch to stop recording data. When recording is off, the trace events will be lost. The framework does not count these, so mmiotrace has to count them itself.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent fe6f90e57f
commit 173ed24ee2

1 changed file with 10 additions and 4 deletions
kernel/trace/trace_mmiotrace.c

@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -20,6 +21,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -122,11 +124,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -308,8 +310,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -336,8 +340,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
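The scheme the patch lands is simple but easy to get subtly wrong: every producer path atomic_inc()s a shared counter when ring_buffer_lock_reserve() fails, and the reader drains the counter with atomic_xchg() so each lost event is reported exactly once, added on top of the overrun delta the ring buffer already exposes. Below is a minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t; record_event(), report_lost() and the always-failing reserve() stub are hypothetical stand-ins for illustration, not kernel API.

/*
 * Minimal userspace sketch of the counting scheme above, using C11
 * atomics instead of the kernel's atomic_t. All function names here
 * are hypothetical; only the increment-on-failure / exchange-to-zero
 * pattern mirrors the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong dropped_count;

/* Stand-in for ring_buffer_lock_reserve() refusing an event,
 * e.g. because recording is switched off. */
static bool reserve(void)
{
	return false;
}

static void record_event(void)
{
	if (!reserve()) {
		/* Event lost: count it, as __trace_mmiotrace_rw() and
		 * __trace_mmiotrace_map() now do with atomic_inc(). */
		atomic_fetch_add(&dropped_count, 1);
		return;
	}
	/* ... fill in and commit the event ... */
}

/* Mirrors count_overruns(): drain the counter atomically so each
 * lost event is reported exactly once. */
static unsigned long report_lost(void)
{
	return atomic_exchange(&dropped_count, 0);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		record_event();
	printf("lost %lu events\n", report_lost()); /* prints "lost 5 events" */
	return 0;
}

Using an exchange rather than a read followed by a separate reset is the point of atomic_xchg() here: with a distinct load and store, an increment landing between the two operations would be silently lost.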