Merge branches 'tracing/ftrace' and 'tracing/urgent' into tracing/core

Conflicts:
	kernel/trace/ring_buffer.c
Ingo Molnar 2008-11-12 10:11:37 +01:00
commit d06bbd6695
182 changed files with 6942 additions and 3629 deletions

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c

@@ -18,6 +18,35 @@
 
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	ring_buffers_off = 1;
+}
+
 #include "trace.h"
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
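
The tracing_on()/tracing_off() pair added above gives kernel code a global kill switch for ring buffer recording; the debugfs file wired up at the bottom of this diff exposes the same flag to userspace. A minimal kernel-side usage sketch (the my_* names and the consistency check are invented for illustration):

/*
 * Hypothetical debugging aid: freeze all ring buffer recording the
 * moment corruption is detected, so the trace ends at the failure
 * instead of being overwritten by later events.
 */
static void my_validate(struct my_state *s)
{
	if (WARN_ON(s->magic != MY_MAGIC))
		tracing_off();	/* recording stops; tracers keep running */
}

Note that tracing_off() only makes writes to the buffers fail, as the kernel-doc above says: any tracer overhead remains until the tracer itself is disabled.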
@@ -154,6 +183,7 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
+	spinlock_t			reader_lock; /* serialize readers */
 	raw_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		pages;
@@ -190,60 +220,14 @@ struct ring_buffer_iter {
 
 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
+	({							\
+		int _____ret = unlikely(cond);			\
+		if (_____ret) {					\
 			atomic_inc(&buffer->record_disabled);	\
 			WARN_ON(1);				\
 		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return;					\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET_INT(buffer, cond)			\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return -1;				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET_NULL(buffer, cond)			\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return NULL;				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_ONCE(buffer, cond)				\
-	do {							\
-		static int once;				\
-		if (unlikely(cond) && !once) {			\
-			once++;					\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-		}						\
-	} while (0)
-
-/* buffer must be ring_buffer not per_cpu */
-#define RB_WARN_ON_UNLOCK(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			mutex_unlock(&buffer->mutex);		\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return -1;				\
-		}						\
-	} while (0)
+		_____ret;					\
+	})
 
 /**
  * check_pages - integrity check of buffer pages
@@ -257,14 +241,18 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	struct list_head *head = &cpu_buffer->pages;
 	struct buffer_page *page, *tmp;
 
-	RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
-	RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
+	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+		return -1;
+	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+		return -1;
 
 	list_for_each_entry_safe(page, tmp, head, list) {
-		RB_WARN_ON_RET_INT(cpu_buffer,
-				page->list.next->prev != &page->list);
-		RB_WARN_ON_RET_INT(cpu_buffer,
-				page->list.prev->next != &page->list);
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.next->prev != &page->list))
+			return -1;
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.prev->next != &page->list))
+			return -1;
 	}
 
 	return 0;
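
The rewritten RB_WARN_ON above uses a GNU C statement expression, ({ ... }), which evaluates to its last expression. That is what lets one macro replace the whole RB_WARN_ON_RET{,_INT,_NULL} family: the caller now decides how to bail out, as in the if (RB_WARN_ON(...)) return -1; call sites in rb_check_pages(). A self-contained userspace sketch of the idiom (names invented; builds with gcc or clang, both of which support statement expressions):

#include <stdio.h>

#define WARN_ON_COND(cond)					\
	({							\
		int _ret = !!(cond);				\
		if (_ret)					\
			fprintf(stderr, "warning: %s\n", #cond);\
		_ret;						\
	})

int main(void)
{
	int count = 5;

	/* warns and evaluates to 1, so the caller can bail out */
	if (WARN_ON_COND(count > 3))
		return 1;
	return 0;
}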
@@ -321,6 +309,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
+	spin_lock_init(&cpu_buffer->reader_lock);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
@@ -470,13 +459,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+			return;
 		p = cpu_buffer->pages.next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
-	RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+		return;
 
 	rb_reset_cpu(cpu_buffer);
@@ -498,7 +489,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+			return;
 		p = pages->next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
@@ -553,7 +545,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size < buffer_size) {
 
 		/* easy case, just free pages */
-		RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
+		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+			mutex_unlock(&buffer->mutex);
+			return -1;
+		}
 
 		rm_pages = buffer->pages - nr_pages;
@@ -572,7 +567,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	 * add these pages to the cpu_buffers. Otherwise we just free
 	 * them all and return -ENOMEM;
 	 */
-	RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
+	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
 
 	new_pages = nr_pages - buffer->pages;
@@ -596,7 +594,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		rb_insert_pages(cpu_buffer, &pages, new_pages);
 	}
 
-	RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
+	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
 
  out:
 	buffer->pages = nr_pages;
@@ -684,7 +685,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 	     head += rb_event_length(event)) {
 
 		event = __rb_page_index(cpu_buffer->head_page, head);
-		RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
+		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+			return;
 		/* Only count data entries */
 		if (event->type != RINGBUF_TYPE_DATA)
 			continue;
@@ -737,8 +739,9 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
 	addr &= PAGE_MASK;
 
 	while (cpu_buffer->commit_page->page != (void *)addr) {
-		RB_WARN_ON(cpu_buffer,
-			   cpu_buffer->commit_page == cpu_buffer->tail_page);
+		if (RB_WARN_ON(cpu_buffer,
+			  cpu_buffer->commit_page == cpu_buffer->tail_page))
+			return;
 		cpu_buffer->commit_page->commit =
 			cpu_buffer->commit_page->write;
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@ -894,7 +897,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		reader_page = cpu_buffer->reader_page;
 
 		/* we grabbed the lock before incrementing */
-		RB_WARN_ON(cpu_buffer, next_page == reader_page);
+		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+			goto out_unlock;
 
 		/*
 		 * If for some reason, we had an interrupt storm that made
@@ -971,7 +975,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
 
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
@@ -1070,10 +1075,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	 * storm or we have something buggy.
 	 * Bail!
 	 */
-	if (unlikely(++nr_loops > 1000)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		return NULL;
-	}
 
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
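
The same conversion also absorbs the livelock guards: ++nr_loops > 1000 is now the warned-on condition itself. The underlying pattern, bounding a retry loop so a livelock becomes a loud failure rather than a silent hang, works outside the kernel too; a runnable sketch with invented names:

#include <stdio.h>

static int attempts;

/* Stand-in for a reservation that keeps getting raced. */
static int try_reserve(void)
{
	return ++attempts < 5 ? -1 : 0;
}

int main(void)
{
	int nr_loops = 0;

	while (try_reserve() != 0) {
		/* bounded retries: warn and bail instead of spinning */
		if (++nr_loops > 1000) {
			fprintf(stderr, "bailing: possible livelock\n");
			return 1;
		}
	}
	printf("reserved after %d retries\n", nr_loops);
	return 0;
}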
@@ -1169,6 +1172,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return NULL;
+
 	if (atomic_read(&buffer->record_disabled))
 		return NULL;
@@ -1278,6 +1284,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return -EBUSY;
+
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
@@ -1476,6 +1485,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	/* Iterator usage is expected to have record disabled */
 	if (list_empty(&cpu_buffer->reader_page->list)) {
@@ -1489,6 +1501,8 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
@@ -1584,8 +1598,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * a case where we will loop three times. There should be no
 	 * reason to loop four times (that I know of).
 	 */
-	if (unlikely(++nr_loops > 3)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
 		reader = NULL;
 		goto out;
 	}
@@ -1597,8 +1610,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto out;
 
 	/* Never should we have an index greater than the size */
-	RB_WARN_ON(cpu_buffer,
-		   cpu_buffer->reader_page->read > rb_page_size(reader));
+	if (RB_WARN_ON(cpu_buffer,
+		       cpu_buffer->reader_page->read > rb_page_size(reader)))
+		goto out;
 
 	/* check if we caught up to the tail */
 	reader = NULL;
@@ -1652,7 +1666,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 	reader = rb_get_reader_page(cpu_buffer);
 
 	/* This function should not be called when buffer is empty */
-	RB_WARN_ON_RET(cpu_buffer, !reader);
+	if (RB_WARN_ON(cpu_buffer, !reader))
+		return;
 
 	event = rb_reader_event(cpu_buffer);
@@ -1679,8 +1694,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	 * Check if we are at the end of the buffer.
 	 */
 	if (iter->head >= rb_page_size(iter->head_page)) {
-		RB_WARN_ON_RET(buffer,
-			       iter->head_page == cpu_buffer->commit_page);
+		if (RB_WARN_ON(buffer,
+			       iter->head_page == cpu_buffer->commit_page))
+			return;
 		rb_inc_iter(iter);
 		return;
 	}
@@ -1693,9 +1709,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	 * This should not be called to advance the header if we are
 	 * at the tail of the buffer.
 	 */
-	RB_WARN_ON_RET(cpu_buffer,
+	if (RB_WARN_ON(cpu_buffer,
 		       (iter->head_page == cpu_buffer->commit_page) &&
-		       (iter->head + length > rb_commit_index(cpu_buffer)));
+		       (iter->head + length > rb_commit_index(cpu_buffer))))
+		return;
 
 	rb_update_iter_read_stamp(iter, event);
@@ -1707,17 +1724,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 		rb_advance_iter(iter);
 }
 
-/**
- * ring_buffer_peek - peek at the next event to be read
- * @buffer: The ring buffer to read
- * @cpu: The cpu to peek at
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not consume the data.
- */
-struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+static struct ring_buffer_event *
+rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -1738,10 +1746,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	 * can have. Nesting 10 deep of interrupts is clearly
 	 * an anomaly.
 	 */
-	if (unlikely(++nr_loops > 10)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
 		return NULL;
-	}
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -1779,16 +1785,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		return NULL;
 }
 
-/**
- * ring_buffer_iter_peek - peek at the next event to be read
- * @iter: The ring buffer iterator
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not increment the iterator.
- */
-struct ring_buffer_event *
-ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+static struct ring_buffer_event *
+rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -1810,10 +1808,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	 * can have. Nesting 10 deep of interrupts is clearly
 	 * an anomaly.
 	 */
-	if (unlikely(++nr_loops > 10)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
 		return NULL;
-	}
 
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
@@ -1849,6 +1845,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	return NULL;
 }
 
+/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peek at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_buffer_peek(buffer, cpu, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
@@ -1860,19 +1901,24 @@
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
+	unsigned long flags;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
 
-	event = ring_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		return NULL;
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+	event = rb_buffer_peek(buffer, cpu, ts);
+	if (!event)
+		goto out;
 
-	cpu_buffer = buffer->buffers[cpu];
 	rb_advance_reader(cpu_buffer);
 
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
 	return event;
 }
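
Because ring_buffer_consume() now takes the per-cpu reader_lock around the peek-and-advance sequence, concurrent readers can no longer interleave between the two steps. A hedged consumer sketch built on this API (my_handle_event() is invented; ring_buffer_event_data() returns the event payload):

/* Drain every event currently queued on one CPU's buffer. */
static void my_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		my_handle_event(ring_buffer_event_data(event), ts);
}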
@@ -1909,11 +1955,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	__raw_spin_lock(&cpu_buffer->lock);
 	ring_buffer_iter_reset(iter);
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
@@ -1945,12 +1991,17 @@ struct ring_buffer_event *
 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
 
-	event = ring_buffer_iter_peek(iter, ts);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
 	if (!event)
-		return NULL;
+		goto out;
 
 	rb_advance_iter(iter);
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -1999,13 +2050,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
@@ -2103,3 +2156,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	int r;
+
+	/* !ring_buffers_off == tracing_on */
+	r = sprintf(buf, "%d\n", !*p);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	/* !ring_buffers_off == tracing_on */
+	*p = !val;
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+};
+
+static __init int rb_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+
+	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
+				    &ring_buffers_off, &rb_simple_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'tracing_on' entry\n");
+
+	return 0;
+}
+
+fs_initcall(rb_init_debugfs);
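
rb_init_debugfs() publishes the switch as a 'tracing_on' file in the tracing debugfs directory, and rb_simple_read()/rb_simple_write() invert the value so that writing 1 enables recording even though the backing variable is ring_buffers_off. A small userspace sketch for toggling it (assumes debugfs is mounted at /sys/kernel/debug and the program runs as root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/debug/tracing/tracing_on";
	const char *val = (argc > 1 && argv[1][0] == '0') ? "0" : "1";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}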