Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (49 commits)
  perfcounter: Handle some IO return values
  perf_counter: Push perf_sample_data through the swcounter code
  perf_counter tools: Define and use our own u64, s64 etc. definitions
  perf_counter: Close race in perf_lock_task_context()
  perf_counter, x86: Improve interactions with fast-gup
  perf_counter: Simplify and fix task migration counting
  perf_counter tools: Add a data file header
  perf_counter: Update userspace callchain sampling uses
  perf_counter: Make callchain samples extensible
  perf report: Filter to parent set by default
  perf_counter tools: Handle lost events
  perf_counter: Add event overflow handling
  fs: Provide empty .set_page_dirty() aop for anon inodes
  perf_counter: tools: Makefile tweaks for 64-bit powerpc
  perf_counter: powerpc: Add processor back-end for MPC7450 family
  perf_counter: powerpc: Make powerpc perf_counter code safe for 32-bit kernels
  perf_counter: powerpc: Change how processor-specific back-ends get selected
  perf_counter: powerpc: Use unsigned long for register and constraint values
  perf_counter: powerpc: Enable use of software counters on 32-bit powerpc
  perf_counter tools: Add and use isprint()
  ...
Linus Torvalds 2009-06-20 11:29:32 -07:00
commit 12e24f34cb
40 changed files with 2327 additions and 898 deletions


@@ -853,6 +853,12 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
/*
* doesn't attempt to fault and will return short.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
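
A minimal caller sketch for the declaration above, assuming kernel context with <linux/mm.h>: the non-faulting walk returns short, so a caller that may sleep can retry the remainder through get_user_pages_fast(). The pin_range() wrapper and its fallback policy are illustrative, not taken from this commit.

static int pin_range(unsigned long start, int nr_pages, int write,
		     struct page **pages)
{
	int got = __get_user_pages_fast(start, nr_pages, write, pages);

	/* The fast walk never faults and may return short; from process
	 * context the remainder can be pinned via the sleeping path. */
	if (got >= 0 && got < nr_pages) {
		int more = get_user_pages_fast(start + ((unsigned long)got << PAGE_SHIFT),
					       nr_pages - got, write,
					       pages + got);
		if (more > 0)
			got += more;
	}
	return got;
}
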
/*
* A callback you can register to apply pressure to ageable caches.
*


@@ -236,10 +236,16 @@ struct perf_counter_mmap_page {
/*
* Control data for the mmap() data buffer.
*
* User-space reading this value should issue an rmb(), on SMP capable
* platforms, after reading this value -- see perf_counter_wakeup().
* User-space reading the @data_head value should issue an rmb(), on
* SMP capable platforms, after reading this value -- see
* perf_counter_wakeup().
*
* When the mapping is PROT_WRITE the @data_tail value should be
* written by userspace to reflect the last read data. In this case
* the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
};
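
A rough userspace sketch of the protocol described above, assuming <linux/perf_counter.h> is installed, an architecture-appropriate rmb() macro as used by the perf tools, and records that do not wrap the end of the buffer: read data_head, issue the barrier, consume records up to it, then publish data_tail so a PROT_WRITE mapping keeps unread data from being overwritten.

#include <stdint.h>
#include <linux/perf_counter.h>

void drain_buffer(volatile struct perf_counter_mmap_page *pc,
		  unsigned char *data, uint64_t size,	/* power of two */
		  void (*handle)(struct perf_event_header *ev))
{
	uint64_t head = pc->data_head;
	rmb();				/* order data reads after reading data_head */

	uint64_t tail = pc->data_tail;
	while (tail < head) {
		struct perf_event_header *ev =
			(struct perf_event_header *)&data[tail & (size - 1)];
		handle(ev);
		tail += ev->size;	/* records are self-sized */
	}

	/* On a PROT_WRITE mapping this tells the kernel what has been
	 * read, so it will not overwrite unconsumed records. */
	pc->data_tail = tail;
}
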
#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
@@ -273,6 +279,15 @@ enum perf_event_type {
*/
PERF_EVENT_MMAP = 1,
/*
* struct {
* struct perf_event_header header;
* u64 id;
* u64 lost;
* };
*/
PERF_EVENT_LOST = 2,
/*
* struct {
* struct perf_event_header header;
@@ -313,30 +328,39 @@ enum perf_event_type {
/*
* When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
* will be PERF_RECORD_*
* will be PERF_SAMPLE_*
*
* struct {
* struct perf_event_header header;
*
* { u64 ip; } && PERF_RECORD_IP
* { u32 pid, tid; } && PERF_RECORD_TID
* { u64 time; } && PERF_RECORD_TIME
* { u64 addr; } && PERF_RECORD_ADDR
* { u64 config; } && PERF_RECORD_CONFIG
* { u32 cpu, res; } && PERF_RECORD_CPU
* { u64 ip; } && PERF_SAMPLE_IP
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
* { u64 config; } && PERF_SAMPLE_CONFIG
* { u32 cpu, res; } && PERF_SAMPLE_CPU
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
*
* { u16 nr,
* hv,
* kernel,
* user;
* u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
};
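
A hedged decoding sketch for the sample layout documented above; parse_overflow_sample() and the parsed_sample container are illustrative names, only the IP/TID/TIME/ADDR bits are handled, and a PERF_SAMPLE_CALLCHAIN payload would follow as { u64 nr; u64 ips[nr]; }. The PERF_SAMPLE_* constants and struct perf_event_header are assumed to come from the installed copy of this header.

#include <stdint.h>
#include <linux/perf_counter.h>

struct parsed_sample {
	uint64_t ip, time, addr;
	uint32_t pid, tid;
};

static void parse_overflow_sample(const struct perf_event_header *hdr,
				  uint64_t sample_type,	/* PERF_SAMPLE_* mask */
				  struct parsed_sample *out)
{
	const uint64_t *p = (const uint64_t *)(hdr + 1);

	if (sample_type & PERF_SAMPLE_IP)
		out->ip = *p++;
	if (sample_type & PERF_SAMPLE_TID) {
		const uint32_t *t = (const uint32_t *)p;
		out->pid = t[0];
		out->tid = t[1];
		p++;			/* pid and tid share one u64 slot */
	}
	if (sample_type & PERF_SAMPLE_TIME)
		out->time = *p++;
	if (sample_type & PERF_SAMPLE_ADDR)
		out->addr = *p++;
}
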
enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
PERF_CONTEXT_KERNEL = (__u64)-128,
PERF_CONTEXT_USER = (__u64)-512,
PERF_CONTEXT_GUEST = (__u64)-2048,
PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
PERF_CONTEXT_GUEST_USER = (__u64)-2560,
PERF_CONTEXT_MAX = (__u64)-4095,
};
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
@@ -356,6 +380,13 @@ enum perf_event_type
#include <linux/pid_namespace.h>
#include <asm/atomic.h>
#define PERF_MAX_STACK_DEPTH 255
struct perf_callchain_entry {
__u64 nr;
__u64 ip[PERF_MAX_STACK_DEPTH];
};
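
With the extensible format, context is carried in-band: values at or above PERF_CONTEXT_MAX in ip[] are the (__u64)-N markers from enum perf_callchain_context and switch the context for the addresses that follow, replacing the removed nr/hv/kernel/user counts. A small illustrative walker, where resolve_symbol() is a hypothetical consumer and the initial context is an assumption:

static void walk_callchain(const struct perf_callchain_entry *chain)
{
	__u64 context = PERF_CONTEXT_KERNEL;	/* assumed initial context */
	__u64 i;

	for (i = 0; i < chain->nr; i++) {
		__u64 ip = chain->ip[i];

		if (ip >= PERF_CONTEXT_MAX)
			context = ip;		/* in-band marker, not an address */
		else
			resolve_symbol(ip, context);	/* hypothetical consumer */
	}
}
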
struct task_struct;
/**
@@ -414,6 +445,7 @@ struct file;
struct perf_mmap_data {
struct rcu_head rcu_head;
int nr_pages; /* nr of data pages */
int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
@@ -423,8 +455,8 @@ struct perf_mmap_data {
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */
atomic_t wakeup; /* needs a wakeup */
atomic_t lost; /* nr records lost */
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
@@ -604,6 +636,7 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
@@ -649,18 +682,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma)
extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);
extern void perf_counter_task_migration(struct task_struct *task, int cpu);
#define MAX_STACK_DEPTH 255
struct perf_callchain_entry {
u16 nr;
u16 hv;
u16 kernel;
u16 user;
u64 ip[MAX_STACK_DEPTH];
};
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int sysctl_perf_counter_paranoid;
@@ -701,8 +722,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
static inline void perf_counter_task_migration(struct task_struct *task,
int cpu) { }
#endif
#endif /* __KERNEL__ */