Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-21 22:21:21 +00:00)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
  tracing/kprobes: unregister_trace_probe needs to be called under mutex
  perf: expose event__process function
  perf events: Fix mmap offset determination
  perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
  perf, powerpc: Convert the FSL driver to use local64_t
  perf tools: Don't keep unreferenced maps when unmaps are detected
  perf session: Invalidate last_match when removing threads from rb_tree
  perf session: Free the ref_reloc_sym memory at the right place
  x86,mmiotrace: Add support for tracing STOS instruction
  perf, sched migration: Librarize task states and event headers helpers
  perf, sched migration: Librarize the GUI class
  perf, sched migration: Make the GUI class client agnostic
  perf, sched migration: Make it vertically scrollable
  perf, sched migration: Parameterize cpu height and spacing
  perf, sched migration: Fix key bindings
  perf, sched migration: Ignore unhandled task states
  perf, sched migration: Handle ignored migrate out events
  perf: New migration tool overview
  tracing: Drop cpparg() macro
  perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
  ...

Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
commit 4aed2fd8e3

179 changed files with 5610 additions and 4808 deletions
kernel/hw_breakpoint.c

@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
@@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 static int nr_slots[TYPE_MAX];
 
+/* Keep track of the breakpoints attached to tasks */
+static LIST_HEAD(bp_task_head);
+
 static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
@@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+/*
+ * Count the number of breakpoints of the same type and same task.
+ * The given event must be not on the list.
+ */
+static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
-	struct list_head *list;
-	struct perf_event *bp;
-	unsigned long flags;
+	struct perf_event_context *ctx = bp->ctx;
+	struct perf_event *iter;
 	int count = 0;
 
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return 0;
-
-	list = &ctx->event_list;
-
-	raw_spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			if (find_slot_idx(bp) == type)
-				count += hw_breakpoint_weight(bp);
-	}
-
-	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
+		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+			count += hw_breakpoint_weight(iter);
+	}
 
 	return count;
 }
 
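The rewritten task_bp_pinned() above no longer walks the task's perf context under ctx->lock; it filters the single global bp_task_head list by owning context. Below is a minimal user-space sketch of that pattern, not the kernel code: the names (struct bp, integer ctx ids, a hand-rolled singly linked list) are illustrative stand-ins for the kernel's types.

/*
 * Sketch: every task-bound breakpoint sits on one global list, and the
 * per-task count filters that list by owning context.
 */
#include <stdio.h>

struct bp {
	int ctx;		/* stand-in for the owning perf_event_context */
	int weight;		/* slots this breakpoint consumes */
	struct bp *next;	/* stand-in for the hw.bp_list linkage */
};

static struct bp *bp_task_head;	/* global list of task-bound breakpoints */

/* Count the slots pinned by breakpoints sharing @ctx. */
static int task_bp_pinned(int ctx)
{
	int count = 0;

	for (struct bp *it = bp_task_head; it; it = it->next)
		if (it->ctx == ctx)
			count += it->weight;
	return count;
}

int main(void)
{
	struct bp a = { .ctx = 1, .weight = 1, .next = NULL };
	struct bp b = { .ctx = 1, .weight = 2, .next = &a };
	struct bp c = { .ctx = 2, .weight = 1, .next = &b };

	bp_task_head = &c;
	printf("ctx 1 pins %d slots\n", task_bp_pinned(1));	/* 3 */
	return 0;
}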
@@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk, type);
+			slots->pinned += task_bp_pinned(bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk, type);
+			nr += task_bp_pinned(bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
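These two hunks are the same signature change at fetch_bp_busy_slots()'s two branches: a breakpoint bound to a single CPU reads that CPU's counts, while an unbound one must take the worst case over every online CPU. A rough user-space sketch of that max-across-CPUs step, with made-up names and counts:

#include <stdio.h>

#define NR_CPUS 4

static int cpu_pinned[NR_CPUS] = { 1, 3, 0, 2 };	/* per-cpu pinned weight */
static int task_pinned = 1;	/* a task event contributes on every CPU */

/* The busy count that matters is the busiest CPU's total. */
static int busiest_slots(void)
{
	int max = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int nr = cpu_pinned[cpu] + task_pinned;

		if (nr > max)
			max = nr;
	}
	return max;
}

int main(void)
{
	printf("pinned worst case: %d\n", busiest_slots());	/* 4 */
	return 0;
}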
@@ -188,7 +180,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
@@ -196,10 +188,11 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(tsk, type);
+	old_count = task_bp_pinned(bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
+	/* tsk_pinned[n] is the number of tasks having n breakpoints */
 	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
 		tsk_pinned[idx]++;
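The new comment in this hunk names the data structure: tsk_pinned is a per-CPU histogram where bucket n counts the tasks pinning n+1 slots, so retargeting a task from old_count to old_count + weight means decrementing its old bucket and incrementing the new one. A standalone sketch of that bucket move (array size and driver code illustrative, mirroring the kernel logic rather than reproducing it):

#include <stdio.h>

#define MAX_PINNED 8

static unsigned int tsk_pinned[MAX_PINNED];	/* bucket n: tasks pinning n+1 slots */

static void toggle_task_slot(int old_count, int weight, int enable)
{
	int old_idx = old_count - 1;	/* bucket the task occupied */
	int idx = old_idx + weight;	/* bucket it moves to */

	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

int main(void)
{
	toggle_task_slot(0, 1, 1);	/* task pins its first slot */
	toggle_task_slot(1, 1, 1);	/* one slot -> two: bucket 0 to bucket 1 */
	printf("%u %u\n", tsk_pinned[0], tsk_pinned[1]);	/* 0 1 */
	return 0;
}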
@@ -222,23 +215,30 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
-	/* Pinned counter task profiling */
-	if (tsk) {
-		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-			return;
-		}
-
-		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
+	/* Pinned counter cpu profiling */
+	if (!tsk) {
+
+		if (enable)
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+		else
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
 		return;
 	}
 
-	/* Pinned counter cpu profiling */
+	/* Pinned counter task profiling */
+
+	if (!enable)
+		list_del(&bp->hw.bp_list);
+
+	if (cpu >= 0) {
+		toggle_bp_task_slot(bp, cpu, enable, type, weight);
+	} else {
+		for_each_online_cpu(cpu)
+			toggle_bp_task_slot(bp, cpu, enable, type, weight);
+	}
+
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-	else
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		list_add_tail(&bp->hw.bp_list, &bp_task_head);
 }
 
 /*
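Worth noting in this hunk is the ordering: on disable the event is unlinked before any recount, and on enable it is linked only after the accounting, so task_bp_pinned() never observes the event currently being toggled. A toy sketch of that invariant (all names assumed, not the kernel API):

#include <stdio.h>

struct bp {
	int weight;
	int linked;
};

static int list_weight;	/* combined weight of everything currently linked */

static void toggle_slot(struct bp *bp, int enable)
{
	if (!enable && bp->linked) {
		bp->linked = 0;		/* list_del() analogue, done first */
		list_weight -= bp->weight;
	}

	/* ...accounting would run here, with bp safely off the list... */

	if (enable && !bp->linked) {
		bp->linked = 1;		/* list_add_tail() analogue, done last */
		list_weight += bp->weight;
	}
}

int main(void)
{
	struct bp a = { .weight = 2, .linked = 0 };

	toggle_slot(&a, 1);
	printf("%d\n", list_weight);	/* 2 */
	toggle_slot(&a, 0);
	printf("%d\n", list_weight);	/* 0 */
	return 0;
}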
@@ -312,6 +312,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	weight = hw_breakpoint_weight(bp);
 
 	fetch_bp_busy_slots(&slots, bp, type);
+	/*
+	 * Simulate the addition of this breakpoint to the constraints
+	 * and see the result.
+	 */
 	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
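The added comment documents the overall reservation scheme: fetch the busiest current counts, simulate adding this breakpoint's weight, and reject the request if the result no longer fits. A compact sketch of that check, assuming 4 slots per CPU as with x86 debug registers (constants and names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define HBP_NUM 4	/* hardware slots per CPU; 4 on x86 */

struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

static bool reserve_bp_slot(struct bp_busy_slots slots, int weight)
{
	slots.pinned += weight;	/* fetch_this_slot() analogue: simulate */

	/* flexible counters need to keep at least one slot */
	if (slots.pinned + (slots.flexible ? 1 : 0) > HBP_NUM)
		return false;
	return true;
}

int main(void)
{
	struct bp_busy_slots slots = { .pinned = 2, .flexible = 1 };

	printf("%d\n", reserve_bp_slot(slots, 1));	/* 1: 3 + 1 <= 4 */
	printf("%d\n", reserve_bp_slot(slots, 2));	/* 0: 4 + 1 > 4 */
	return 0;
}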