Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix the software context switch counter
  perf, x86: Fixup Kconfig deps
  x86, perf, nmi: Disable perf if counters are not accessible
  perf: Fix inherit vs. context rotation bug
commit a9e40a2493
4 changed files with 57 additions and 19 deletions
arch/x86/Kconfig

@@ -21,7 +21,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_PERF_EVENTS if (!M386 && !M486)
+	select HAVE_PERF_EVENTS
 	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
arch/x86/kernel/cpu/perf_event.c

@@ -381,6 +381,20 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
+static bool check_hw_exists(void)
+{
+	u64 val, val_new = 0;
+	int ret = 0;
+
+	val = 0xabcdUL;
+	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	if (ret || val != val_new)
+		return false;
+
+	return true;
+}
+
 static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
@@ -1372,6 +1386,12 @@ void __init init_hw_perf_events(void)
 
 	pmu_check_apic();
 
+	/* sanity check that the hardware exists or is emulated */
+	if (!check_hw_exists()) {
+		pr_cont("Broken PMU hardware detected, software events only.\n");
+		return;
+	}
+
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
 	if (x86_pmu.quirks)
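For illustration, the same read-back probe idea can be exercised from user space through the Linux msr driver. This is a minimal sketch, not part of the commit: it assumes an Intel counter layout (0xc1 = IA32_PMC0 is an assumption), the msr kernel module loaded, and root, and it only reads the MSR back, since blindly writing counter MSRs from user space would be unsafe. A faulting read here corresponds to the rdmsrl_safe() failure path that makes check_hw_exists() return false.

/*
 * Hedged user-space sketch of the read-back probe in check_hw_exists().
 * Assumptions: Intel layout (0xc1 = IA32_PMC0), msr module loaded, root.
 * Build: cc -o pmcprobe pmcprobe.c
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const off_t IA32_PMC0 = 0xc1;		/* assumed MSR address */
	uint64_t val = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");	/* no msr module, or no root */
		return 1;
	}
	/* the msr driver reads the MSR whose address equals the file offset */
	if (pread(fd, &val, sizeof(val), IA32_PMC0) != sizeof(val)) {
		perror("read IA32_PMC0");	/* inaccessible counter, cf. check_hw_exists() */
		close(fd);
		return 1;
	}
	printf("IA32_PMC0 readable, value %#llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}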
include/linux/perf_event.h

@@ -850,6 +850,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
@@ -908,20 +909,6 @@ extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 
-extern atomic_t perf_task_events;
-
-static inline void perf_event_task_sched_in(struct task_struct *task)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
-}
-
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
-}
-
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1030,6 +1017,21 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
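The relocated perf_sw_event() call is what "perf: Fix the software context switch counter" delivers: the out-of-line __perf_event_task_sched_out() is gated on perf_task_events via COND_STMT, and per-CPU counters do not raise that, so counting switches only there lost events. A minimal user-space sketch (not part of this commit) that exercises the counter in question through the perf_event_open() syscall:

/*
 * Minimal sketch: count this thread's context switches with the
 * software event fixed by this pull. Not part of the commit.
 * Build: cc -o swctx swctx.c
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

	/* measure the calling thread on any CPU; no glibc wrapper exists */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	for (i = 0; i < 100; i++)
		sched_yield();	/* provoke voluntary switches; count may vary */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("context switches: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}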
kernel/perf_event.c

@@ -1287,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
 	int ctxn;
 
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1622,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
@@ -6162,6 +6164,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6202,6 +6205,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6209,6 +6221,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {
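Taken together, the rotate_disable changes implement a small "pause the periodic mutator" protocol: perf_event_init_context() cannot hold ctx->lock across its whole (allocating) walk of flexible_groups, so it sets the flag under the lock, walks, then clears it, and rotate_ctx() skips rotation while the flag is set. A minimal pthread sketch of the same protocol; every name in it is illustrative, not kernel API:

/*
 * Hedged pthread sketch of the rotate_disable protocol above.
 * All names are illustrative; this is not kernel code.
 * Build: cc -pthread -o rotsketch rotsketch.c
 */
#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;
	int rotate_disable;		/* mirrors perf_event_context */
	/* ... list of flexible groups ... */
};

/* periodic path, analogous to rotate_ctx() */
static void rotate(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (!c->rotate_disable) {
		/* rotate_list(c); -- hypothetical list rotation */
	}
	pthread_mutex_unlock(&c->lock);
}

/* slow path, analogous to perf_event_init_context() */
static void walk_with_allocations(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->rotate_disable = 1;		/* pause rotation */
	pthread_mutex_unlock(&c->lock);

	/*
	 * Iterate and allocate without holding the lock; rotate() may
	 * still run, but it will not reorder the list underneath us.
	 */

	pthread_mutex_lock(&c->lock);
	c->rotate_disable = 0;		/* resume rotation */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 0 };

	rotate(&c);			/* rotates: flag is clear */
	walk_with_allocations(&c);	/* brackets a safe traversal */
	return 0;
}

The kernel uses raw_spin_lock_irqsave() because rotate_ctx() runs from interrupt context; a plain mutex suffices for this user-space illustration.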