mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-23 23:21:46 +00:00
perf: Fix throttle logic
It was possible to call pmu::start() on an already running event. In particular this led to some wreckage as the hrtimer events would re-initialize active timers. This was due to throttled events being activated again by scheduling. Scheduling in a context would add and force start events, resulting in running events with a possible throttle status. The next tick to hit that task will then try to unthrottle the event and call ->start() on an already running event. Reported-by: Jeff Moyer <jmoyer@redhat.com> Cc: <stable@kernel.org> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
7d44ec193d
commit
4fe757dd48
1 changed file with 15 additions and 4 deletions
|
@ -782,6 +782,10 @@ retry:
|
||||||
raw_spin_unlock_irq(&ctx->lock);
|
raw_spin_unlock_irq(&ctx->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define MAX_INTERRUPTS (~0ULL)
|
||||||
|
|
||||||
|
static void perf_log_throttle(struct perf_event *event, int enable);
|
||||||
|
|
||||||
static int
|
static int
|
||||||
event_sched_in(struct perf_event *event,
|
event_sched_in(struct perf_event *event,
|
||||||
struct perf_cpu_context *cpuctx,
|
struct perf_cpu_context *cpuctx,
|
||||||
|
@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
|
||||||
|
|
||||||
event->state = PERF_EVENT_STATE_ACTIVE;
|
event->state = PERF_EVENT_STATE_ACTIVE;
|
||||||
event->oncpu = smp_processor_id();
|
event->oncpu = smp_processor_id();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Unthrottle events, since we scheduled we might have missed several
|
||||||
|
* ticks already, also for a heavily scheduling task there is little
|
||||||
|
* guarantee it'll get a tick in a timely manner.
|
||||||
|
*/
|
||||||
|
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
|
||||||
|
perf_log_throttle(event, 1);
|
||||||
|
event->hw.interrupts = 0;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The new state must be visible before we turn it on in the hardware:
|
* The new state must be visible before we turn it on in the hardware:
|
||||||
*/
|
*/
|
||||||
|
@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#define MAX_INTERRUPTS (~0ULL)
|
|
||||||
|
|
||||||
static void perf_log_throttle(struct perf_event *event, int enable);
|
|
||||||
|
|
||||||
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
|
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
|
||||||
{
|
{
|
||||||
u64 frequency = event->attr.sample_freq;
|
u64 frequency = event->attr.sample_freq;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue