perf_counter: rework the task clock software counter
Rework the task clock software counter to use the context time instead of the task runtime clock; this removes the last such user.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.445450972@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4af4998b8a
commit a39d6f2556
1 changed file with 12 additions and 30 deletions
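To make the diff below easier to follow: before this change the task clock counter derived its value from the task runtime clock (curr->se.sum_exec_runtime plus a delta obtained via __task_delta_exec()); after it, the counter snapshots the perf context time and accumulates deltas against a previous timestamp with an atomic exchange. What follows is a minimal userspace sketch of that accounting pattern only, not kernel code: clock_gettime(CLOCK_MONOTONIC) stands in for the context time kept by update_context_time(), C11 atomics stand in for the kernel's atomic64 helpers, and the names task_clock_update()/task_clock_enable() are illustrative.

#define _POSIX_C_SOURCE 199309L
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static atomic_uint_least64_t prev_count;	/* models counter->hw.prev_count */
static atomic_uint_least64_t count;		/* models counter->count */

/* Stand-in for the perf context time (counter->ctx->time in the patch). */
static uint64_t context_time_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Same shape as task_clock_perf_counter_update() after the patch:
 * exchange the previous timestamp for "now" and accumulate the delta. */
static void task_clock_update(void)
{
	uint64_t now  = context_time_ns();
	uint64_t prev = atomic_exchange(&prev_count, now);

	atomic_fetch_add(&count, now - prev);
}

/* Same shape as task_clock_perf_counter_enable(): prime prev_count. */
static void task_clock_enable(void)
{
	atomic_store(&prev_count, context_time_ns());
}

int main(void)
{
	task_clock_enable();
	/* ... the monitored work would run here ... */
	task_clock_update();
	printf("accumulated: %llu ns\n", (unsigned long long)atomic_load(&count));
	return 0;
}

Folding the read-modify-write of the previous timestamp into a single exchange mirrors what the patch does in the kernel, where the separate atomic64_read()/atomic64_set() pair is replaced by one atomic64_xchg().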
@@ -974,9 +974,6 @@ int perf_counter_task_disable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -1017,9 +1014,6 @@ int perf_counter_task_enable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -2347,38 +2341,28 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
  * Software counter: task time clock
  */
 
-/*
- * Called from within the scheduler:
- */
-static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
+static void task_clock_perf_counter_update(struct perf_counter *counter)
 {
-	struct task_struct *curr = counter->task;
-	u64 delta;
-
-	delta = __task_delta_exec(curr, update);
-
-	return curr->se.sum_exec_runtime + delta;
-}
-
-static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
-{
-	u64 prev;
+	u64 prev, now;
 	s64 delta;
 
-	prev = atomic64_read(&counter->hw.prev_count);
-
-	atomic64_set(&counter->hw.prev_count, now);
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
 
+	prev = atomic64_xchg(&counter->hw.prev_count, now);
 	delta = now - prev;
-
 	atomic64_add(delta, &counter->count);
 }
 
 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 now;
 
-	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
+
+	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
 	if (hwc->irq_period) {
@@ -2393,14 +2377,12 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
 	hrtimer_cancel(&counter->hw.hrtimer);
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 0));
+	task_clock_perf_counter_update(counter);
 }
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 1));
+	task_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {