sched: Implement group scheduler statistics in one struct
Put all statistic fields of sched_entity in one struct, sched_statistics,
and embed it into sched_entity.

This change allows us to memset the sched_statistics to 0 when needed
(for instance when forking), avoiding bugs caused by non-initialized
fields.

Signed-off-by: Lucas De Marchi <lucas.de.marchi@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268275065-18542-1-git-send-email-lucas.de.marchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
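For context, a rough sketch of the layout this patch introduces. It is illustrative only: the field list is abridged to the counters visible in the hunks below, the real structures live in include/linux/sched.h, and the reset helper at the end is a hypothetical stand-in for the fork-path memset the commit message describes.

/* Illustrative sketch only -- not the exact code added by the patch. */
#include <string.h>     /* memset(); the kernel uses its own implementation */
#include <stdint.h>

typedef uint64_t u64;

/* All per-entity schedstat counters gathered into one struct. */
struct sched_statistics {
        u64     wait_start, wait_max, wait_count, wait_sum;
        u64     sleep_start, sleep_max, sum_sleep_runtime;
        u64     block_start, block_max;
        u64     exec_max, slice_max;
        u64     iowait_count, iowait_sum;
        /* ... plus the nr_wakeups_* and nr_failed_migrations_* counters ... */
};

struct sched_entity {
        /* ... load weight, vruntime, rbtree node, etc. ... */
        struct sched_statistics statistics;     /* embedded, not a pointer */
};

/*
 * Hypothetical helper: with every counter living in one struct, the fork
 * path can clear them all with a single memset instead of assigning each
 * field individually (which is where forgotten-field bugs crept in).
 */
static inline void sched_init_entity_stats(struct sched_entity *se)
{
        memset(&se->statistics, 0, sizeof(se->statistics));
}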
This commit is contained in:
parent 3d07467b7a
commit 41acab8851

5 changed files with 106 additions and 163 deletions
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 {
         unsigned long delta_exec_weighted;
 
-        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+        schedstat_set(curr->statistics.exec_max,
+                      max((u64)delta_exec, curr->statistics.exec_max));
 
         curr->sum_exec_runtime += delta_exec;
         schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+        schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        schedstat_set(se->wait_max, max(se->wait_max,
-                        rq_of(cfs_rq)->clock - se->wait_start));
-        schedstat_set(se->wait_count, se->wait_count + 1);
-        schedstat_set(se->wait_sum, se->wait_sum +
-                        rq_of(cfs_rq)->clock - se->wait_start);
+        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
+                        rq_of(cfs_rq)->clock - se->statistics.wait_start));
+        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
+        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
+                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
 #ifdef CONFIG_SCHEDSTATS
         if (entity_is_task(se)) {
                 trace_sched_stat_wait(task_of(se),
-                        rq_of(cfs_rq)->clock - se->wait_start);
+                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
         }
 #endif
-        schedstat_set(se->wait_start, 0);
+        schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
         if (entity_is_task(se))
                 tsk = task_of(se);
 
-        if (se->sleep_start) {
-                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+        if (se->statistics.sleep_start) {
+                u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
 
                 if ((s64)delta < 0)
                         delta = 0;
 
-                if (unlikely(delta > se->sleep_max))
-                        se->sleep_max = delta;
+                if (unlikely(delta > se->statistics.sleep_max))
+                        se->statistics.sleep_max = delta;
 
-                se->sleep_start = 0;
-                se->sum_sleep_runtime += delta;
+                se->statistics.sleep_start = 0;
+                se->statistics.sum_sleep_runtime += delta;
 
                 if (tsk) {
                         account_scheduler_latency(tsk, delta >> 10, 1);
                         trace_sched_stat_sleep(tsk, delta);
                 }
         }
-        if (se->block_start) {
-                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+        if (se->statistics.block_start) {
+                u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
 
                 if ((s64)delta < 0)
                         delta = 0;
 
-                if (unlikely(delta > se->block_max))
-                        se->block_max = delta;
+                if (unlikely(delta > se->statistics.block_max))
+                        se->statistics.block_max = delta;
 
-                se->block_start = 0;
-                se->sum_sleep_runtime += delta;
+                se->statistics.block_start = 0;
+                se->statistics.sum_sleep_runtime += delta;
 
                 if (tsk) {
                         if (tsk->in_iowait) {
-                                se->iowait_sum += delta;
-                                se->iowait_count++;
+                                se->statistics.iowait_sum += delta;
+                                se->statistics.iowait_count++;
                                 trace_sched_stat_iowait(tsk, delta);
                         }
 
@@ -826,9 +827,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
                 struct task_struct *tsk = task_of(se);
 
                 if (tsk->state & TASK_INTERRUPTIBLE)
-                        se->sleep_start = rq_of(cfs_rq)->clock;
+                        se->statistics.sleep_start = rq_of(cfs_rq)->clock;
                 if (tsk->state & TASK_UNINTERRUPTIBLE)
-                        se->block_start = rq_of(cfs_rq)->clock;
+                        se->statistics.block_start = rq_of(cfs_rq)->clock;
         }
 #endif
 }
@@ -912,7 +913,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
          * when there are only lesser-weight tasks around):
          */
         if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-                se->slice_max = max(se->slice_max,
+                se->statistics.slice_max = max(se->statistics.slice_max,
                         se->sum_exec_runtime - se->prev_sum_exec_runtime);
         }
 #endif
@@ -1306,7 +1307,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         if (sync && balanced)
                 return 1;
 
-        schedstat_inc(p, se.nr_wakeups_affine_attempts);
+        schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
         tl_per_task = cpu_avg_load_per_task(this_cpu);
 
         if (balanced ||
@@ -1318,7 +1319,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
                  * there is no bad imbalance.
                  */
                 schedstat_inc(sd, ttwu_move_affine);
-                schedstat_inc(p, se.nr_wakeups_affine);
+                schedstat_inc(p, se.statistics.nr_wakeups_affine);
 
                 return 1;
         }
@@ -1844,13 +1845,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
          * 3) are cache-hot on their current CPU.
          */
         if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
-                schedstat_inc(p, se.nr_failed_migrations_affine);
+                schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
                 return 0;
         }
         *all_pinned = 0;
 
         if (task_running(rq, p)) {
-                schedstat_inc(p, se.nr_failed_migrations_running);
+                schedstat_inc(p, se.statistics.nr_failed_migrations_running);
                 return 0;
         }
 
@@ -1866,14 +1867,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 #ifdef CONFIG_SCHEDSTATS
                 if (tsk_cache_hot) {
                         schedstat_inc(sd, lb_hot_gained[idle]);
-                        schedstat_inc(p, se.nr_forced_migrations);
+                        schedstat_inc(p, se.statistics.nr_forced_migrations);
                 }
 #endif
                 return 1;
         }
 
         if (tsk_cache_hot) {
-                schedstat_inc(p, se.nr_failed_migrations_hot);
+                schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
                 return 0;
         }
         return 1;