sched: update aggregate when holding the RQs
It was observed that in __update_group_shares_cpu(),

    rq_weight > aggregate()->rq_weight

This is caused by forks/wakeups occurring between the initial aggregate
pass and the locking of the RQs for load balancing. To avoid this
situation, partially re-do the aggregation once we have the RQs locked
(which prevents new tasks from appearing).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
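The fix follows a common locking pattern: a value aggregated before the
lock is taken can already be stale by the time the lock is held, so the
aggregation is simply repeated under the lock. A minimal userspace sketch
of that pattern, using pthreads; rq_weight, aggregate_pass and
fork_simulator are purely illustrative names, not the kernel's symbols
(build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long rq_weight[2] = { 1024, 1024 };

    /* First pass: may race with forks/wakeups changing rq_weight. */
    static unsigned long aggregate_pass(void)
    {
            return rq_weight[0] + rq_weight[1];
    }

    static void *fork_simulator(void *arg)
    {
            pthread_mutex_lock(&rq_lock);
            rq_weight[0] += 1024;   /* a "fork" adds weight to a runqueue */
            pthread_mutex_unlock(&rq_lock);
            return NULL;
    }

    int main(void)
    {
            unsigned long stale, fresh;
            pthread_t t;

            stale = aggregate_pass();       /* snapshot taken without the lock */

            pthread_create(&t, NULL, fork_simulator, NULL);
            pthread_join(&t, NULL);

            pthread_mutex_lock(&rq_lock);
            fresh = aggregate_pass();       /* re-done while holding the lock */
            pthread_mutex_unlock(&rq_lock);

            printf("stale aggregate: %lu, fresh aggregate: %lu\n", stale, fresh);
            return 0;
    }

Once the lock is held nothing can change the weights, so the second pass
is guaranteed consistent; that is exactly what the new update_aggregate()
call below provides for find_idlest_group().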
parent b6a86c746f
commit 4d8d595dfa

1 changed file with 20 additions and 0 deletions
kernel/sched.c
@@ -1721,6 +1721,11 @@ aggregate_get_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 	aggregate_group_set_shares(tg, cpu, sd);
 }
 
+static void
+aggregate_get_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
+{
+}
+
 static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
 
 static void __init init_aggregate(void)
@@ -1740,6 +1745,11 @@ static int get_aggregate(int cpu, struct sched_domain *sd)
 	return 1;
 }
 
+static void update_aggregate(int cpu, struct sched_domain *sd)
+{
+	aggregate_walk_tree(aggregate_get_down, aggregate_get_nop, cpu, sd);
+}
+
 static void put_aggregate(int cpu, struct sched_domain *sd)
 {
 	spin_unlock(&per_cpu(aggregate_lock, cpu));
@@ -1761,6 +1771,10 @@ static inline int get_aggregate(int cpu, struct sched_domain *sd)
 	return 0;
 }
 
+static inline void update_aggregate(int cpu, struct sched_domain *sd)
+{
+}
+
 static inline void put_aggregate(int cpu, struct sched_domain *sd)
 {
 }
@@ -2192,6 +2206,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+	/*
+	 * now that we have both rqs locked the rq weight won't change
+	 * anymore - so update the stats.
+	 */
+	update_aggregate(this_cpu, sd);
+
 	do {
 		unsigned long load, avg_load;
 		int local_group;
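For context on the helpers: aggregate_walk_tree(down, up, ...) walks the
task-group tree calling the down-callback pre-order and the up-callback
post-order, where the up pass (ending in aggregate_group_set_shares(), as
seen in the first hunk) redistributes shares. By passing the new
aggregate_get_nop() as the up-callback, update_aggregate() repeats only
the down recomputation under the lock. A sketch of that walker shape;
struct node, walk_fn and the callbacks are illustrative, not the kernel's
types:

    #include <stdio.h>

    /* Illustrative stand-ins; not the kernel's task-group structures. */
    struct node {
            const char *name;
            struct node *child, *sibling;
    };

    typedef void (*walk_fn)(struct node *n);

    /* Pre-order "down" pass, post-order "up" pass - the same shape as
     * aggregate_walk_tree(down, up, cpu, sd). */
    static void walk_tree(struct node *n, walk_fn down, walk_fn up)
    {
            struct node *c;

            down(n);                        /* e.g. recompute weight/load */
            for (c = n->child; c; c = c->sibling)
                    walk_tree(c, down, up);
            up(n);                          /* e.g. redistribute shares */
    }

    static void get_down(struct node *n) { printf("down %s\n", n->name); }
    static void get_up(struct node *n)   { printf("up   %s\n", n->name); }
    static void get_nop(struct node *n)  { (void)n; /* like aggregate_get_nop() */ }

    int main(void)
    {
            struct node b = { "child_b", NULL, NULL };
            struct node a = { "child_a", NULL, &b };
            struct node root = { "root", &a, NULL };

            walk_tree(&root, get_down, get_up);     /* full pass: get_aggregate() */
            walk_tree(&root, get_down, get_nop);    /* down-only re-pass:
                                                       update_aggregate() */
            return 0;
    }

Reusing the existing walker with a nop callback keeps the re-aggregation
cheap and avoids duplicating the traversal logic.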