sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
Remove the direct {push,pull} balancing operations from
switched_{from,to}_rt() / prio_changed_rt() and use the balance
callback queue.

Again, err on the side of too many reschedules; since too few is a
hard bug while too many is just annoying.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 8046d68062
commit fd7a4bed18
1 changed file with 19 additions and 16 deletions
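The queue_balance_callback() helper the diff below relies on was introduced by the parent commit. A minimal sketch of its queueing side, modeled on kernel/sched/sched.h from this series; treat it as an illustration rather than the verbatim source. The key property is that re-queueing an already-queued head is a no-op, which is what makes erring on the side of "too many" queue attempts cheap:

/*
 * Sketch (not verbatim source): queue a balance callback on this rq.
 * Caller must hold rq->lock. The callback runs later, once it is safe
 * to drop the rq lock, instead of inline at the call site.
 */
static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))	/* already queued; nothing to do */
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;	/* LIFO-link onto the rq */
	rq->balance_callback = head;
}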
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -354,16 +354,23 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);
 
 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;
 
-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
+}
+
+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
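The queued heads are not invoked here; the scheduler core drains rq->balance_callback once the locking context allows a callback to drop and retake the rq lock (push_rt_tasks()/pull_rt_task() both do so via double_lock_balance()). A sketch of that drain loop, again approximate, modeled on __balance_callback() from kernel/sched/core.c in this series:

/*
 * Sketch (approximate): run and clear all queued balance callbacks.
 * Clearing head->next before calling func() re-arms the per-CPU head,
 * so the callback can be queued again from within func() if needed.
 */
static void __balance_callback(struct rq *rq)
{
	struct callback_head *head, *next;
	void (*func)(struct rq *rq);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	head = rq->balance_callback;
	rq->balance_callback = NULL;
	while (head) {
		func = (void (*)(struct rq *))head->func;
		next = head->next;
		head->next = NULL;
		head = next;

		func(rq);	/* may drop and retake rq->lock */
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}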
@@ -2139,7 +2146,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 
-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }
 
 void __init init_sched_rt_class(void)
@@ -2160,8 +2167,6 @@ void __init init_sched_rt_class(void)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
@@ -2171,13 +2176,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
-		    /* Don't resched if we changed runqueues */
-		    push_rt_task(rq) && rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
+#endif /* CONFIG_SMP */
 	}
 }
 
@@ -2198,14 +2202,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 * may need to pull tasks to this runqueue.
 		 */
 		if (oldprio < p->prio)
-			pull_rt_task(rq);
+			queue_pull_task(rq);
+
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule. Note, the above pull_rt_task
-		 * can release the rq lock and p could migrate.
-		 * Only reschedule if p is still on the same runqueue.
+		 * then reschedule.
 		 */
-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
 #else
 		/* For UP simply resched on drop of prio */
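Note how the deferral also lets the last hunk drop the old "p could migrate" caveat: with pull_rt_task() no longer called inline, prio_changed_rt() never releases the rq lock itself, so p cannot change runqueues underneath it and the rq->curr == p re-check becomes unnecessary. At worst the queued pull produces a spurious reschedule, which the changelog explicitly accepts as the cheaper failure mode.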