Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-07-02 20:29:20 +00:00)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpusets: Remove an unused variable
  sched/rt: Improve pick_next_highest_task_rt()
  sched: Fix select_fallback_rq() vs cpu_active/cpu_online
  sched/x86/smp: Do not enable IRQs over calibrate_delay()
  sched: Fix compiler warning about declared inline after use
  MAINTAINERS: Update email address for SCHEDULER and PERF EVENTS
commit 7fda0412c5
7 changed files with 63 additions and 53 deletions
kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
         mutex_unlock(&callback_mutex);
 }

-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
         const struct cpuset *cs;
-        int cpu;

         rcu_read_lock();
         cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
          * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
          * set any mask even if it is not right from task_cs() pov,
          * the pending set_cpus_allowed_ptr() will fix things.
+         *
+         * select_fallback_rq() will fix things ups and set cpu_possible_mask
+         * if required.
          */
-
-        cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-        if (cpu >= nr_cpu_ids) {
-                /*
-                 * Either tsk->cpus_allowed is wrong (see above) or it
-                 * is actually empty. The latter case is only possible
-                 * if we are racing with remove_tasks_in_empty_cpuset().
-                 * Like above we can temporary set any mask and rely on
-                 * set_cpus_allowed_ptr() as synchronization point.
-                 */
-                do_set_cpus_allowed(tsk, cpu_possible_mask);
-                cpu = cpumask_any(cpu_active_mask);
-        }
-
-        return cpu;
 }

 void cpuset_init_current_mems_allowed(void)
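The change above narrows cpuset_cpus_allowed_fallback() to a single job: rewrite the task's allowed mask and return nothing, leaving the choice of a destination CPU to its caller, select_fallback_rq() (next file). A minimal stand-alone sketch of that new contract, using simplified stand-in types rather than the kernel's task_struct/cpumask API:

/* Sketch only: hypothetical types, not kernel code. */
#include <stdio.h>

#define NR_CPUS 8

struct task {
        unsigned int cpus_allowed;              /* bitmask of allowed CPUs */
};

/*
 * Old contract: pick and return a CPU.  New contract (as in the hunk
 * above): only widen the allowed mask; the caller decides which CPU
 * from that mask is actually usable.
 */
static void cpuset_fallback_sketch(struct task *t)
{
        t->cpus_allowed = (1u << NR_CPUS) - 1;  /* fall back to "all possible" */
}

int main(void)
{
        struct task t = { .cpus_allowed = 0 };  /* empty mask, nothing runnable */

        cpuset_fallback_sketch(&t);
        printf("allowed mask after fallback: 0x%02x\n", t.cpus_allowed);
        return 0;
}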
kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-        int dest_cpu;
         const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+        enum { cpuset, possible, fail } state = cpuset;
+        int dest_cpu;

         /* Look for allowed, online CPU in same node. */
-        for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+        for_each_cpu_mask(dest_cpu, *nodemask) {
+                if (!cpu_online(dest_cpu))
+                        continue;
+                if (!cpu_active(dest_cpu))
+                        continue;
                 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                         return dest_cpu;
+        }

-        /* Any allowed, online CPU? */
-        dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-        if (dest_cpu < nr_cpu_ids)
-                return dest_cpu;
+        for (;;) {
+                /* Any allowed, online CPU? */
+                for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+                        if (!cpu_online(dest_cpu))
+                                continue;
+                        if (!cpu_active(dest_cpu))
+                                continue;
+                        goto out;
+                }

-        /* No more Mr. Nice Guy. */
-        dest_cpu = cpuset_cpus_allowed_fallback(p);
-        /*
-         * Don't tell them about moving exiting tasks or
-         * kernel threads (both mm NULL), since they never
-         * leave kernel.
-         */
-        if (p->mm && printk_ratelimit()) {
-                printk_sched("process %d (%s) no longer affine to cpu%d\n",
-                                task_pid_nr(p), p->comm, cpu);
+                switch (state) {
+                case cpuset:
+                        /* No more Mr. Nice Guy. */
+                        cpuset_cpus_allowed_fallback(p);
+                        state = possible;
+                        break;
+
+                case possible:
+                        do_set_cpus_allowed(p, cpu_possible_mask);
+                        state = fail;
+                        break;
+
+                case fail:
+                        BUG();
+                        break;
+                }
+        }
+
+out:
+        if (state != cpuset) {
+                /*
+                 * Don't tell them about moving exiting tasks or
+                 * kernel threads (both mm NULL), since they never
+                 * leave kernel.
+                 */
+                if (p->mm && printk_ratelimit()) {
+                        printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                                        task_pid_nr(p), p->comm, cpu);
+                }
         }

         return dest_cpu;
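select_fallback_rq() now escalates through an explicit state machine: try the current mask, let the cpuset code relax it, then fall back to cpu_possible_mask, and only BUG() if even that exposes no active CPU. A condensed user-space sketch of that escalation loop, with plain bitmasks standing in for the kernel's cpumask and cpuset machinery (all names here are illustrative):

/* Sketch of the cpuset -> possible -> fail escalation; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

enum fallback_state { use_cpuset, use_possible, fail };

static unsigned int cpu_active_mask;                    /* CPUs that may run tasks */
static const unsigned int cpu_possible_mask = (1u << NR_CPUS) - 1;

static int pick(unsigned int allowed)
{
        unsigned int usable = allowed & cpu_active_mask;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (usable & (1u << cpu))
                        return cpu;
        return -1;                                      /* nothing usable yet */
}

static int select_fallback_cpu(unsigned int *allowed, unsigned int cpuset_mask)
{
        enum fallback_state state = use_cpuset;

        for (;;) {
                int cpu = pick(*allowed);

                if (cpu >= 0)
                        return cpu;

                switch (state) {
                case use_cpuset:                        /* step 1: relax to the cpuset */
                        *allowed = cpuset_mask;
                        state = use_possible;
                        break;
                case use_possible:                      /* step 2: relax to all possible CPUs */
                        *allowed = cpu_possible_mask;
                        state = fail;
                        break;
                case fail:                              /* step 3: give up loudly */
                        fprintf(stderr, "no active CPU at all\n");
                        abort();
                }
        }
}

int main(void)
{
        unsigned int allowed = 1u << 5;                 /* task bound to CPU 5 */

        cpu_active_mask = 0x07;                         /* only CPUs 0-2 are active */
        printf("fallback CPU: %d\n", select_fallback_cpu(&allowed, 0));
        return 0;
}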
kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)

 #endif /* CONFIG_FAIR_GROUP_SCHED */

-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                   unsigned long delta_exec);
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 __clear_buddies_skip(se);
 }

-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);

 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                 resched_task(rq_of(cfs_rq)->curr);
 }

-static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                                    unsigned long delta_exec)
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
 {
         if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                 return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
 }

 #else /* CONFIG_CFS_BANDWIDTH */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                   unsigned long delta_exec) {}
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
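The fair.c hunks all silence the same diagnostic: the forward declarations of account_cfs_rq_runtime() and return_cfs_rq_runtime() lacked the inline qualifier that their later definitions carry, and older GCCs warn about a function "declared inline after being called". Making the forward declarations __always_inline too keeps declaration and definition consistent. A minimal stand-alone reproduction of the pattern, with plain inline and a hypothetical function name:

/*
 * Build with e.g. "gcc -Wall -O2 -c inline_after_use.c".  Older GCC
 * versions warn here: "'bump' declared inline after being called".
 */
static int bump(int v);                 /* forward declaration lacks "inline" */

int caller(int v)
{
        return bump(v);                 /* first call happens before ... */
}

static inline int bump(int v)           /* ... the inline definition */
{
        return v + 1;
}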
kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 next_idx:
                 if (idx >= MAX_RT_PRIO)
                         continue;
-                if (next && next->prio < idx)
+                if (next && next->prio <= idx)
                         continue;
                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
                         struct task_struct *p;
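In pick_next_highest_task_rt() a lower prio value means higher priority, and every task queued at index idx has prio equal to idx, so a candidate already held with prio <= idx cannot be beaten by anything in that queue; switching the test from < to <= lets the scan skip such queues instead of walking them. A tiny illustration of that comparison (hypothetical helper, not kernel code):

/* Lower numeric prio = higher priority, as in the scheduler. */
#include <stdbool.h>
#include <stdio.h>

static bool queue_worth_scanning(int candidate_prio, int idx)
{
        /* candidate_prio < 0 stands in for "no candidate yet" */
        return candidate_prio < 0 || candidate_prio > idx;
}

int main(void)
{
        printf("%d\n", queue_worth_scanning(-1, 3)); /* 1: no candidate yet */
        printf("%d\n", queue_worth_scanning(3, 3));  /* 0: equal prio, skip (the fix) */
        printf("%d\n", queue_worth_scanning(2, 3));  /* 0: better candidate already held */
        return 0;
}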