Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-02 20:29:20 +00:00
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
  sched: Cleanup duplicate local variable in [enqueue|dequeue]_task_fair
  sched: Replace use of entity_key()
  sched: Separate group-scheduling code more clearly
  sched: Reorder root_domain to remove 64 bit alignment padding
  sched: Do not attempt to destroy uninitialized rt_bandwidth
  sched: Remove unused function cpu_cfs_rq()
  sched: Fix (harmless) typo 'CONFG_FAIR_GROUP_SCHED'
  sched, cgroup: Optimize load_balance_fair()
  sched: Don't update shares twice on on_rq parent
  sched: update correct entity's runtime in check_preempt_wakeup()
  xtensa: Use generic config PREEMPT definition
  h8300: Use generic config PREEMPT definition
  m32r: Use generic PREEMPT config
  sched: Skip autogroup when looking for all rt sched groups
  sched: Simplify mutex_spin_on_owner()
  sched: Remove rcu_read_lock() from wake_affine()
  sched: Generalize sleep inside spinlock detection
  sched: Make sleeping inside spinlock detection working in !CONFIG_PREEMPT
  sched: Isolate preempt counting in its own config option
  sched: Remove pointless in_atomic() definition check
  ...
Commit bdc7ccfc06, 21 changed files with 146 additions and 175 deletions.
Showing changes to kernel/sched.c (119 changed lines):
@@ -124,7 +124,7 @@
 
 static inline int rt_policy(int policy)
 {
-        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+        if (policy == SCHED_FIFO || policy == SCHED_RR)
                 return 1;
         return 0;
 }
@@ -422,6 +422,7 @@ struct rt_rq {
  */
 struct root_domain {
         atomic_t refcount;
+        atomic_t rto_count;
         struct rcu_head rcu;
         cpumask_var_t span;
         cpumask_var_t online;
@@ -431,7 +432,6 @@ struct root_domain {
          * one runnable RT task.
          */
         cpumask_var_t rto_mask;
-        atomic_t rto_count;
         struct cpupri cpupri;
 };
 
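The two hunks above implement 'sched: Reorder root_domain to remove 64 bit alignment padding': on 64-bit, a lone 4-byte atomic_t next to 8-byte members leaves a 4-byte hole, so pairing refcount with rto_count packs both counters into one 8-byte slot. A minimal userspace sketch of the effect, assuming an LP64 ABI and using plain int / void * as stand-ins for atomic_t and the pointer-sized members (struct cpupri is simplified to a pointer here):

#include <stdio.h>

/* Stand-ins: 'int' for atomic_t, 'void *' for cpumask_var_t and the rest. */
struct before {
        int refcount;           /* 4 bytes + 4 padding */
        void *rcu, *span, *online, *rto_mask;
        int rto_count;          /* 4 bytes + 4 padding */
        void *cpupri;
};
struct after {
        int refcount;           /* the two counters share one 8-byte slot */
        int rto_count;
        void *rcu, *span, *online, *rto_mask, *cpupri;
};

int main(void)
{
        /* On LP64: before = 56 bytes (two 4-byte holes), after = 48. */
        printf("before: %zu bytes\n", sizeof(struct before));
        printf("after:  %zu bytes\n", sizeof(struct after));
        return 0;
}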
@@ -1568,38 +1568,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
         return rq->avg_load_per_task;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
-        unsigned long load;
-        long cpu = (long)data;
-
-        if (!tg->parent) {
-                load = cpu_rq(cpu)->load.weight;
-        } else {
-                load = tg->parent->cfs_rq[cpu]->h_load;
-                load *= tg->se[cpu]->load.weight;
-                load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
-        }
-
-        tg->cfs_rq[cpu]->h_load = load;
-
-        return 0;
-}
-
-static void update_h_load(long cpu)
-{
-        walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
 #ifdef CONFIG_PREEMPT
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
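These helpers leave kernel/sched.c as part of 'sched, cgroup: Optimize load_balance_fair()' (they appear to move next to their user in the fair-scheduling code rather than disappear). The recursion they express: a group's h_load on a cpu is its parent's h_load scaled by the group se's weight over the parent cfs_rq's weight, with +1 guarding against division by zero. A worked standalone example with made-up weights (plain unsigned longs, not the kernel's data structures):

#include <stdio.h>

/* Hypothetical two-level example of the h_load recursion:
 * h_load(child) = h_load(parent) * se_weight / (parent_cfs_weight + 1)
 */
int main(void)
{
        unsigned long rq_load = 3072;        /* root cfs_rq load.weight */
        unsigned long se_weight = 1024;      /* child group's se weight here */
        unsigned long parent_weight = 3072;  /* parent cfs_rq load.weight */

        unsigned long root_h_load = rq_load; /* the !tg->parent case */
        unsigned long child_h_load =
                root_h_load * se_weight / (parent_weight + 1);

        printf("root h_load  = %lu\n", root_h_load);  /* 3072 */
        printf("child h_load = %lu\n", child_h_load); /* 1023, ~1/3 */
        return 0;
}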
@@ -2497,7 +2465,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
         if (p->sched_class->task_woken)
                 p->sched_class->task_woken(rq, p);
 
-        if (unlikely(rq->idle_stamp)) {
+        if (rq->idle_stamp) {
                 u64 delta = rq->clock - rq->idle_stamp;
                 u64 max = 2*sysctl_sched_migration_cost;
 
@@ -2886,7 +2854,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
         p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
         /* Want to start with kernel preemption disabled. */
         task_thread_info(p)->preempt_count = 1;
 #endif
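This guard change comes from 'sched: Isolate preempt counting in its own config option': the child's initial preempt_count must be set whenever preemption counting is compiled in, which now includes non-preemptible kernels that enable the atomic-sleep checker. A sketch of the intended config relationship (illustrative macros standing in for Kconfig 'select' logic, not the kernel's actual Kconfig files):

/* Illustrative only. In Kconfig terms: PREEMPT selects PREEMPT_COUNT,
 * and DEBUG_ATOMIC_SLEEP selects PREEMPT_COUNT, so counting can be on
 * even when the kernel itself is not preemptible.
 */
#if defined(CONFIG_PREEMPT) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
# define CONFIG_PREEMPT_COUNT 1
#endif

#ifdef CONFIG_PREEMPT_COUNT
/* fork: child starts with preemption disabled; it is re-enabled once
 * the child is fully scheduled in. */
# define INIT_TASK_PREEMPT_COUNT 1
#else
# define INIT_TASK_PREEMPT_COUNT 0
#endif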
@@ -4338,11 +4306,8 @@ EXPORT_SYMBOL(schedule);
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
-        bool ret = false;
-
-        rcu_read_lock();
         if (lock->owner != owner)
-                goto fail;
+                return false;
 
         /*
          * Ensure we emit the owner->on_cpu, dereference _after_ checking
@@ -4352,11 +4317,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
          */
         barrier();
 
-        ret = owner->on_cpu;
-fail:
-        rcu_read_unlock();
-
-        return ret;
+        return owner->on_cpu;
 }
 
 /*
@@ -4368,21 +4329,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
         if (!sched_feat(OWNER_SPIN))
                 return 0;
 
+        rcu_read_lock();
         while (owner_running(lock, owner)) {
                 if (need_resched())
-                        return 0;
+                        break;
 
                 arch_mutex_cpu_relax();
         }
+        rcu_read_unlock();
 
         /*
-         * If the owner changed to another task there is likely
-         * heavy contention, stop spinning.
+         * We break out the loop above on need_resched() and when the
+         * owner changed, which is a sign for heavy contention. Return
+         * success only when lock->owner is NULL.
          */
-        if (lock->owner)
-                return 0;
-
-        return 1;
+        return lock->owner == NULL;
 }
 
 #endif
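The two owner_running() hunks and this one together implement 'sched: Simplify mutex_spin_on_owner()': one RCU read-side section now covers the whole spin instead of being re-entered on every iteration, and the three exit paths collapse into a single lock->owner == NULL test. A hedged userspace sketch of the same adaptive-spin shape (fake lock/task types, a stub for need_resched(), and a compiler barrier standing in for arch_mutex_cpu_relax()):

#include <stdbool.h>
#include <stddef.h>

struct task { int on_cpu; };             /* stand-in for task_struct */
struct fakelock { struct task *owner; }; /* stand-in for struct mutex */

static bool owner_running(struct fakelock *lock, struct task *owner)
{
        if (lock->owner != owner)  /* owner changed: heavy contention */
                return false;
        return owner->on_cpu;      /* spin only while the owner runs */
}

static bool need_resched_stub(void) { return false; }

/* Spin while the owner holds a cpu; afterwards report success only if
 * the lock was actually released, since any other exit (owner changed,
 * reschedule pending) means further spinning is pointless. */
static int spin_on_owner(struct fakelock *lock, struct task *owner)
{
        while (owner_running(lock, owner)) {
                if (need_resched_stub())
                        break;
                __asm__ volatile("" ::: "memory"); /* cpu_relax() stand-in */
        }
        return lock->owner == NULL;
}

int main(void)
{
        struct task owner = { .on_cpu = 0 };
        struct fakelock lock = { .owner = NULL };  /* already released */
        return !spin_on_owner(&lock, &owner);      /* exits 0 on success */
}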
@@ -7898,17 +7859,10 @@ int in_sched_functions(unsigned long addr)
         && addr < (unsigned long)__sched_text_end);
 }
 
-static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
         cfs_rq->tasks_timeline = RB_ROOT;
         INIT_LIST_HEAD(&cfs_rq->tasks);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-        cfs_rq->rq = rq;
-        /* allow initial update_cfs_load() to truncate */
-#ifdef CONFIG_SMP
-        cfs_rq->load_stamp = 1;
-#endif
-#endif
         cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -7928,13 +7882,9 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
         /* delimiter for bitsearch: */
         __set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
         rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#ifdef CONFIG_SMP
         rt_rq->highest_prio.next = MAX_RT_PRIO;
-#endif
-#endif
-#ifdef CONFIG_SMP
         rt_rq->rt_nr_migratory = 0;
         rt_rq->overloaded = 0;
         plist_head_init(&rt_rq->pushable_tasks);
@@ -7944,11 +7894,6 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
         rt_rq->rt_throttled = 0;
         rt_rq->rt_runtime = 0;
         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-
-#ifdef CONFIG_RT_GROUP_SCHED
-        rt_rq->rt_nr_boosted = 0;
-        rt_rq->rq = rq;
-#endif
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7957,11 +7902,17 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                            struct sched_entity *parent)
 {
         struct rq *rq = cpu_rq(cpu);
-        tg->cfs_rq[cpu] = cfs_rq;
-        init_cfs_rq(cfs_rq, rq);
+
         cfs_rq->tg = tg;
+        cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+        /* allow initial update_cfs_load() to truncate */
+        cfs_rq->load_stamp = 1;
+#endif
 
+        tg->cfs_rq[cpu] = cfs_rq;
         tg->se[cpu] = se;
+
         /* se could be NULL for root_task_group */
         if (!se)
                 return;
@@ -7984,12 +7935,14 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 {
         struct rq *rq = cpu_rq(cpu);
 
-        tg->rt_rq[cpu] = rt_rq;
-        init_rt_rq(rt_rq, rq);
+        rt_rq->highest_prio.curr = MAX_RT_PRIO;
+        rt_rq->rt_nr_boosted = 0;
+        rt_rq->rq = rq;
         rt_rq->tg = tg;
-        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+        tg->rt_rq[cpu] = rt_rq;
         tg->rt_se[cpu] = rt_se;
+
         if (!rt_se)
                 return;
 
@@ -8071,7 +8024,7 @@ void __init sched_init(void)
                 rq->nr_running = 0;
                 rq->calc_load_active = 0;
                 rq->calc_load_update = jiffies + LOAD_FREQ;
-                init_cfs_rq(&rq->cfs, rq);
+                init_cfs_rq(&rq->cfs);
                 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
                 root_task_group.shares = root_task_group_load;
@@ -8185,7 +8138,7 @@ void __init sched_init(void)
         scheduler_running = 1;
 }
 
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
         int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
@@ -8195,7 +8148,6 @@ static inline int preempt_count_equals(int preempt_offset)
 
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
-#ifdef in_atomic
         static unsigned long prev_jiffy;        /* ratelimiting */
 
         if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
@@ -8217,7 +8169,6 @@ void __might_sleep(const char *file, int line, int preempt_offset)
         if (irqs_disabled())
                 print_irqtrace_events(current);
         dump_stack();
-#endif
 }
 EXPORT_SYMBOL(__might_sleep);
 #endif
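The three hunks above come from the sleep-in-atomic rework ('sched: Generalize sleep inside spinlock detection' and its follow-ups): the checker is renamed to CONFIG_DEBUG_ATOMIC_SLEEP and no longer depends on an in_atomic definition, so it also works on non-preemptible kernels. The class of bug it reports, as an illustrative kernel-style fragment (the device_state type and fill_buffer() are hypothetical; spin_lock/kmalloc are the real APIs):

/* Hypothetical driver code showing what DEBUG_ATOMIC_SLEEP catches:
 * kmalloc(GFP_KERNEL) may sleep and calls might_sleep(); with preempt
 * counting compiled in, the held spinlock makes preempt_count() != 0,
 * so __might_sleep() prints "BUG: sleeping function called from
 * invalid context".
 */
static int fill_buffer(struct device_state *dev)
{
        void *buf;

        spin_lock(&dev->lock);
        buf = kmalloc(4096, GFP_KERNEL); /* wrong: may sleep under spinlock */
        if (buf)
                dev->buf = buf;
        spin_unlock(&dev->lock);

        /* Fix: allocate before taking the lock, or use GFP_ATOMIC. */
        return buf ? 0 : -ENOMEM;
}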
@@ -8376,6 +8327,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                 if (!se)
                         goto err_free_rq;
 
+                init_cfs_rq(cfs_rq);
                 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
         }
 
@@ -8403,7 +8355,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
         list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
         raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
-#else /* !CONFG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
 }
@@ -8424,7 +8376,8 @@ static void free_rt_sched_group(struct task_group *tg)
 {
         int i;
 
-        destroy_rt_bandwidth(&tg->rt_bandwidth);
+        if (tg->rt_se)
+                destroy_rt_bandwidth(&tg->rt_bandwidth);
 
         for_each_possible_cpu(i) {
                 if (tg->rt_rq)
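This guard is 'sched: Do not attempt to destroy uninitialized rt_bandwidth': if group allocation failed early, tg->rt_se was never set and the bandwidth hrtimer was never initialized, so tearing it down would touch uninitialized state. Guarding teardown on a field that only becomes non-NULL after init succeeded is the general pattern; a minimal userspace sketch (plain malloc/free standing in for the kernel objects):

#include <stdlib.h>

/* Stand-ins: 'bandwidth' for the timer-backed rt_bandwidth state,
 * 'se' for the array whose presence proves init completed. */
struct group {
        int *bandwidth;
        int **se;
};

static void free_group(struct group *g)
{
        if (!g)
                return;
        /* Tear down the bandwidth state only if init got far enough
         * to set up ->se; otherwise it was never initialized. */
        if (g->se)
                free(g->bandwidth);
        free(g->se);
        free(g);
}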
@@ -8465,6 +8418,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
                 if (!rt_se)
                         goto err_free_rq;
 
+                init_rt_rq(rt_rq, cpu_rq(i));
+                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
         }
 