sched: style cleanups
style cleanup of various changes that were done recently.

no code changed:

      text    data     bss     dec     hex filename
     23680    2542      28   26250    668a sched.o.before
     23680    2542      28   26250    668a sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ce6bd420f4
commit 41a2d6cfa3
1 changed file with 68 additions and 64 deletions
@@ -211,7 +211,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 #else
 	tg = &init_task_group;
 #endif
-
 	return tg;
 }
 
@@ -249,14 +248,15 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
-	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 	 * (like users, containers etc.)
 	 *
 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 	 * list is used during load balance.
 	 */
-	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
 #endif
 };
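(The rewrap above follows the usual kernel layout for multi-line comments: a bare /* opener, each continuation line beginning with an aligned " * ", and the closing */ on a line of its own. A minimal illustrative sketch, not part of the commit:)

	/*
	 * Multi-line comments start their text on the second line,
	 * keep every '*' aligned under the opener, and put the
	 * closing marker on its own line.
	 */
	struct list_head leaf_cfs_rq_list;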
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
-				       struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
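(This is the wrapping pattern applied throughout the commit: when a declaration would overflow 80 columns, the storage class and return type move to a line of their own so the name and the full parameter list fit on the next line. An illustrative sketch using a hypothetical helper name, example_to_group:)

/* Before: the parameter list needs a continuation line. */
static int example_to_group(int cpu, const cpumask_t *cpu_map,
			    struct sched_group **sg);

/* After: return type alone on line one, whole signature on line two. */
static int
example_to_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg);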
@@ -5245,11 +5245,12 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			 * kernel threads (both mm NULL), since they never
 			 * leave kernel.
 			 */
-			if (p->mm && printk_ratelimit())
+			if (p->mm && printk_ratelimit()) {
 				printk(KERN_INFO "process %d (%s) no "
 				       "longer affine to cpu%d\n",
 				       task_pid_nr(p), p->comm, dead_cpu);
+			}
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
 
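(The braces added above reflect the kernel-style preference that a conditional body spanning several physical lines reads better braced, even when it is a single statement. A minimal sketch of the distinction, assuming the same identifiers as the hunk:)

/* One short statement: no braces needed. */
if (!p->mm)
	return;

/* Statement continued across lines: braces mark its extent. */
if (p->mm && printk_ratelimit()) {
	printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
	       task_pid_nr(p), p->comm, dead_cpu);
}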
@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
-		/* No need to migrate the tasks: it was best-effort if
+		/*
+		 * No need to migrate the tasks: it was best-effort if
 		 * they didn't take sched_hotcpu_mutex. Just wake up
-		 * the requestors. */
+		 * the requestors.
+		 */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6041,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct cpuacct *ca = cgroup_ca(cont);
 