sched: wrap sched_group and sched_domain cpumask accesses.

Impact: trivial wrap of member accesses

This eases the transition in the next patch.

We also get rid of a temporary cpumask in find_idlest_cpu() thanks to
for_each_cpu_and(), and in sched_balance_self() by reading the domain's
weight before setting sd to NULL.
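
For reference, the find_idlest_cpu() change follows this before/after
pattern (a sketch of the idea, not the literal hunk; consider() is a
placeholder for the loop body):

	/* Before: AND the masks into an on-stack temporary, then walk it. */
	cpumask_t tmp;
	cpus_and(tmp, group->cpumask, p->cpus_allowed);
	for_each_cpu_mask(i, tmp)
		consider(i);

	/* After: for_each_cpu_and() performs the AND per iteration,
	 * so the on-stack temporary cpumask disappears. */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed)
		consider(i);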

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 758b2cdc6f
parent 1e5ce4f4a7
Author:    Rusty Russell <rusty@rustcorp.com.au>
Date:      2008-11-25 02:35:04 +10:30
Committer: Ingo Molnar <mingo@elte.hu>

5 changed files with 73 additions and 67 deletions

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -786,6 +786,11 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return &sg->cpumask;
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
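
The accessor keeps callers off the raw member so its type can change in
the next patch. A hedged sketch of the call-site pattern this enables
(illustrative only, not a hunk from this commit):

	/* Before: poke the member directly with the old cpus_* API. */
	int first = first_cpu(group->cpumask);

	/* After: go through the accessor and the struct cpumask API. */
	int first = cpumask_first(sched_group_cpus(group));
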
@@ -866,6 +871,11 @@ struct sched_domain {
 #endif
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return &sd->span;
+}
+
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
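
sched_domain_span() is what makes the sched_balance_self() cleanup noted
above possible: the domain's weight can be read through the accessor
before sd is cleared, rather than first copying sd->span into a temporary
mask. A hedged sketch of that pattern, simplified from kernel/sched.c
(not the literal hunk):

	int weight;

	/* Read the weight while sd still points at the domain... */
	weight = cpumask_weight(sched_domain_span(sd));
	sd = NULL;

	/* ...so no on-stack cpumask copy of sd->span is needed here. */
	for_each_domain(cpu, tmp) {
		if (weight <= cpumask_weight(sched_domain_span(tmp)))
			break;
		if (tmp->flags & flag)
			sd = tmp;
	}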