sched: convert struct root_domain to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space
for small nr_cpu_ids but big CONFIG_NR_CPUS. cpumask_var_t is just a
struct cpumask for !CONFIG_CPUMASK_OFFSTACK.

def_root_domain is static, and so its masks are initialized with
alloc_bootmem_cpumask_var. After that, alloc_cpumask_var is used.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6a7b3dc344
commit c6c4927b22
2 changed files with 64 additions and 31 deletions
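The cpumask_var_t lifecycle the changelog describes is worth spelling out. Below is a minimal sketch of that pattern, not the commit's actual kernel/sched.c change: struct and function names such as example_root_domain, example_init_rootdomain and example_free_rootdomain are made up for illustration. The masks of a static, boot-time instance (like def_root_domain) are set up with alloc_bootmem_cpumask_var, runtime instances use alloc_cpumask_var, and everything is released with free_cpumask_var.

/*
 * Minimal sketch of the cpumask_var_t pattern (illustrative only:
 * example_root_domain and the example_* functions are made-up names
 * modelled on the scheduler's root_domain, not the commit's exact code).
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

struct example_root_domain {
        cpumask_var_t span;             /* CPUs covered by this root domain */
        cpumask_var_t online;           /* online CPUs within the span */
        cpumask_var_t rto_mask;         /* CPUs with overloaded RT runqueues */
};

/* Runtime path: allocations can fail and must be unwound. */
static int example_init_rootdomain(struct example_root_domain *rd)
{
        if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
                return -ENOMEM;
        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
                goto free_span;
        if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                goto free_online;

        cpumask_clear(rd->span);
        cpumask_clear(rd->online);
        cpumask_clear(rd->rto_mask);
        return 0;

free_online:
        free_cpumask_var(rd->online);
free_span:
        free_cpumask_var(rd->span);
        return -ENOMEM;
}

/* Early-boot path for a static instance, before kmalloc is available. */
static void __init example_init_def_rootdomain(struct example_root_domain *rd)
{
        alloc_bootmem_cpumask_var(&rd->span);
        alloc_bootmem_cpumask_var(&rd->online);
        alloc_bootmem_cpumask_var(&rd->rto_mask);
}

static void example_free_rootdomain(struct example_root_domain *rd)
{
        free_cpumask_var(rd->rto_mask);
        free_cpumask_var(rd->online);
        free_cpumask_var(rd->span);
}

When CONFIG_CPUMASK_OFFSTACK is unset, cpumask_var_t is just a struct cpumask and the alloc/free helpers are no-ops, so the same code works either way; that is why the changelog notes the space saving only materialises for kernels built with a big CONFIG_NR_CPUS but running with a small nr_cpu_ids.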
kernel/sched_rt.c

@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
         if (!rq->online)
                 return;
 
-        cpu_set(rq->cpu, rq->rd->rto_mask);
+        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
         /*
          * Make sure the mask is visible before we set
          * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
         /* the order here really doesn't matter */
         atomic_dec(&rq->rd->rto_count);
-        cpu_clear(rq->cpu, rq->rd->rto_mask);
+        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
         return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-        return cpu_online_map;
+        return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
         return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-        return cpu_online_map;
+        return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
         int i, weight, more = 0;
         u64 rt_period;
 
-        weight = cpus_weight(rd->span);
+        weight = cpumask_weight(rd->span);
 
         spin_lock(&rt_b->rt_runtime_lock);
         rt_period = ktime_to_ns(rt_b->rt_period);
-        for_each_cpu_mask_nr(i, rd->span) {
+        for_each_cpu(i, rd->span) {
                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                 s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
                 /*
                  * Greedy reclaim, take back as much as we can.
                  */
-                for_each_cpu_mask(i, rd->span) {
+                for_each_cpu(i, rd->span) {
                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                         s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
         int i, idle = 1;
-        cpumask_t span;
+        const struct cpumask *span;
 
         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                 return 1;
 
         span = sched_rt_period_mask();
-        for_each_cpu_mask(i, span) {
+        for_each_cpu(i, span) {
                 int enqueue = 0;
                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                 struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1181,7 +1181,7 @@ static int pull_rt_task(struct rq *this_rq)
 
         next = pick_next_task_rt(this_rq);
 
-        for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                 if (this_cpu == cpu)
                         continue;
 
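Continuing the illustrative example_root_domain sketch above, the call-site side of the conversion visible in the diff maps the old by-value cpumask_t helpers onto their pointer-based counterparts. example_report_overload below is a made-up function, not kernel code; it only demonstrates the API mapping.

/*
 * Call-site sketch (continues the example_root_domain example above;
 * example_report_overload is not a real kernel function). The old
 * by-value helpers map onto the pointer-based API like this:
 *   cpu_set()              -> cpumask_set_cpu()
 *   cpu_clear()            -> cpumask_clear_cpu()
 *   cpus_weight()          -> cpumask_weight()
 *   for_each_cpu_mask_nr() -> for_each_cpu()
 *   cpu_online_map         -> cpu_online_mask
 */
#include <linux/kernel.h>
#include <linux/cpumask.h>

static void example_report_overload(struct example_root_domain *rd, int this_cpu)
{
        int cpu;

        cpumask_set_cpu(this_cpu, rd->rto_mask);

        pr_info("RT overload: %d of %d CPUs in span\n",
                cpumask_weight(rd->rto_mask),
                cpumask_weight(rd->span));

        for_each_cpu(cpu, rd->rto_mask) {
                if (cpu == this_cpu)
                        continue;
                /* ... pull RT work from the overloaded CPU ... */
        }

        cpumask_clear_cpu(this_cpu, rd->rto_mask);
}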