sched: convert struct cpupri_vec cpumask_var_t.
Impact: stack usage reduction, (future) size reduction for large NR_CPUS

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space
for small nr_cpu_ids but large CONFIG_NR_CPUS. The fact that cpupri_init()
is called both before and after the slab is available unfortunately makes
for an ugly extra parameter. We also use cpumask_any_and() to get rid of a
temporary in cpupri_find().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 4212823fb4
commit 68e74568fb
3 changed files with 38 additions and 15 deletions
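The space saving described in the changelog comes from how cpumask_var_t is defined: with CONFIG_CPUMASK_OFFSTACK=y it is a pointer that alloc_cpumask_var() backs with a bitmap sized for nr_cpu_ids, while without that option it is a one-element embedded array and the alloc/free calls compile to (nearly) nothing. Below is a minimal kernel-context sketch of that pattern, not a standalone program; the example_* names are illustrative and not part of this commit.

#include <linux/cpumask.h>
#include <linux/slab.h>

struct example_vec {
	cpumask_var_t mask;	/* pointer when CONFIG_CPUMASK_OFFSTACK=y, embedded array otherwise */
};

static int example_vec_init(struct example_vec *v)
{
	/* allocates nr_cpu_ids bits in the offstack case; essentially a no-op otherwise */
	if (!alloc_cpumask_var(&v->mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(v->mask);
	return 0;
}

static void example_vec_free(struct example_vec *v)
{
	free_cpumask_var(v->mask);	/* safe in both configurations */
}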
kernel/sched.c

@@ -6792,6 +6792,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 static void free_rootdomain(struct root_domain *rd)
 {
+	cpupri_cleanup(&rd->cpupri);
+
 	free_cpumask_var(rd->rto_mask);
 	free_cpumask_var(rd->online);
 	free_cpumask_var(rd->span);
@@ -6834,7 +6836,7 @@ static int init_rootdomain(struct root_domain *rd, bool bootmem)
 		alloc_bootmem_cpumask_var(&def_root_domain.span);
 		alloc_bootmem_cpumask_var(&def_root_domain.online);
 		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
-		cpupri_init(&rd->cpupri);
+		cpupri_init(&rd->cpupri, true);
 		return 0;
 	}
 
@@ -6845,9 +6847,12 @@ static int init_rootdomain(struct root_domain *rd, bool bootmem)
 	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_online;
 
-	cpupri_init(&rd->cpupri);
+	if (cpupri_init(&rd->cpupri, false) != 0)
+		goto free_rto_mask;
 	return 0;
 
+free_rto_mask:
+	free_cpumask_var(rd->rto_mask);
 free_online:
 	free_cpumask_var(rd->online);
 free_span:
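The bool parameter the changelog calls ugly exists because init_rootdomain() runs once for def_root_domain before the slab allocator is up and again later for dynamically created root domains; only the later path can use GFP_KERNEL and report failure. A hedged sketch of that split, using the real alloc_bootmem_cpumask_var()/alloc_cpumask_var() helpers but an illustrative example_alloc_mask() wrapper (the commit does this inline in cpupri_init(), not via a helper):

#include <linux/cpumask.h>

static int example_alloc_mask(cpumask_var_t *mask, bool bootmem)
{
	if (bootmem) {
		/* early boot: slab is not available yet; this helper has no failure return */
		alloc_bootmem_cpumask_var(mask);
		return 0;
	}
	/* normal case: slab allocation that the caller must be able to unwind */
	return alloc_cpumask_var(mask, GFP_KERNEL) ? 0 : -ENOMEM;
}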
kernel/sched_cpupri.c

@@ -67,24 +67,21 @@ static int convert_prio(int prio)
  * Returns: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
-		cpumask_t *lowest_mask)
+		struct cpumask *lowest_mask)
 {
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
 
 	for_each_cpupri_active(cp->pri_active, idx) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
-		cpumask_t mask;
 
 		if (idx >= task_pri)
 			break;
 
-		cpus_and(mask, p->cpus_allowed, vec->mask);
-
-		if (cpus_empty(mask))
+		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		*lowest_mask = mask;
+		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
@@ -126,7 +123,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		vec->count--;
 		if (!vec->count)
 			clear_bit(oldpri, cp->pri_active);
-		cpu_clear(cpu, vec->mask);
+		cpumask_clear_cpu(cpu, vec->mask);
 
 		spin_unlock_irqrestore(&vec->lock, flags);
 	}
@@ -136,7 +133,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 
 		spin_lock_irqsave(&vec->lock, flags);
 
-		cpu_set(cpu, vec->mask);
+		cpumask_set_cpu(cpu, vec->mask);
 		vec->count++;
 		if (vec->count == 1)
 			set_bit(newpri, cp->pri_active);
@@ -150,10 +147,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
+ * @bootmem: true if allocations need to use bootmem
  *
- * Returns: (void)
+ * Returns: -ENOMEM if memory fails.
  */
-void cpupri_init(struct cpupri *cp)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
 	int i;
 
@@ -164,11 +162,30 @@ void cpupri_init(struct cpupri *cp)
 
 		spin_lock_init(&vec->lock);
 		vec->count = 0;
-		cpus_clear(vec->mask);
+		if (bootmem)
+			alloc_bootmem_cpumask_var(&vec->mask);
+		else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+			goto cleanup;
 	}
 
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+	return 0;
 
+cleanup:
+	for (i--; i >= 0; i--)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+	return -ENOMEM;
 }
 
+/**
+ * cpupri_cleanup - clean up the cpupri structure
+ * @cp: The cpupri context
+ */
+void cpupri_cleanup(struct cpupri *cp)
+{
+	int i;
+
+	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+}
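The cpupri_find() change above removes the on-stack cpumask_t temporary by separating the test from the copy: cpumask_any_and() returns the first CPU present in both masks, or a value >= nr_cpu_ids when they do not intersect, so the intersection is only written into the caller-supplied lowest_mask once a match is known. A hedged kernel-context sketch of the idiom, with illustrative example_* names that are not part of the commit:

#include <linux/cpumask.h>

static int example_intersect(const struct cpumask *a, const struct cpumask *b,
			     struct cpumask *result)
{
	/* old style needed a temporary: cpumask_t tmp; cpus_and(tmp, *a, *b); if (cpus_empty(tmp)) ... */
	if (cpumask_any_and(a, b) >= nr_cpu_ids)
		return 0;		/* no common CPU; result is left untouched */

	cpumask_and(result, a, b);	/* fill caller-provided storage only on success */
	return 1;
}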
kernel/sched_cpupri.h

@@ -14,7 +14,7 @@
 struct cpupri_vec {
 	spinlock_t lock;
 	int count;
-	cpumask_t mask;
+	cpumask_var_t mask;
 };
 
 struct cpupri {
@@ -27,7 +27,8 @@ struct cpupri {
 int cpupri_find(struct cpupri *cp,
		struct task_struct *p, cpumask_t *lowest_mask);
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
-void cpupri_init(struct cpupri *cp);
+int cpupri_init(struct cpupri *cp, bool bootmem);
+void cpupri_cleanup(struct cpupri *cp);
 #else
 #define cpupri_set(cp, cpu, pri) do { } while (0)
 #define cpupri_init() do { } while (0)