Mirror of https://github.com/Fishwaldo/Star64_linux.git
generic: use new set_cpus_allowed_ptr function
* Use the new set_cpus_allowed_ptr() function added by the previous patch,
  which, instead of passing the "newly allowed cpus" cpumask_t arg by
  value, passes it by pointer:

    -int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
    +int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)

* Modify CPU_MASK_ALL

Depends on:
    [sched-devel]: sched: add new set_cpus_allowed_ptr function

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
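For illustration only, a minimal C sketch of the pin/restore call-site pattern this patch converts throughout the diff below (the variable `cpu` and the surrounding context are hypothetical, not taken from the patch):

        cpumask_t saved_mask = current->cpus_allowed;

        /* Before: the whole cpumask_t (512 bytes when NR_CPUS=4096) was
         * copied by value on every call:
         *         set_cpus_allowed(current, cpumask_of_cpu(cpu));
         * After: only a pointer crosses the call boundary: */
        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));

        /* ... do work that must run on that CPU ... */

        set_cpus_allowed_ptr(current, &saved_mask);     /* restore */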
This commit is contained in:
parent fc0e474840
commit f70316dace
7 changed files with 27 additions and 21 deletions
drivers/acpi/processor_throttling.c

@@ -838,10 +838,10 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
          * Migrate task to the cpu pointed by pr.
          */
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
         ret = pr->throttling.acpi_processor_get_throttling(pr);
         /* restore the previous state */
-        set_cpus_allowed(current, saved_mask);
+        set_cpus_allowed_ptr(current, &saved_mask);

         return ret;
 }
@@ -1025,7 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
          * it can be called only for the cpu pointed by pr.
          */
         if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-                set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+                set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
                 ret = p_throttling->acpi_processor_set_throttling(pr,
                                 t_state.target_state);
         } else {
@@ -1056,7 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                 continue;
                         }
                         t_state.cpu = i;
-                        set_cpus_allowed(current, cpumask_of_cpu(i));
+                        set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                         ret = match_pr->throttling.
                                 acpi_processor_set_throttling(
                                 match_pr, t_state.target_state);
@@ -1074,7 +1074,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                                 &t_state);
         }
         /* restore the previous state */
-        set_cpus_allowed(current, saved_mask);
+        set_cpus_allowed_ptr(current, &saved_mask);
         return ret;
 }

drivers/firmware/dcdbas.c

@@ -265,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)

         /* SMI requires CPU 0 */
         old_mask = current->cpus_allowed;
-        set_cpus_allowed(current, cpumask_of_cpu(0));
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
         if (smp_processor_id() != 0) {
                 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                         __FUNCTION__);
@@ -285,7 +285,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
                 );

 out:
-        set_cpus_allowed(current, old_mask);
+        set_cpus_allowed_ptr(current, &old_mask);
         return ret;
 }

drivers/pci/pci-driver.c

@@ -182,15 +182,18 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
         struct mempolicy *oldpol;
         cpumask_t oldmask = current->cpus_allowed;
         int node = pcibus_to_node(dev->bus);
-        if (node >= 0 && node_online(node))
-                set_cpus_allowed(current, node_to_cpumask(node));
+        if (node >= 0) {
+                node_to_cpumask_ptr(nodecpumask, node);
+                set_cpus_allowed_ptr(current, nodecpumask);
+        }
+
         /* And set default memory allocation policy */
         oldpol = current->mempolicy;
         current->mempolicy = NULL;      /* fall back to system default policy */
 #endif
         error = drv->probe(dev, id);
 #ifdef CONFIG_NUMA
-        set_cpus_allowed(current, oldmask);
+        set_cpus_allowed_ptr(current, &oldmask);
         current->mempolicy = oldpol;
 #endif
         return error;
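The pci_call_probe() hunk above also swaps node_to_cpumask(), which returns a cpumask_t by value, for the node_to_cpumask_ptr() helper from the same patch series. Roughly, the helper declares the node's mask and a pointer to it in one statement; the expansion below is an assumption for illustration, not the verbatim kernel definition:

        /* Assumed sketch of the generic fallback: build the node's mask
         * once on the stack and expose it through a pointer named v. */
        #define node_to_cpumask_ptr(v, node)                            \
                cpumask_t _##v = node_to_cpumask(node);                 \
                const cpumask_t *v = &_##v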
kernel/cpu.c

@@ -232,9 +232,9 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)

         /* Ensure that we are not runnable on dying cpu */
         old_allowed = current->cpus_allowed;
-        tmp = CPU_MASK_ALL;
+        cpus_setall(tmp);
         cpu_clear(cpu, tmp);
-        set_cpus_allowed(current, tmp);
+        set_cpus_allowed_ptr(current, &tmp);

         p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

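Note the companion change in _cpu_down(): `tmp = CPU_MASK_ALL`, a struct copy from a large initializer, becomes cpus_setall(tmp), which fills the existing mask in place. A sketch of the in-place semantics, modeled on the kernel's bitmap helpers of that era (treat the exact form as an assumption):

        /* Assumed sketch: set bits 0..nbits-1 of the mask in place. */
        static inline void __cpus_setall(cpumask_t *dstp, int nbits)
        {
                bitmap_fill(dstp->bits, nbits);
        }
        #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)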
@@ -268,7 +268,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 out_thread:
         err = kthread_stop(p);
 out_allowed:
-        set_cpus_allowed(current, old_allowed);
+        set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
         cpu_hotplug_done();
         return err;
kernel/kmod.c

@@ -165,7 +165,7 @@ static int ____call_usermodehelper(void *data)
         }

         /* We can run anywhere, unlike our parent keventd(). */
-        set_cpus_allowed(current, CPU_MASK_ALL);
+        set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);

         /*
          * Our parent is keventd, which runs with elevated scheduling priority.
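____call_usermodehelper() passes CPU_MASK_ALL_PTR rather than taking the address of a CPU_MASK_ALL temporary at the call site; the macro comes from the prerequisite set_cpus_allowed_ptr patch. A plausible small-NR_CPUS shape (an assumption, not the verbatim definition):

        /* Assumed sketch: a ready-made pointer to an all-ones cpumask,
         * so "run anywhere" callers avoid materializing a temporary. */
        #define CPU_MASK_ALL_PTR        (&CPU_MASK_ALL)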
kernel/rcutorture.c

@@ -723,9 +723,10 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-        cpumask_t tmp_mask = CPU_MASK_ALL;
+        cpumask_t tmp_mask;
         int i;

+        cpus_setall(tmp_mask);
         get_online_cpus();

         /* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -737,25 +738,27 @@ static void rcu_torture_shuffle_tasks(void)
         if (rcu_idle_cpu != -1)
                 cpu_clear(rcu_idle_cpu, tmp_mask);

-        set_cpus_allowed(current, tmp_mask);
+        set_cpus_allowed_ptr(current, &tmp_mask);

         if (reader_tasks) {
                 for (i = 0; i < nrealreaders; i++)
                         if (reader_tasks[i])
-                                set_cpus_allowed(reader_tasks[i], tmp_mask);
+                                set_cpus_allowed_ptr(reader_tasks[i],
+                                                     &tmp_mask);
         }

         if (fakewriter_tasks) {
                 for (i = 0; i < nfakewriters; i++)
                         if (fakewriter_tasks[i])
-                                set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+                                set_cpus_allowed_ptr(fakewriter_tasks[i],
+                                                     &tmp_mask);
         }

         if (writer_task)
-                set_cpus_allowed(writer_task, tmp_mask);
+                set_cpus_allowed_ptr(writer_task, &tmp_mask);

         if (stats_task)
-                set_cpus_allowed(stats_task, tmp_mask);
+                set_cpus_allowed_ptr(stats_task, &tmp_mask);

         if (rcu_idle_cpu == -1)
                 rcu_idle_cpu = num_online_cpus() - 1;
kernel/stop_machine.c

@@ -35,7 +35,7 @@ static int stopmachine(void *cpu)
         int irqs_disabled = 0;
         int prepared = 0;

-        set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));

         /* Ack: we are alive */
         smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */