cpumask: smp_call_function_many()
Impact: Implementation change to remove cpumask_t from stack.

Actually change smp_call_function_mask() to smp_call_function_many(). We avoid cpumasks on the stack in this version.

(S390 has its own version, but that's going away apparently.)

We have to do some dancing to figure out if 0 or 1 other cpus are in the mask supplied and the online mask without allocating a tmp cpumask. It's still fairly cheap.

We allocate the cpumask at the end of the call_function_data structure: if allocation fails we fall back to smp_call_function_single rather than using the baroque quiescing code (which needs a cpumask on stack).

(Thanks to Hiroshi Shimamoto for spotting several bugs in previous versions!)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Cc: npiggin@suse.de
Cc: axboe@kernel.dk
parent 3fa4152069
commit 54b11e6d57
2 changed files with 56 additions and 96 deletions
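
Illustrative note (not part of the commit): the "dancing" described in the commit message boils down to scanning (mask AND online) for one CPU other than the caller, then for a second one, instead of building a temporary cpumask. Below is a minimal user-space C sketch of that scan; NR_CPUS, next_and(), classify() and the plain unsigned int masks are invented stand-ins for the kernel's cpumask helpers.

#include <stdio.h>

#define NR_CPUS 8

/* Find the next bit >= start set in both masks, or NR_CPUS if none. */
static int next_and(int start, unsigned int mask, unsigned int online)
{
        int cpu;

        for (cpu = start; cpu < NR_CPUS; cpu++)
                if ((mask & online) & (1u << cpu))
                        return cpu;
        return NR_CPUS;
}

static void classify(unsigned int mask, unsigned int online, int self)
{
        int cpu, next_cpu;

        /* First online CPU in the mask, skipping ourselves. */
        cpu = next_and(0, mask, online);
        if (cpu == self)
                cpu = next_and(cpu + 1, mask, online);
        if (cpu >= NR_CPUS) {
                printf("no other online CPUs -> nothing to do\n");
                return;
        }

        /* Is there a second one (again skipping ourselves)? */
        next_cpu = next_and(cpu + 1, mask, online);
        if (next_cpu == self)
                next_cpu = next_and(next_cpu + 1, mask, online);
        if (next_cpu >= NR_CPUS) {
                printf("exactly one other CPU (%d) -> single-call fast path\n", cpu);
                return;
        }

        printf("two or more other CPUs -> allocate data and send the IPI\n");
}

int main(void)
{
        classify(0x01, 0xff, 0);        /* only ourselves */
        classify(0x03, 0xff, 0);        /* ourselves plus one other */
        classify(0x06, 0xff, 0);        /* two others */
        return 0;
}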
include/linux/smp.h

@@ -67,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
-/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
-int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
-                           int wait);
+void smp_call_function_many(const struct cpumask *mask,
+                            void (*func)(void *info), void *info, bool wait);

-static inline void smp_call_function_many(const struct cpumask *mask,
-                                          void (*func)(void *info), void *info,
-                                          int wait)
+/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
+static inline int
+smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+                       int wait)
 {
-        smp_call_function_mask(*mask, func, info, wait);
+        smp_call_function_many(&mask, func, info, wait);
+        return 0;
 }

 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
137  kernel/smp.c
@@ -24,8 +24,8 @@ struct call_function_data {
         struct call_single_data csd;
         spinlock_t lock;
         unsigned int refs;
-        cpumask_t cpumask;
         struct rcu_head rcu_head;
+        unsigned long cpumask_bits[];
 };

 struct call_single_queue {
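
Illustrative note (not part of the commit): the cpumask now lives in a flexible array member at the tail of call_function_data, so a single kmalloc(sizeof(*data) + cpumask_size(), ...) covers the struct and the mask together. A user-space C sketch of the same pattern follows; struct call_data, mask_bytes and the bit manipulation are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct call_data {
        int refs;                       /* fixed-size fields ...        */
        unsigned long cpumask_bits[];   /* ... mask storage tacked on   */
};

int main(void)
{
        size_t mask_bytes = 2 * sizeof(unsigned long);  /* stand-in for cpumask_size() */
        struct call_data *data;

        /* One allocation for the struct plus the trailing mask. */
        data = malloc(sizeof(*data) + mask_bytes);
        if (!data)
                return 1;               /* the commit falls back to per-CPU single calls here */

        memset(data->cpumask_bits, 0, mask_bytes);
        data->cpumask_bits[0] |= 1UL << 3;      /* "set CPU 3" */
        data->refs = 1;

        printf("bits[0] = %#lx, refs = %d\n", data->cpumask_bits[0], data->refs);
        free(data);
        return 0;
}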
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
         list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
                 int refs;

-                if (!cpu_isset(cpu, data->cpumask))
+                if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
                         continue;

                 data->csd.func(data->csd.info);

                 spin_lock(&data->lock);
-                cpu_clear(cpu, data->cpumask);
+                cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
                 WARN_ON(data->refs == 0);
                 data->refs--;
                 refs = data->refs;
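
Illustrative note (not part of the commit): to_cpumask() in the hunk above just reinterprets the bare unsigned long array as a bitmap, and cpumask_test_cpu()/cpumask_clear_cpu() test and clear one bit per CPU. A user-space C sketch of that bit handling, with invented helper names:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static bool test_cpu(int cpu, const unsigned long *bits)
{
        return bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG));
}

static void clear_cpu(int cpu, unsigned long *bits)
{
        bits[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}

int main(void)
{
        unsigned long cpumask_bits[2] = { 0 };

        cpumask_bits[0] |= 1UL << 5;            /* mark CPU 5 pending */
        printf("cpu5 pending: %d\n", test_cpu(5, cpumask_bits));
        clear_cpu(5, cpumask_bits);             /* CPU 5 done, drop it */
        printf("cpu5 pending: %d\n", test_cpu(5, cpumask_bits));
        return 0;
}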
@@ -266,51 +266,13 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
         generic_exec_single(cpu, data);
 }

-/* Dummy function */
-static void quiesce_dummy(void *unused)
-{
-}
-
-/*
- * Ensure stack based data used in call function mask is safe to free.
- *
- * This is needed by smp_call_function_mask when using on-stack data, because
- * a single call function queue is shared by all CPUs, and any CPU may pick up
- * the data item on the queue at any time before it is deleted. So we need to
- * ensure that all CPUs have transitioned through a quiescent state after
- * this call.
- *
- * This is a very slow function, implemented by sending synchronous IPIs to
- * all possible CPUs. For this reason, we have to alloc data rather than use
- * stack based data even in the case of synchronous calls. The stack based
- * data is then just used for deadlock/oom fallback which will be very rare.
- *
- * If a faster scheme can be made, we could go back to preferring stack based
- * data -- the data allocation/free is non-zero cost.
- */
-static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
-{
-        struct call_single_data data;
-        int cpu;
-
-        data.func = quiesce_dummy;
-        data.info = NULL;
-
-        for_each_cpu_mask(cpu, mask) {
-                data.flags = CSD_FLAG_WAIT;
-                generic_exec_single(cpu, &data);
-        }
-}
-
 /**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code.
- *
  * If @wait is true, then returns once @func has returned. Note that @wait
  * will be implicitly turned on in case of allocation failures, since
  * we fall back to on-stack allocation.
@@ -319,53 +281,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
  * hardware interrupt handler or from a bottom half handler. Preemption
  * must be disabled when calling this function.
  */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
-                           int wait)
+void smp_call_function_many(const struct cpumask *mask,
+                            void (*func)(void *), void *info,
+                            bool wait)
 {
-        struct call_function_data d;
-        struct call_function_data *data = NULL;
-        cpumask_t allbutself;
+        struct call_function_data *data;
         unsigned long flags;
-        int cpu, num_cpus;
-        int slowpath = 0;
+        int cpu, next_cpu;

         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());

-        cpu = smp_processor_id();
-        allbutself = cpu_online_map;
-        cpu_clear(cpu, allbutself);
-        cpus_and(mask, mask, allbutself);
-        num_cpus = cpus_weight(mask);
-
-        /*
-         * If zero CPUs, return. If just a single CPU, turn this request
-         * into a targetted single call instead since it's faster.
-         */
-        if (!num_cpus)
-                return 0;
-        else if (num_cpus == 1) {
-                cpu = first_cpu(mask);
-                return smp_call_function_single(cpu, func, info, wait);
-        }
+        /* So, what's a CPU they want? Ignoring this one. */
+        cpu = cpumask_first_and(mask, cpu_online_mask);
+        if (cpu == smp_processor_id())
+                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+        /* No online cpus? We're done. */
+        if (cpu >= nr_cpu_ids)
+                return;
+
+        /* Do we have another CPU which isn't us? */
+        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+        if (next_cpu == smp_processor_id())
+                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
+
+        /* Fastpath: do that cpu by itself. */
+        if (next_cpu >= nr_cpu_ids) {
+                smp_call_function_single(cpu, func, info, wait);
+                return;
+        }

-        data = kmalloc(sizeof(*data), GFP_ATOMIC);
-        if (data) {
-                data->csd.flags = CSD_FLAG_ALLOC;
-                if (wait)
-                        data->csd.flags |= CSD_FLAG_WAIT;
-        } else {
-                data = &d;
-                data->csd.flags = CSD_FLAG_WAIT;
-                wait = 1;
-                slowpath = 1;
+        data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
+        if (unlikely(!data)) {
+                /* Slow path. */
+                for_each_online_cpu(cpu) {
+                        if (cpu == smp_processor_id())
+                                continue;
+                        if (cpumask_test_cpu(cpu, mask))
+                                smp_call_function_single(cpu, func, info, wait);
+                }
+                return;
         }

         spin_lock_init(&data->lock);
+        data->csd.flags = CSD_FLAG_ALLOC;
+        if (wait)
+                data->csd.flags |= CSD_FLAG_WAIT;
         data->csd.func = func;
         data->csd.info = info;
-        data->refs = num_cpus;
-        data->cpumask = mask;
+        cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
+        cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
+        data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));

         spin_lock_irqsave(&call_function_lock, flags);
         list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +343,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
         smp_mb();

         /* Send a message to all CPUs in the map */
-        arch_send_call_function_ipi(mask);
+        arch_send_call_function_ipi(*to_cpumask(data->cpumask_bits));

         /* optionally wait for the CPUs to complete */
-        if (wait) {
+        if (wait)
                 csd_flag_wait(&data->csd);
-                if (unlikely(slowpath))
-                        smp_call_function_mask_quiesce_stack(mask);
-        }
-
-        return 0;
 }
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);

 /**
  * smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +357,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code.
+ * Returns 0.
  *
  * If @wait is true, then returns once @func has returned; otherwise
  * it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +368,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
  */
 int smp_call_function(void (*func)(void *), void *info, int wait)
 {
-        int ret;
-
         preempt_disable();
-        ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+        smp_call_function_many(cpu_online_mask, func, info, wait);
         preempt_enable();
-        return ret;
+        return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
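
Illustrative note (not part of the commit): a hedged sketch of what a caller looks like after this change. ipi_flush_tlb(), flush_others() and the targets mask are invented names; only smp_call_function_many()'s new pointer-plus-bool signature is taken from the patch above. This is kernel-style code and only builds in-tree.

#include <linux/preempt.h>
#include <linux/smp.h>

/* Runs on each selected remote CPU, in interrupt context: keep it short. */
static void ipi_flush_tlb(void *info)
{
}

static void flush_others(const struct cpumask *targets)
{
        preempt_disable();
        /* Old API: smp_call_function_mask(*targets, ipi_flush_tlb, NULL, 1); */
        smp_call_function_many(targets, ipi_flush_tlb, NULL, true);
        preempt_enable();
}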