[S390] Convert to smp_call_function_single.
smp_call_function_single now has the same semantics as s390's smp_call_function_on. Therefore convert to the *single variant and get rid of some architecture specific code.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent d941cf5e37
commit 3bb447fc8b
5 changed files with 23 additions and 33 deletions
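For readers unfamiliar with the two interfaces, the conversion is mechanical: the target CPU moves from the last argument of the s390-specific smp_call_function_on() to the first argument of the generic smp_call_function_single(), while func, info, nonatomic (unused) and wait keep their meaning. A minimal before/after sketch of a call site, assuming the 5-argument signature shown in this diff; the helper do_work(), the counter and run_on_cpu() are hypothetical and only illustrate the argument order:

#include <linux/smp.h>

/* Hypothetical helper; it runs on the target CPU, so it must be
 * fast and non-blocking (see the kernel-doc in the diff below). */
static void do_work(void *info)
{
        (*(unsigned long *) info)++;
}

static int run_on_cpu(int cpu)
{
        unsigned long counter = 0;

        /* Old, s390-only interface: target CPU passed last.
         *
         *      smp_call_function_on(do_work, &counter, 0, 1, cpu);
         */

        /* New, generic interface with the same semantics: target CPU
         * passed first; wait=1 returns only after do_work() completed. */
        return smp_call_function_single(cpu, do_work, &counter, 0, 1);
}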
arch/s390/appldata/appldata_base.c

@@ -173,7 +173,7 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 /*
  * appldata_mod_vtimer_wrap()
  *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * wrapper function for mod_virt_timer(), because smp_call_function_single()
  * accepts only one parameter.
  */
 static void __appldata_mod_vtimer_wrap(void *p) {
@@ -208,9 +208,9 @@ __appldata_vtimer_setup(int cmd)
                                  num_online_cpus()) * TOD_MICRO;
                for_each_online_cpu(i) {
                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
-                       smp_call_function_on(add_virt_timer_periodic,
-                                            &per_cpu(appldata_timer, i),
-                                            0, 1, i);
+                       smp_call_function_single(i, add_virt_timer_periodic,
+                                                &per_cpu(appldata_timer, i),
+                                                0, 1);
                }
                appldata_timer_active = 1;
                P_INFO("Monitoring timer started.\n");
@@ -236,8 +236,8 @@ __appldata_vtimer_setup(int cmd)
                        } args;
                        args.timer = &per_cpu(appldata_timer, i);
                        args.expires = per_cpu_interval;
-                       smp_call_function_on(__appldata_mod_vtimer_wrap,
-                                            &args, 0, 1, i);
+                       smp_call_function_single(i, __appldata_mod_vtimer_wrap,
+                                                &args, 0, 1);
                }
        }
 }

arch/s390/kernel/smp.c

@@ -170,30 +170,28 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 EXPORT_SYMBOL(smp_call_function);
 
 /*
- * smp_call_function_on:
+ * smp_call_function_single:
+ * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
  * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
- * @cpu: the CPU where func should run
  *
  * Run a function on one processor.
  *
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
-                         int wait, int cpu)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+                             int nonatomic, int wait)
 {
-       cpumask_t map = CPU_MASK_NONE;
-
        preempt_disable();
-       cpu_set(cpu, map);
-       __smp_call_function_map(func, info, nonatomic, wait, map);
+       __smp_call_function_map(func, info, nonatomic, wait,
+                               cpumask_of_cpu(cpu));
        preempt_enable();
        return 0;
 }
-EXPORT_SYMBOL(smp_call_function_on);
+EXPORT_SYMBOL(smp_call_function_single);
 
 static void do_send_stop(void)
 {

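The kernel-doc above carries the calling-context rules over to the new name: the caller must be in process context with interrupts enabled, and func must be fast and non-blocking. A short sketch of a well-formed caller under those rules; flush_cpu_state() and sync_one_cpu() are hypothetical names, the pattern mirrors the iucv callers later in this commit:

static void flush_cpu_state(void *unused)
{
        /* Executes on the chosen CPU; keep it short and non-blocking. */
}

static void sync_one_cpu(int cpu)
{
        /* Process context only, interrupts enabled. Disabling preemption
         * around the online check and the cross-call mirrors the
         * preempt_disable()/preempt_enable() pattern used by the iucv
         * callers converted below. */
        preempt_disable();
        if (cpu_online(cpu))
                smp_call_function_single(cpu, flush_cpu_state, NULL, 0, 1);
        preempt_enable();
}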
arch/s390/kernel/vtime.c

@@ -415,7 +415,7 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
 
 /*
  * If we change a pending timer the function must be called on the CPU
- * where the timer is running on, e.g. by smp_call_function_on()
+ * where the timer is running on, e.g. by smp_call_function_single()
  *
  * The original mod_timer adds the timer if it is not pending. For compatibility
  * we do the same. The timer will be added on the current CPU as a oneshot timer.

include/asm-s390/smp.h

@@ -36,8 +36,7 @@ extern void machine_halt_smp(void);
 extern void machine_power_off_smp(void);
 
 extern void smp_setup_cpu_possible_map(void);
-extern int smp_call_function_on(void (*func) (void *info), void *info,
-                               int nonatomic, int wait, int cpu);
+
 #define NO_PROC_ID             0xFF            /* No processor magic marker */
 
 /*
@@ -96,14 +95,6 @@ extern int __cpu_up (unsigned int cpu);
 #endif
 
 #ifndef CONFIG_SMP
-static inline int
-smp_call_function_on(void (*func) (void *info), void *info,
-                    int nonatomic, int wait, int cpu)
-{
-       func(info);
-       return 0;
-}
-
 static inline void smp_send_stop(void)
 {
        /* Disable all interrupts/machine checks */

net/iucv/iucv.c

@@ -479,7 +479,8 @@ static void iucv_setmask_mp(void)
                /* Enable all cpus with a declared buffer. */
                if (cpu_isset(cpu, iucv_buffer_cpumask) &&
                    !cpu_isset(cpu, iucv_irq_cpumask))
-                       smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+                       smp_call_function_single(cpu, iucv_allow_cpu,
+                                                NULL, 0, 1);
        preempt_enable();
 }
 
@@ -497,7 +498,7 @@ static void iucv_setmask_up(void)
        cpumask = iucv_irq_cpumask;
        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
        for_each_cpu_mask(cpu, cpumask)
-               smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
+               smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
 }
 
 /**
@@ -522,7 +523,7 @@ static int iucv_enable(void)
        rc = -EIO;
        preempt_disable();
        for_each_online_cpu(cpu)
-               smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
        preempt_enable();
        if (cpus_empty(iucv_buffer_cpumask))
                /* No cpu could declare an iucv buffer. */
@@ -578,7 +579,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
        case CPU_ONLINE_FROZEN:
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
-               smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
@@ -587,10 +588,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                if (cpus_empty(cpumask))
                        /* Can't offline last IUCV enabled cpu. */
                        return NOTIFY_BAD;
-               smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
+               smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
                if (cpus_empty(iucv_irq_cpumask))
-                       smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
-                                            first_cpu(iucv_buffer_cpumask));
+                       smp_call_function_single(first_cpu(iucv_buffer_cpumask),
+                                                iucv_allow_cpu, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;