x86/apic: Wrap IPI calls into helper functions
Move them to one place so the static call conversion gets simpler.

No functional change.

  [ dhansen: merge against recent x86/apic changes ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Juergen Gross <jgross@suse.com> # Xen PV (dom0 and unpriv. guest)
parent 54271fb0b7
commit 28b8235238
13 changed files with 51 additions and 20 deletions
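The change is mechanical: every call site that used to dereference the global apic pointer directly now goes through a small __always_inline wrapper (added to the APIC header in the second hunk below), so a later conversion of these callbacks to static calls only has to touch the wrappers, not every caller. As a rough illustration of the pattern, here is a minimal, self-contained userspace C sketch; it is not kernel code, and struct apic_ops, dummy_apic and dummy_send_IPI are hypothetical stand-ins for the real APIC driver plumbing:

/*
 * Simplified userspace illustration of the wrapper pattern used by this
 * commit (not kernel code): call sites use one inline helper instead of
 * dereferencing the callback pointer directly, so only the helper has to
 * change when the dispatch mechanism is converted later.
 */
#include <stdio.h>

struct apic_ops {
	void (*send_IPI)(int cpu, int vector);
};

/* Hypothetical stand-in for the real APIC driver. */
static void dummy_send_IPI(int cpu, int vector)
{
	printf("IPI vector 0x%x -> CPU %d\n", vector, cpu);
}

static struct apic_ops dummy_apic = { .send_IPI = dummy_send_IPI };
static struct apic_ops *apic = &dummy_apic;

/* The only place that knows how the callback is dispatched. */
static inline void __apic_send_IPI(int cpu, int vector)
{
	apic->send_IPI(cpu, vector);
}

int main(void)
{
	/* Call sites no longer touch apic-> directly. */
	__apic_send_IPI(1, 0xfd);
	return 0;
}

With every caller funneled through __apic_send_IPI(), changing how the callback is dispatched (for example, switching the indirect call to a static call) later becomes an edit in one place instead of across thirteen files.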
@@ -20,7 +20,7 @@ static bool __initdata hv_pvspin = true;
 
 static void hv_qlock_kick(int cpu)
 {
-	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+	__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
 }
 
 static void hv_qlock_wait(u8 *byte, u8 val)
@@ -401,6 +401,36 @@ static __always_inline void apic_icr_write(u32 low, u32 high)
 	apic->icr_write(low, high);
 }
 
+static __always_inline void __apic_send_IPI(int cpu, int vector)
+{
+	apic->send_IPI(cpu, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+	apic->send_IPI_mask(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	apic->send_IPI_mask_allbutself(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_allbutself(int vector)
+{
+	apic->send_IPI_allbutself(vector);
+}
+
+static __always_inline void __apic_send_IPI_all(int vector)
+{
+	apic->send_IPI_all(vector);
+}
+
+static __always_inline void __apic_send_IPI_self(int vector)
+{
+	apic->send_IPI_self(vector);
+}
+
 static __always_inline void apic_wait_icr_idle(void)
 {
 	if (apic->wait_icr_idle)
@@ -502,7 +502,7 @@ static int lapic_timer_set_oneshot(struct clock_event_device *evt)
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	__apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -21,6 +21,8 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 
+#include "local.h"
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
 u64 hw_nmi_get_sample_period(int watchdog_thresh)
 {
@@ -31,7 +33,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
 #ifdef arch_trigger_cpumask_backtrace
 static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
-	apic->send_IPI_mask(mask, NMI_VECTOR);
+	__apic_send_IPI_mask(mask, NMI_VECTOR);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
@@ -54,9 +54,9 @@ void apic_send_IPI_allbutself(unsigned int vector)
 		return;
 
 	if (static_branch_likely(&apic_use_ipi_shorthand))
-		apic->send_IPI_allbutself(vector);
+		__apic_send_IPI_allbutself(vector);
 	else
-		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+		__apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 /*
@@ -70,12 +70,12 @@ void native_smp_send_reschedule(int cpu)
 		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
 		return;
 	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
+	__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
+	__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -87,14 +87,14 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 			goto sendmask;
 
 		if (cpumask_test_cpu(cpu, mask))
-			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
+			__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
 		else if (num_online_cpus() > 1)
-			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+			__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 		return;
 	}
 
 sendmask:
-	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+	__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 }
 
 #endif /* CONFIG_SMP */
@@ -221,7 +221,7 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
  */
 void default_send_IPI_single(int cpu, int vector)
 {
-	apic->send_IPI_mask(cpumask_of(cpu), vector);
+	__apic_send_IPI_mask(cpumask_of(cpu), vector);
 }
 
 void default_send_IPI_allbutself(int vector)
@@ -898,7 +898,7 @@ static int apic_retrigger_irq(struct irq_data *irqd)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	apic->send_IPI(apicd->cpu, apicd->vector);
+	__apic_send_IPI(apicd->cpu, apicd->vector);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -270,8 +270,7 @@ static void __maybe_unused raise_mce(struct mce *m)
 					       mce_irq_ipi, NULL, 0);
 			preempt_enable();
 		} else if (m->inject_flags & MCJ_NMI_BROADCAST)
-			apic->send_IPI_mask(mce_inject_cpumask,
-					    NMI_VECTOR);
+			__apic_send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
 		}
 		start = jiffies;
 		while (!cpumask_empty(mce_inject_cpumask)) {
@@ -28,7 +28,7 @@ void arch_irq_work_raise(void)
 	if (!arch_irq_work_has_interrupt())
 		return;
 
-	apic->send_IPI_self(IRQ_WORK_VECTOR);
+	__apic_send_IPI_self(IRQ_WORK_VECTOR);
 	apic_wait_icr_idle();
 }
 #endif
@@ -75,7 +75,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 	/* sync above data before sending NMI */
 	wmb();
 
-	apic->send_IPI_mask(mask, NMI_VECTOR);
+	__apic_send_IPI_mask(mask, NMI_VECTOR);
 
 	/* Don't wait longer than a second */
 	timeout = USEC_PER_SEC;
@@ -237,7 +237,7 @@ static void native_stop_other_cpus(int wait)
 		pr_emerg("Shutting down cpus with NMI\n");
 
 		for_each_cpu(cpu, &cpus_stop_mask)
-			apic->send_IPI(cpu, NMI_VECTOR);
+			__apic_send_IPI(cpu, NMI_VECTOR);
 	}
 	/*
 	 * Don't wait longer than 10 ms if the caller didn't
@@ -175,7 +175,7 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
 	 * scheduled out).
 	 */
 	if (pi_test_on(&new))
-		apic->send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
+		__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
 
 	local_irq_restore(flags);
 }
@@ -4179,7 +4179,7 @@ static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
 		 */
 
 		if (vcpu != kvm_get_running_vcpu())
-			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
 		return;
 	}
 #endif
@@ -601,7 +601,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
 		uv_cpu_nmi_per(cpu).pinging = 1;
 
-	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
+	__apic_send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
 /* Clean up flags for CPU's that ignored both NMI and ping */