genirq: Introduce IRQD_AFFINITY_MANAGED flag
Interrupts marked with this flag are excluded from user space interrupt
affinity changes. Contrary to the IRQ_NO_BALANCING flag, the kernel internal
affinity mechanism is not blocked.

This flag will be used for multi-queue device interrupts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: linux-nvme@lists.infradead.org
Cc: axboe@fb.com
Cc: agordeev@redhat.com
Link: http://lkml.kernel.org/r/1467621574-8277-3-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 9c2555835b
parent b6140914fd
4 changed files with 28 additions and 4 deletions
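
For context (illustration only, not part of the commit): once an interrupt is marked affinity-managed, a user-space write to its /proc/irq/<N>/smp_affinity file fails with EIO, because write_irq_affinity() now checks irq_can_set_affinity_usr(). A minimal user-space sketch of that visible effect; the IRQ number and mask value below are placeholders.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder IRQ number; substitute a managed multi-queue IRQ. */
	const char *path = "/proc/irq/42/smp_affinity";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With IRQD_AFFINITY_MANAGED set, the kernel rejects this write. */
	if (write(fd, "f", 1) < 0)
		printf("write failed: %s (EIO expected for a managed IRQ)\n",
		       strerror(errno));
	close(fd);
	return 0;
}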
include/linux/irq.h
@@ -197,6 +197,7 @@ struct irq_data {
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -212,6 +213,7 @@ enum {
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
+	IRQD_AFFINITY_MANAGED		= (1 << 21),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -133,6 +133,21 @@ int irq_can_set_affinity(unsigned int irq)
 	return __irq_can_set_affinity(irq_to_desc(irq));
 }
 
+/**
+ * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
 /**
  * irq_set_thread_affinity - Notify irq threads to adjust affinity
  * @desc:	irq descriptor which has affitnity changed
kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
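
Conversely, kernel-internal affinity changes keep working on managed interrupts: irq_set_affinity() does not consult IRQD_AFFINITY_MANAGED, only the new user-space check added above does. A hedged driver-side sketch of that point; the function name here is hypothetical and not part of this commit.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/*
 * Hypothetical driver fragment: pin one queue interrupt to a CPU from
 * inside the kernel. This still succeeds for a managed interrupt,
 * since only irq_can_set_affinity_usr() checks IRQD_AFFINITY_MANAGED.
 */
static int example_pin_queue_irq(unsigned int irq, unsigned int cpu)
{
	return irq_set_affinity(irq, cpumask_of(cpu));
}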