genirq: Convert core code to irq_data
Convert all core code references to the irq, chip, handler_data, chip_data, msi_desc and affinity fields of struct irq_desc to their irq_data.* counterparts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
parent ff7dcd44dd
commit 6b8ff3120c

11 changed files with 111 additions and 110 deletions
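The change is mechanical: per-interrupt fields that the core previously read straight off struct irq_desc (chip, handler_data, chip_data, msi_desc, affinity, plus the irq number and node) are now reached through an embedded struct irq_data. A minimal sketch of the before/after access pattern follows; the field list is taken from the hunks below, the helper name is invented for illustration, and the real definitions in include/linux/irq.h carry more members and config dependencies.

/* Sketch only: trimmed field list, no #ifdefs, invented helper name. */
struct irq_chip;
struct msi_desc;
struct cpumask;

struct irq_data {
        unsigned int     irq;           /* was irq_desc.irq */
        unsigned int     node;          /* was irq_desc.node */
        struct irq_chip  *chip;         /* was irq_desc.chip */
        void             *handler_data; /* was irq_desc.handler_data */
        void             *chip_data;    /* was irq_desc.chip_data */
        struct msi_desc  *msi_desc;     /* was irq_desc.msi_desc */
        struct cpumask   *affinity;     /* cpumask_var_t in the tree */
};

struct irq_desc {
        struct irq_data  irq_data;      /* embedded, not a pointer */
        /* lock, status, action, depth, ... are untouched by this patch */
};

/* Invented helper showing the new spelling of an old access: */
static inline struct irq_chip *sketch_desc_chip(struct irq_desc *desc)
{
        return desc->irq_data.chip;     /* formerly desc->chip */
}

Every hunk below applies that same substitution at another call site; apart from one re-wrapped line in the retrigger check, there are no locking or control-flow changes.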
@@ -475,12 +475,12 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 gfp = GFP_NOWAIT;

 #ifdef CONFIG_CPUMASK_OFFSTACK
-if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
+if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
 return false;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-free_cpumask_var(desc->affinity);
+free_cpumask_var(desc->irq_data.affinity);
 return false;
 }
 #endif
@@ -490,7 +490,7 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node,

 static inline void init_desc_masks(struct irq_desc *desc)
 {
-cpumask_setall(desc->affinity);
+cpumask_setall(desc->irq_data.affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 cpumask_clear(desc->pending_mask);
 #endif
@@ -510,7 +510,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 struct irq_desc *new_desc)
 {
 #ifdef CONFIG_CPUMASK_OFFSTACK
-cpumask_copy(new_desc->affinity, old_desc->affinity);
+cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
@@ -521,7 +521,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 static inline void free_desc_masks(struct irq_desc *old_desc,
 struct irq_desc *new_desc)
 {
-free_cpumask_var(old_desc->affinity);
+free_cpumask_var(old_desc->irq_data.affinity);

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 free_cpumask_var(old_desc->pending_mask);
@@ -57,9 +57,9 @@ unsigned long probe_irq_on(void)
 * Some chips need to know about probing in
 * progress:
 */
-if (desc->chip->set_type)
-desc->chip->set_type(i, IRQ_TYPE_PROBE);
-desc->chip->startup(i);
+if (desc->irq_data.chip->set_type)
+desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE);
+desc->irq_data.chip->startup(i);
 }
 raw_spin_unlock_irq(&desc->lock);
 }
@@ -76,7 +76,7 @@ unsigned long probe_irq_on(void)
 raw_spin_lock_irq(&desc->lock);
 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-if (desc->chip->startup(i))
+if (desc->irq_data.chip->startup(i))
 desc->status |= IRQ_PENDING;
 }
 raw_spin_unlock_irq(&desc->lock);
@@ -98,7 +98,7 @@ unsigned long probe_irq_on(void)
 /* It triggered already - consider it spurious. */
 if (!(status & IRQ_WAITING)) {
 desc->status = status & ~IRQ_AUTODETECT;
-desc->chip->shutdown(i);
+desc->irq_data.chip->shutdown(i);
 } else
 if (i < 32)
 mask |= 1 << i;
@@ -137,7 +137,7 @@ unsigned int probe_irq_mask(unsigned long val)
 mask |= 1 << i;

 desc->status = status & ~IRQ_AUTODETECT;
-desc->chip->shutdown(i);
+desc->irq_data.chip->shutdown(i);
 }
 raw_spin_unlock_irq(&desc->lock);
 }
@@ -181,7 +181,7 @@ int probe_irq_off(unsigned long val)
 nr_of_irqs++;
 }
 desc->status = status & ~IRQ_AUTODETECT;
-desc->chip->shutdown(i);
+desc->irq_data.chip->shutdown(i);
 }
 raw_spin_unlock_irq(&desc->lock);
 }
@@ -32,18 +32,18 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 /* Ensure we don't have left over values from a previous use of this irq */
 raw_spin_lock_irqsave(&desc->lock, flags);
 desc->status = IRQ_DISABLED;
-desc->chip = &no_irq_chip;
+desc->irq_data.chip = &no_irq_chip;
 desc->handle_irq = handle_bad_irq;
 desc->depth = 1;
-desc->msi_desc = NULL;
-desc->handler_data = NULL;
+desc->irq_data.msi_desc = NULL;
+desc->irq_data.handler_data = NULL;
 if (!keep_chip_data)
-desc->chip_data = NULL;
+desc->irq_data.chip_data = NULL;
 desc->action = NULL;
 desc->irq_count = 0;
 desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-cpumask_setall(desc->affinity);
+cpumask_setall(desc->irq_data.affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 cpumask_clear(desc->pending_mask);
 #endif
@@ -64,7 +64,7 @@ void dynamic_irq_init(unsigned int irq)
 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
 * @irq: irq number to initialize
 *
-* does not set irq_to_desc(irq)->chip_data to NULL
+* does not set irq_to_desc(irq)->irq_data.chip_data to NULL
 */
 void dynamic_irq_init_keep_chip_data(unsigned int irq)
 {
@@ -88,12 +88,12 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
 irq);
 return;
 }
-desc->msi_desc = NULL;
-desc->handler_data = NULL;
+desc->irq_data.msi_desc = NULL;
+desc->irq_data.handler_data = NULL;
 if (!keep_chip_data)
-desc->chip_data = NULL;
+desc->irq_data.chip_data = NULL;
 desc->handle_irq = handle_bad_irq;
-desc->chip = &no_irq_chip;
+desc->irq_data.chip = &no_irq_chip;
 desc->name = NULL;
 clear_kstat_irqs(desc);
 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -112,7 +112,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
 * @irq: irq number to initialize
 *
-* does not set irq_to_desc(irq)->chip_data to NULL
+* does not set irq_to_desc(irq)->irq_data.chip_data to NULL
 */
 void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
 {
@@ -140,7 +140,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)

 raw_spin_lock_irqsave(&desc->lock, flags);
 irq_chip_set_defaults(chip);
-desc->chip = chip;
+desc->irq_data.chip = chip;
 raw_spin_unlock_irqrestore(&desc->lock, flags);

 return 0;
@@ -193,7 +193,7 @@ int set_irq_data(unsigned int irq, void *data)
 }

 raw_spin_lock_irqsave(&desc->lock, flags);
-desc->handler_data = data;
+desc->irq_data.handler_data = data;
 raw_spin_unlock_irqrestore(&desc->lock, flags);
 return 0;
 }
@@ -218,7 +218,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 }

 raw_spin_lock_irqsave(&desc->lock, flags);
-desc->msi_desc = entry;
+desc->irq_data.msi_desc = entry;
 if (entry)
 entry->irq = irq;
 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -243,13 +243,13 @@ int set_irq_chip_data(unsigned int irq, void *data)
 return -EINVAL;
 }

-if (!desc->chip) {
+if (!desc->irq_data.chip) {
 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
 return -EINVAL;
 }

 raw_spin_lock_irqsave(&desc->lock, flags);
-desc->chip_data = data;
+desc->irq_data.chip_data = data;
 raw_spin_unlock_irqrestore(&desc->lock, flags);

 return 0;
@@ -291,7 +291,7 @@ static void default_enable(unsigned int irq)
 {
 struct irq_desc *desc = irq_to_desc(irq);

-desc->chip->unmask(irq);
+desc->irq_data.chip->unmask(irq);
 desc->status &= ~IRQ_MASKED;
 }

@@ -309,7 +309,7 @@ static unsigned int default_startup(unsigned int irq)
 {
 struct irq_desc *desc = irq_to_desc(irq);

-desc->chip->enable(irq);
+desc->irq_data.chip->enable(irq);
 return 0;
 }

@@ -320,7 +320,7 @@ static void default_shutdown(unsigned int irq)
 {
 struct irq_desc *desc = irq_to_desc(irq);

-desc->chip->mask(irq);
+desc->irq_data.chip->mask(irq);
 desc->status |= IRQ_MASKED;
 }

@@ -350,28 +350,28 @@ void irq_chip_set_defaults(struct irq_chip *chip)

 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 {
-if (desc->chip->mask_ack)
-desc->chip->mask_ack(irq);
+if (desc->irq_data.chip->mask_ack)
+desc->irq_data.chip->mask_ack(irq);
 else {
-desc->chip->mask(irq);
-if (desc->chip->ack)
-desc->chip->ack(irq);
+desc->irq_data.chip->mask(irq);
+if (desc->irq_data.chip->ack)
+desc->irq_data.chip->ack(irq);
 }
 desc->status |= IRQ_MASKED;
 }

 static inline void mask_irq(struct irq_desc *desc, int irq)
 {
-if (desc->chip->mask) {
-desc->chip->mask(irq);
+if (desc->irq_data.chip->mask) {
+desc->irq_data.chip->mask(irq);
 desc->status |= IRQ_MASKED;
 }
 }

 static inline void unmask_irq(struct irq_desc *desc, int irq)
 {
-if (desc->chip->unmask) {
-desc->chip->unmask(irq);
+if (desc->irq_data.chip->unmask) {
+desc->irq_data.chip->unmask(irq);
 desc->status &= ~IRQ_MASKED;
 }
 }
@@ -552,7 +552,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 raw_spin_lock(&desc->lock);
 desc->status &= ~IRQ_INPROGRESS;
 out:
-desc->chip->eoi(irq);
+desc->irq_data.chip->eoi(irq);

 raw_spin_unlock(&desc->lock);
 }
@@ -594,8 +594,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 kstat_incr_irqs_this_cpu(irq, desc);

 /* Start handling the irq */
-if (desc->chip->ack)
-desc->chip->ack(irq);
+if (desc->irq_data.chip->ack)
+desc->irq_data.chip->ack(irq);

 /* Mark the IRQ currently in progress.*/
 desc->status |= IRQ_INPROGRESS;
@@ -648,15 +648,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)

 kstat_incr_irqs_this_cpu(irq, desc);

-if (desc->chip->ack)
-desc->chip->ack(irq);
+if (desc->irq_data.chip->ack)
+desc->irq_data.chip->ack(irq);

 action_ret = handle_IRQ_event(irq, desc->action);
 if (!noirqdebug)
 note_interrupt(irq, desc, action_ret);

-if (desc->chip->eoi)
-desc->chip->eoi(irq);
+if (desc->irq_data.chip->eoi)
+desc->irq_data.chip->eoi(irq);
 }

 void
@@ -674,7 +674,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,

 if (!handle)
 handle = handle_bad_irq;
-else if (desc->chip == &no_irq_chip) {
+else if (desc->irq_data.chip == &no_irq_chip) {
 printk(KERN_WARNING "Trying to install %sinterrupt handler "
 "for IRQ%d\n", is_chained ? "chained " : "", irq);
 /*
@@ -684,7 +684,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 * prevent us to setup the interrupt at all. Switch it to
 * dummy_irq_chip for easy transition.
 */
-desc->chip = &dummy_irq_chip;
+desc->irq_data.chip = &dummy_irq_chip;
 }

 chip_bus_lock(irq, desc);
@@ -692,7 +692,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,

 /* Uninstall? */
 if (handle == handle_bad_irq) {
-if (desc->chip != &no_irq_chip)
+if (desc->irq_data.chip != &no_irq_chip)
 mask_ack_irq(desc, irq);
 desc->status |= IRQ_DISABLED;
 desc->depth = 1;
@@ -704,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 desc->status &= ~IRQ_DISABLED;
 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 desc->depth = 0;
-desc->chip->startup(irq);
+desc->irq_data.chip->startup(irq);
 }
 raw_spin_unlock_irqrestore(&desc->lock, flags);
 chip_bus_sync_unlock(irq, desc);
@@ -105,7 +105,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 raw_spin_lock_init(&desc->lock);
 desc->irq_data.irq = irq;
 #ifdef CONFIG_SMP
-desc->node = node;
+desc->irq_data.node = node;
 #endif
 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 init_kstat_irqs(desc, node, nr_cpu_ids);
@@ -185,7 +185,7 @@ int __init early_irq_init(void)
 desc[i].irq_data.irq = i;
 desc[i].irq_data.chip = &no_irq_chip;
 #ifdef CONFIG_SMP
-desc[i].node = node;
+desc[i].irq_data.node = node;
 #endif
 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -456,20 +456,20 @@ unsigned int __do_IRQ(unsigned int irq)
 /*
 * No locking required for CPU-local interrupts:
 */
-if (desc->chip->ack)
-desc->chip->ack(irq);
+if (desc->irq_data.chip->ack)
+desc->irq_data.chip->ack(irq);
 if (likely(!(desc->status & IRQ_DISABLED))) {
 action_ret = handle_IRQ_event(irq, desc->action);
 if (!noirqdebug)
 note_interrupt(irq, desc, action_ret);
 }
-desc->chip->end(irq);
+desc->irq_data.chip->end(irq);
 return 1;
 }

 raw_spin_lock(&desc->lock);
-if (desc->chip->ack)
-desc->chip->ack(irq);
+if (desc->irq_data.chip->ack)
+desc->irq_data.chip->ack(irq);
 /*
 * REPLAY is when Linux resends an IRQ that was dropped earlier
 * WAITING is used by probe to mark irqs that are being tested
@@ -529,7 +529,7 @@ out:
 * The ->end() handler has to deal with interrupts which got
 * disabled while the handler was running.
 */
-desc->chip->end(irq);
+desc->irq_data.chip->end(irq);
 raw_spin_unlock(&desc->lock);

 return 1;
@@ -43,14 +43,14 @@ extern void irq_set_thread_affinity(struct irq_desc *desc);
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
 {
-if (unlikely(desc->chip->bus_lock))
-desc->chip->bus_lock(irq);
+if (unlikely(desc->irq_data.chip->bus_lock))
+desc->irq_data.chip->bus_lock(irq);
 }

 static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
 {
-if (unlikely(desc->chip->bus_sync_unlock))
-desc->chip->bus_sync_unlock(irq);
+if (unlikely(desc->irq_data.chip->bus_sync_unlock))
+desc->irq_data.chip->bus_sync_unlock(irq);
 }

 /*
@@ -67,8 +67,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
 printk("->handle_irq(): %p, ", desc->handle_irq);
 print_symbol("%s\n", (unsigned long)desc->handle_irq);
-printk("->chip(): %p, ", desc->chip);
-print_symbol("%s\n", (unsigned long)desc->chip);
+printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
 printk("->action(): %p\n", desc->action);
 if (desc->action) {
 printk("->action->handler(): %p, ", desc->action->handler);
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
 struct irq_desc *desc = irq_to_desc(irq);

-if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
-!desc->chip->set_affinity)
+if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
+!desc->irq_data.chip->set_affinity)
 return 0;

 return 1;
@@ -111,15 +111,15 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 struct irq_desc *desc = irq_to_desc(irq);
 unsigned long flags;

-if (!desc->chip->set_affinity)
+if (!desc->irq_data.chip->set_affinity)
 return -EINVAL;

 raw_spin_lock_irqsave(&desc->lock, flags);

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 if (desc->status & IRQ_MOVE_PCNTXT) {
-if (!desc->chip->set_affinity(irq, cpumask)) {
-cpumask_copy(desc->affinity, cpumask);
+if (!desc->irq_data.chip->set_affinity(irq, cpumask)) {
+cpumask_copy(desc->irq_data.affinity, cpumask);
 irq_set_thread_affinity(desc);
 }
 }
@@ -128,8 +128,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 cpumask_copy(desc->pending_mask, cpumask);
 }
 #else
-if (!desc->chip->set_affinity(irq, cpumask)) {
-cpumask_copy(desc->affinity, cpumask);
+if (!desc->irq_data.chip->set_affinity(irq, cpumask)) {
+cpumask_copy(desc->irq_data.affinity, cpumask);
 irq_set_thread_affinity(desc);
 }
 #endif
@@ -168,16 +168,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 * one of the targets is online.
 */
 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-if (cpumask_any_and(desc->affinity, cpu_online_mask)
+if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
 < nr_cpu_ids)
 goto set_affinity;
 else
 desc->status &= ~IRQ_AFFINITY_SET;
 }

-cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
+cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-desc->chip->set_affinity(irq, desc->affinity);
+desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity);

 return 0;
 }
@@ -223,7 +223,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)

 if (!desc->depth++) {
 desc->status |= IRQ_DISABLED;
-desc->chip->disable(irq);
+desc->irq_data.chip->disable(irq);
 }
 }

@@ -313,7 +313,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
-* desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
+* desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
 void enable_irq(unsigned int irq)
 {
@@ -336,8 +336,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 struct irq_desc *desc = irq_to_desc(irq);
 int ret = -ENXIO;

-if (desc->chip->set_wake)
-ret = desc->chip->set_wake(irq, on);
+if (desc->irq_data.chip->set_wake)
+ret = desc->irq_data.chip->set_wake(irq, on);

 return ret;
 }
@@ -432,7 +432,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 unsigned long flags)
 {
 int ret;
-struct irq_chip *chip = desc->chip;
+struct irq_chip *chip = desc->irq_data.chip;

 if (!chip || !chip->set_type) {
 /*
@@ -457,8 +457,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 desc->status |= flags;

-if (chip != desc->chip)
-irq_chip_set_defaults(desc->chip);
+if (chip != desc->irq_data.chip)
+irq_chip_set_defaults(desc->irq_data.chip);
 }

 return ret;
@@ -528,7 +528,7 @@ again:

 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 desc->status &= ~IRQ_MASKED;
-desc->chip->unmask(irq);
+desc->irq_data.chip->unmask(irq);
 }
 raw_spin_unlock_irq(&desc->lock);
 chip_bus_sync_unlock(irq, desc);
@@ -556,7 +556,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 }

 raw_spin_lock_irq(&desc->lock);
-cpumask_copy(mask, desc->affinity);
+cpumask_copy(mask, desc->irq_data.affinity);
 raw_spin_unlock_irq(&desc->lock);

 set_cpus_allowed_ptr(current, mask);
@@ -657,7 +657,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 if (!desc)
 return -EINVAL;

-if (desc->chip == &no_irq_chip)
+if (desc->irq_data.chip == &no_irq_chip)
 return -ENOSYS;
 /*
 * Some drivers like serial.c use request_irq() heavily,
@@ -752,7 +752,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 }

 if (!shared) {
-irq_chip_set_defaults(desc->chip);
+irq_chip_set_defaults(desc->irq_data.chip);

 init_waitqueue_head(&desc->wait_for_threads);

@@ -779,7 +779,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 if (!(desc->status & IRQ_NOAUTOEN)) {
 desc->depth = 0;
 desc->status &= ~IRQ_DISABLED;
-desc->chip->startup(irq);
+desc->irq_data.chip->startup(irq);
 } else
 /* Undo nested disables: */
 desc->depth = 1;
@@ -912,17 +912,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)

 /* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-if (desc->chip->release)
-desc->chip->release(irq, dev_id);
+if (desc->irq_data.chip->release)
+desc->irq_data.chip->release(irq, dev_id);
 #endif

 /* If this was the last handler, shut down the IRQ line: */
 if (!desc->action) {
 desc->status |= IRQ_DISABLED;
-if (desc->chip->shutdown)
-desc->chip->shutdown(irq);
+if (desc->irq_data.chip->shutdown)
+desc->irq_data.chip->shutdown(irq);
 else
-desc->chip->disable(irq);
+desc->irq_data.chip->disable(irq);
 }

 #ifdef CONFIG_SMP
@@ -24,7 +24,7 @@ void move_masked_irq(int irq)
 if (unlikely(cpumask_empty(desc->pending_mask)))
 return;

-if (!desc->chip->set_affinity)
+if (!desc->irq_data.chip->set_affinity)
 return;

 assert_raw_spin_locked(&desc->lock);
@@ -43,8 +43,8 @@ void move_masked_irq(int irq)
 */
 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 < nr_cpu_ids))
-if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
-cpumask_copy(desc->affinity, desc->pending_mask);
+if (!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) {
+cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
 irq_set_thread_affinity(desc);
 }

@@ -61,8 +61,8 @@ void move_native_irq(int irq)
 if (unlikely(desc->status & IRQ_DISABLED))
 return;

-desc->chip->mask(irq);
+desc->irq_data.chip->mask(irq);
 move_masked_irq(irq);
-desc->chip->unmask(irq);
+desc->irq_data.chip->unmask(irq);
 }

@@ -44,7 +44,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 return false;
 }
 raw_spin_lock_init(&desc->lock);
-desc->node = node;
+desc->irq_data.node = node;
 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
 init_copy_desc_masks(old_desc, desc);
@@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 unsigned int irq;
 unsigned long flags;

-irq = old_desc->irq;
+irq = old_desc->irq_data.irq;

 raw_spin_lock_irqsave(&sparse_irq_lock, flags);

@@ -109,10 +109,10 @@ out_unlock:
 struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
 /* those static or target node is -1, do not move them */
-if (desc->irq < NR_IRQS_LEGACY || node == -1)
+if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
 return desc;

-if (desc->node != node)
+if (desc->irq_data.node != node)
 desc = __real_move_irq_desc(desc, node);

 return desc;
@@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 struct irq_desc *desc = irq_to_desc((long)m->private);
-const struct cpumask *mask = desc->affinity;
+const struct cpumask *mask = desc->irq_data.affinity;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 if (desc->status & IRQ_MOVE_PENDING)
@@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 cpumask_var_t new_value;
 int err;

-if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
+if (!irq_to_desc(irq)->irq_data.chip->set_affinity || no_irq_affinity ||
 irq_balancing_disabled(irq))
 return -EIO;

@@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
 {
 struct irq_desc *desc = irq_to_desc((long) m->private);

-seq_printf(m, "%d\n", desc->node);
+seq_printf(m, "%d\n", desc->irq_data.node);
 return 0;
 }

@@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
 char name [MAX_NAMELEN];

-if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
+if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
 return;

 memset(name, 0, MAX_NAMELEN);
@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 /*
 * Make sure the interrupt is enabled, before resending it:
 */
-desc->chip->enable(irq);
+desc->irq_data.chip->enable(irq);

 /*
 * We do not resend level type interrupts. Level type
@@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;

-if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+if (!desc->irq_data.chip->retrigger ||
+    !desc->irq_data.chip->retrigger(irq)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 /* Set it pending and activate the softirq: */
 set_bit(irq, irqs_resend);
@@ -78,8 +78,8 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 * If we did actual work for the real IRQ line we must let the
 * IRQ controller clean up too
 */
-if (work && desc->chip && desc->chip->end)
-desc->chip->end(irq);
+if (work && desc->irq_data.chip && desc->irq_data.chip->end)
+desc->irq_data.chip->end(irq);
 raw_spin_unlock(&desc->lock);

 return ok;
@@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
 desc->depth++;
-desc->chip->disable(irq);
+desc->irq_data.chip->disable(irq);

 mod_timer(&poll_spurious_irq_timer,
 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);