/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
11 #include <linux/kernel.h>
12 #include <linux/delay.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/module.h>
17 #include <linux/proc_fs.h>
19 #include <linux/random.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/kallsyms.h>
23 #include <linux/kgdb.h>
24 #include <linux/ftrace.h>
26 #include <asm/atomic.h>
27 #include <asm/system.h>
28 #include <asm/uaccess.h>
/*
 * Bitmap of allocated MIPS IRQ numbers: one bit per IRQ, set = in use.
 * Guards allocate_irqno()/free_irqno() below.
 *
 * NOTE(review): the original declared this as
 * irq_map[NR_IRQS / BITS_PER_LONG], which under-allocates whenever
 * NR_IRQS is not a multiple of BITS_PER_LONG. DECLARE_BITMAP uses
 * BITS_TO_LONGS() and rounds up, so every bit 0..NR_IRQS-1 is backed
 * by storage. The resulting type (array of unsigned long) is unchanged.
 */
static DECLARE_BITMAP(irq_map, NR_IRQS);
36 int allocate_irqno(void)
41 irq
= find_first_zero_bit(irq_map
, NR_IRQS
);
46 if (test_and_set_bit(irq
, irq_map
))
53 * Allocate the 16 legacy interrupts for i8259 devices. This happens early
54 * in the kernel initialization so treating allocation failure as BUG() is
57 void __init
alloc_legacy_irqno(void)
61 for (i
= 0; i
<= 16; i
++)
62 BUG_ON(test_and_set_bit(i
, irq_map
));
65 void free_irqno(unsigned int irq
)
67 smp_mb__before_clear_bit();
68 clear_bit(irq
, irq_map
);
69 smp_mb__after_clear_bit();
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	/*
	 * NOTE(review): one statement at original line 78 was lost in
	 * extraction; upstream MIPS has smtc_im_ack_irq(irq) here to ack
	 * the SMTC interrupt mask bit — confirm against the original tree.
	 */
	smtc_im_ack_irq(irq);
	printk("unexpected IRQ # %d\n", irq);
}
82 atomic_t irq_err_count
;
85 * Generic, controller-independent functions:
88 int show_interrupts(struct seq_file
*p
, void *v
)
90 int i
= *(loff_t
*) v
, j
;
91 struct irqaction
* action
;
96 for_each_online_cpu(j
)
97 seq_printf(p
, "CPU%d ", j
);
102 raw_spin_lock_irqsave(&irq_desc
[i
].lock
, flags
);
103 action
= irq_desc
[i
].action
;
106 seq_printf(p
, "%3d: ", i
);
108 seq_printf(p
, "%10u ", kstat_irqs(i
));
110 for_each_online_cpu(j
)
111 seq_printf(p
, "%10u ", kstat_irqs_cpu(i
, j
));
113 seq_printf(p
, " %14s", irq_desc
[i
].chip
->name
);
114 seq_printf(p
, " %s", action
->name
);
116 for (action
=action
->next
; action
; action
= action
->next
)
117 seq_printf(p
, ", %s", action
->name
);
121 raw_spin_unlock_irqrestore(&irq_desc
[i
].lock
, flags
);
122 } else if (i
== NR_IRQS
) {
124 seq_printf(p
, "ERR: %10u\n", atomic_read(&irq_err_count
));
129 asmlinkage
void spurious_interrupt(void)
131 atomic_inc(&irq_err_count
);
134 void __init
init_IRQ(void)
139 if (kgdb_early_setup
)
143 for (i
= 0; i
< NR_IRQS
; i
++)
149 if (!kgdb_early_setup
)
150 kgdb_early_setup
= 1;
#ifdef DEBUG_STACKOVERFLOW
/*
 * Warn when the kernel stack pointer gets close to the bottom of the
 * thread stack.  Reads $sp directly via inline asm, masks it down to
 * the offset within the current THREAD_SIZE-aligned stack, and
 * complains when less than STACK_WARN bytes remain above thread_info.
 *
 * NOTE(review): the sp declaration and the "sp &= THREAD_MASK;"
 * normalization were lost in extraction and are restored from upstream
 * — confirm against the original tree.
 */
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
/* Debug checking disabled: compile to nothing. */
static inline void check_stack_overflow(void) {}
#endif
178 * do_IRQ handles all normal device IRQ's (the special
179 * SMP cross-CPU interrupts have their own specific
182 void __irq_entry
do_IRQ(unsigned int irq
)
185 check_stack_overflow();
186 __DO_IRQ_SMTC_HOOK(irq
);
187 generic_handle_irq(irq
);
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 *
 * NOTE(review): as with do_IRQ(), the irq_enter()/irq_exit() bracket
 * was lost in extraction and is restored here — confirm against the
 * original tree.
 */
void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
	irq_enter();
	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
	generic_handle_irq(irq);
	irq_exit();
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */