/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.status		= IRQ_DISABLED,
	.chip		= &no_irq_chip,
	.handle_irq	= handle_bad_irq,
	.lock		= __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
				nr * sizeof(*desc->kstat_irqs));

	/*
	 * don't overwrite if we can not get a new one,
	 * init_copy_kstat_irqs() could still use the old one
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.status		= IRQ_DISABLED,
		.chip		= &no_irq_chip,
		.handle_irq	= handle_bad_irq,
		.lock		= __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
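
/*
 * Hedged caller sketch, not part of this file: code that may run before a
 * descriptor exists (e.g. an architecture setting up a freshly created
 * vector) typically allocates the descriptor first and bails out if that
 * fails; "irq" and "node" below are whatever the caller already knows.
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *	if (!desc)
 *		return -ENOMEM;
 */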
#else /* !CONFIG_SPARSE_IRQ */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DISABLED,
		.chip		= &no_irq_chip,
		.handle_irq	= handle_bad_irq,
		.lock		= __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};
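
/*
 * Hedged usage sketch, not part of this file: a platform with an interrupt
 * source that needs no ack/mask handshake can wire it to dummy_irq_chip and a
 * simple flow handler; "irq" below is whatever number the platform assigned.
 *
 *	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 */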
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
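
/*
 * Illustrative sketch, not part of this file: no_action is handed to
 * setup_irq() when an irqaction must exist for a line but nothing has to run
 * for it, e.g. a cascade input whose real work is done by the demultiplexing
 * flow handler. "cascade_irq" below is hypothetical.
 *
 *	static struct irqaction cascade_action = {
 *		.handler	= no_action,
 *		.name		= "cascade",
 *	};
 *
 *	setup_irq(cascade_irq, &cascade_action);
 */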
static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	local_irq_disable();

	return retval;
}
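
/*
 * Hedged sketch, not part of this file: the IRQ_WAKE_THREAD branch above is
 * what a driver using request_threaded_irq() relies on: the hardirq part
 * returns IRQ_WAKE_THREAD and the generic code wakes the thread that runs
 * my_thread_fn(). All names below are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				       0, "mydev", my_dev);
 */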
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif
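
/*
 * Hedged sketch, not part of this file: "converting to proper flow handlers"
 * means the architecture installs a per-irq flow handler instead of
 * funnelling everything through __do_IRQ(), roughly:
 *
 *	set_irq_chip_and_handler(irq, &my_chip, handle_level_irq);
 *
 * where my_chip is the platform's struct irq_chip (hypothetical name) and
 * handle_level_irq / handle_edge_irq come from kernel/irq/chip.c.
 */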
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
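
/*
 * Hedged sketch, not part of this file: callers such as the /proc/interrupts
 * code sum the per-CPU counters to obtain the total count for one irq line,
 * roughly:
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		total += kstat_irqs_cpu(irq, cpu);
 */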