genirq: Provide compat handling for chip->disable()/shutdown()
[linux-2.6/x86.git] / kernel/irq/handle.c
blob 60e25c46eb55edb0911c48681947368917b1b446
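The commit named above belongs to the genirq rework that moves struct irq_chip from the old unsigned-int callbacks (->startup(), ->shutdown(), ->enable(), ->disable(), ->end()) to the irq_data-based ones (.irq_startup, .irq_shutdown, ...) which this file already fills in for no_irq_chip and dummy_irq_chip. A compat wrapper for a chip that still implements only the old ->disable()/->shutdown() callbacks can look roughly like the sketch below (a minimal illustration; the helper names are not necessarily the ones used in kernel/irq/chip.c):

static void compat_irq_disable(struct irq_data *data)
{
        struct irq_chip *chip = data->chip;

        chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
        struct irq_chip *chip = data->chip;

        chip->shutdown(data->irq);
}

During chip setup the core would then point .irq_disable/.irq_shutdown at wrappers like these whenever a chip provides only the old callbacks.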
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:        the interrupt number
 * @desc:       description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
        .status         = IRQ_DISABLED,
        .handle_irq     = handle_bad_irq,
        .depth          = 1,
        .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one
         */
        if (ptr) {
                printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        raw_spin_lock_init(&desc->lock);
        desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
        desc->irq_data.node = node;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, node, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        init_desc_masks(desc);
        arch_init_chip_data(desc, node);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .status         = IRQ_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int node;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
        node = first_online_node;

        /* allocate based on nr_cpu_ids */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                         sizeof(int), GFP_NOWAIT, node);

        irq_desc_init.irq_data.chip = &no_irq_chip;

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
                desc[i].irq_data.node = node;
#endif
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                set_irq_desc(i, &desc[i]);
        }

        return arch_early_irq_init();
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_to_desc(irq);
        if (desc)
                return desc;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_to_desc(irq);
        if (desc)
                goto out_unlock;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

        printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, node);

        set_irq_desc(irq, desc);

out_unlock:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                alloc_desc_masks(&desc[i], 0, true);
                init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
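/*
 * Editor's note -- illustrative sketch, not part of this file: regardless
 * of which irq_to_desc_alloc_node() variant above is built in, a
 * (hypothetical) controller setup path consumes it the same way: make sure
 * the descriptor exists, then bind a chip and a flow handler to the
 * interrupt.  dummy_irq_chip is used here only as a stand-in for a real
 * controller's irq_chip.
 */
static void __init __maybe_unused example_setup_one_irq(unsigned int irq,
                                                         int node)
{
        struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);

        if (!desc)
                return;

        set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
}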
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        print_irq_desc(data->irq, desc);
        ack_bad_irq(data->irq);
}
/*
 * NOP functions
 */
static void noop(struct irq_data *data) { }

static unsigned int noop_ret(struct irq_data *data)
{
        return 0;
}

static void compat_noop(unsigned int irq) { }

static unsigned int compat_noop_ret(unsigned int irq)
{
        return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .irq_startup    = noop_ret,
        .irq_shutdown   = noop,
        .irq_enable     = noop,
        .irq_disable    = noop,
        .irq_ack        = ack_bad,
        .startup        = compat_noop_ret,
        .end            = compat_noop,
};
/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .irq_startup    = noop_ret,
        .irq_shutdown   = noop,
        .irq_enable     = noop,
        .irq_disable    = noop,
        .irq_ack        = noop,
        .irq_mask       = noop,
        .irq_unmask     = noop,
        .startup        = compat_noop_ret,
        .end            = compat_noop,
};
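/*
 * Editor's note -- illustrative sketch (hypothetical driver code, not part
 * of this file): a minimal irq_chip written purely against the new
 * irq_data-based callbacks that the two chips above populate.  The register
 * accesses are only hinted at in comments; the MY_* names are made up.
 */
static void example_chip_mask(struct irq_data *data)
{
        /* e.g. write BIT(data->irq - MY_IRQ_BASE) to the mask-set register */
}

static void example_chip_unmask(struct irq_data *data)
{
        /* e.g. write BIT(data->irq - MY_IRQ_BASE) to the mask-clear register */
}

static struct irq_chip example_irq_chip __maybe_unused = {
        .name           = "example",
        .irq_mask       = example_chip_mask,
        .irq_unmask     = example_chip_unmask,
};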
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
        if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
                return;

        printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
               "but no thread function available.", irq, action->name);
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:        the interrupt number
 * @action:     the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        do {
                trace_irq_handler_entry(irq, action);
                ret = action->handler(irq, action->dev_id);
                trace_irq_handler_exit(irq, action, ret);

                switch (ret) {
                case IRQ_WAKE_THREAD:
                        /*
                         * Set result to handled so the spurious check
                         * does not trigger.
                         */
                        ret = IRQ_HANDLED;

                        /*
                         * Catch drivers which return WAKE_THREAD but
                         * did not set up a thread function
                         */
                        if (unlikely(!action->thread_fn)) {
                                warn_no_thread(irq, action);
                                break;
                        }

                        /*
                         * Wake up the handler thread for this
                         * action. In case the thread crashed and was
                         * killed we just pretend that we handled the
                         * interrupt. The hardirq handler above has
                         * disabled the device interrupt, so no irq
                         * storm is lurking.
                         */
                        if (likely(!test_bit(IRQTF_DIED,
                                             &action->thread_flags))) {
                                set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
                                wake_up_process(action->thread);
                        }

                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
                        status |= action->flags;
                        break;

                default:
                        break;
                }

                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
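/*
 * Editor's note -- illustrative sketch (hypothetical driver code, not part
 * of this file): the IRQ_WAKE_THREAD handling above pairs with
 * request_threaded_irq().  The hard irq handler silences the device and
 * defers the sleepable work to the threaded handler.
 */
static irqreturn_t example_hardirq(int irq, void *dev_id)
{
        /* quiesce the device interrupt here, then defer the real work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
        /* runs in process context, so sleeping is allowed */
        return IRQ_HANDLED;
}

/*
 * A driver would install the pair with something like:
 *
 *      err = request_threaded_irq(irq, example_hardirq, example_thread_fn,
 *                                 0, "example", dev);
 */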
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:        the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->irq_data.chip->ack)
                        desc->irq_data.chip->ack(irq);
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->irq_data.chip->end(irq);
                return 1;
        }

        raw_spin_lock(&desc->lock);
        if (desc->irq_data.chip->ack)
                desc->irq_data.chip->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                raw_spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                raw_spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->irq_data.chip->end(irq);
        raw_spin_unlock(&desc->lock);

        return 1;
}
#endif
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
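/*
 * Editor's note -- illustrative sketch (hypothetical, not part of this
 * file): summing the per-CPU counters with the kstat_irqs_cpu() accessor
 * exported above, in the style of the /proc/interrupts output code.
 */
static unsigned int __maybe_unused example_total_irq_count(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}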