MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
arch/sh/kernel/irq.c (from linux-2.6.9-moxart.git)
/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
 *
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */

/*
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * Each architecture has to answer this itself; it doesn't deserve
	 * a generic callback, I think.
	 */
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};
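
/*
 * Note: every descriptor starts out pointing at no_irq_type (see the
 * irq_desc[] initializer above), so setup_irq() rejects handlers with
 * -ENOSYS until platform code installs a real hw_interrupt_type for
 * the line.
 */
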
/*
 * Generic, controller-independent functions:
 */

#if defined(CONFIG_PROC_FS)
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < ACTUAL_NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ",i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
#endif
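
/*
 * The records above drive /proc/interrupts.  Illustrative output only
 * (IRQ numbers, counts and the controller column depend on the board and
 * on which hw_interrupt_type each line uses):
 *
 *	           CPU0
 *	 16:      12345        IPR-IRQ  timer
 *	 22:        678        IPR-IRQ  serial
 */
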
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	local_irq_disable();
	return retval;
}
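
/*
 * Each action handler in the chain above reports whether it serviced the
 * interrupt.  A minimal sketch of such a handler for a hypothetical "foo"
 * device (the foo struct and register names are made up):
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!(readb(foo->base + FOO_STATUS) & FOO_PENDING))
 *			return IRQ_NONE;	   not ours (shared line)
 *		...acknowledge and service the device...
 *		return IRQ_HANDLED;
 *	}
 */
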
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
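
/*
 * Usage: booting with "noirqdebug" on the kernel command line skips the
 * note_interrupt() bookkeeping below, so spurious-interrupt detection and
 * the automatic disabling of stuck IRQ lines are turned off.
 */
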
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
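
/*
 * Worked example: the counters are evaluated every 100,000 interrupts on a
 * line.  If, say, 99,950 of those returned something other than IRQ_HANDLED,
 * the 99,900 threshold is exceeded and the line is reported and masked via
 * ->disable(); up to 100 handled interrupts per window (e.g. from a healthy
 * device sharing the line) do not prevent the shutdown.
 */
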
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
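
/*
 * disable_irq()/enable_irq() nest via desc->depth: the line is only
 * unmasked again once every disable has been balanced.  Sketch of the
 * usual pattern in a hypothetical driver:
 *
 *	disable_irq(irq);	   waits for a running handler to finish
 *	disable_irq(irq);	   depth is now 2
 *	enable_irq(irq);	   still masked (depth 1)
 *	enable_irq(irq);	   unmasked again (depth 0)
 */
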
/*
 * do_IRQ handles all normal device IRQ's.
 */
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
		      unsigned long r6, unsigned long r7,
		      struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq;
	irq_desc_t *desc;
	struct irqaction * action;
	unsigned int status;

	irq_enter();

#ifdef CONFIG_PREEMPT
	/*
	 * At this point we're now about to actually call handlers,
	 * and interrupts might get reenabled during them... bump
	 * preempt_count to prevent any preemption while the handler
	 * called here is pending...
	 */
	preempt_disable();
#endif

	/* Get IRQ number */
	asm volatile("stc	r2_bank, %0\n\t"
		     "shlr2	%0\n\t"
		     "shlr2	%0\n\t"
		     "shlr	%0\n\t"
		     "add	#-16, %0\n\t"
		     :"=z" (irq));
	irq = irq_demux(irq);

	kstat_this_cpu.irqs[irq]++;
	desc = irq_desc + irq;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, &regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();

#ifdef CONFIG_PREEMPT
	/*
	 * We're done with the handlers; interrupts should be
	 * currently disabled. Decrement preempt_count now so that,
	 * as we return, preemption may be allowed again...
	 */
	preempt_enable_no_resched();
#endif

	return 1;
}

int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
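
/*
 * Typical request/release pairing in a hypothetical driver ("foo_interrupt",
 * FOO_IRQ and the foo pointer are made-up names, shown only as a sketch):
 *
 *	if (request_irq(FOO_IRQ, foo_interrupt, SA_SHIRQ, "foo", foo))
 *		return -EBUSY;
 *	...
 *	free_irq(FOO_IRQ, foo);	   dev_id must match the request_irq() call,
 *				   since that is what identifies the action
 *				   on a shared line
 */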

static DECLARE_MUTEX(probe_sem);

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		unsigned int status;

		desc = irq_desc + i;
		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/* Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
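
/*
 * Typical autoprobe sequence in a hypothetical legacy driver
 * ("foo_trigger_interrupt" is a made-up helper, shown only as a sketch):
 *
 *	unsigned long mask = probe_irq_on();
 *	foo_trigger_interrupt(dev);	   make the hardware raise its IRQ
 *	irq = probe_irq_off(mask);	   > 0: unique IRQ identified,
 *					   0: nothing fired, < 0: several did
 */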

int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);
	return 0;
}
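
/*
 * Note: only the first action installed on a line resets ->depth and calls
 * ->startup(); additional SA_SHIRQ registrations are appended to the tail
 * of the existing action chain and leave the controller state alone.
 */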

#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
void init_irq_proc(void)
{
}
#endif

/* Taken from the 2.5 alpha port */
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	/* is there anything to synchronize with? */
	if (!irq_desc[irq].action)
		return;

	while (irq_desc[irq].status & IRQ_INPROGRESS)
		barrier();
}
#endif