/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/machvec.h>

#include "proto.h"

#define vulp	volatile unsigned long *
#define vuip	volatile unsigned int *

/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
   in the per-cpu structure for cache reasons.  */
#ifndef __SMP__
int __local_irq_count;
int __local_bh_count;
unsigned long __irq_attempt[NR_IRQS];
#endif

#if NR_IRQS > 128
#  error Unable to handle more than 128 irq levels.
#endif

#ifdef CONFIG_ALPHA_GENERIC
#define ACTUAL_NR_IRQS	alpha_mv.nr_irqs
#else
#define ACTUAL_NR_IRQS	NR_IRQS
#endif

/* Reserved interrupts.  These must NEVER be requested by any driver!
   IRQ 2 used by hw cascade */
#define	IS_RESERVED_IRQ(irq)	((irq)==2)

/*
 * The ack_irq routine used by 80% of the systems.
 */

void
common_ack_irq(unsigned long irq)
{
	if (irq < 16) {
		/* Ack the interrupt making it the lowest priority */
		/* First the slave .. */
		if (irq > 7) {
			outb(0xE0 | (irq - 8), 0xa0);
			irq = 2;
		}
		/* .. then the master */
		outb(0xE0 | irq, 0x20);
	}
}

static void dummy_perf(unsigned long vector, struct pt_regs *regs)
{
	printk(KERN_CRIT "Performance counter interrupt!\n");
}

void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;

/*
 * Dispatch device interrupts.
 */

/* Handle ISA interrupt via the PICs. */

#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC	alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC	APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC	LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC	CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC	PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC	TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_POLARIS)
# define IACK_SC	POLARIS_IACK_SC
#elif defined(CONFIG_ALPHA_IRONGATE)
# define IACK_SC	IRONGATE_IACK_SC
#else
/* This is bogus but necessary to get it to compile on all platforms. */
# define IACK_SC	1L
#endif

void
isa_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
#if 1
	/*
	 * Generate a PCI interrupt acknowledge cycle.  The PIC will
	 * respond with the interrupt vector of the highest priority
	 * interrupt that is pending.  The PALcode sets up the
	 * interrupt vectors such that irq level L generates vector L.
	 */
	int j = *(vuip) IACK_SC;
	j &= 0xff;
	if (j == 7) {
		if (!(inb(0x20) & 0x80)) {
			/* It's only a passive release... */
			return;
		}
	}
	handle_irq(j, regs);
#else
	unsigned long pic;

	/*
	 * It seems to me that the probability of two or more *device*
	 * interrupts occurring at almost exactly the same time is
	 * pretty low.  So why pay the price of checking for
	 * additional interrupts here if the common case can be
	 * handled so much easier?
	 */
	/*
	 * The first read gives you *all* interrupting lines.
	 * Therefore, read the mask register and AND out those lines
	 * not enabled.  Note that some documentation has 21 and a1
	 * write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= ~alpha_irq_mask;			/* apply mask */
	pic &= 0xFFFB;				/* mask out cascade & hibits */

	while (pic) {
		int j = ffz(~pic);
		pic &= pic - 1;
		handle_irq(j, regs);
	}
#endif
}

/* Handle interrupts from the SRM, assuming no additional weirdness.  */

void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	irq = (vector - 0x800) >> 4;
	handle_irq(irq, regs);
}
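
/* For example, SRM vector 0x800 maps to irq 0, and vector 0x900 maps to
   irq (0x900 - 0x800) >> 4 == 16, the first irq above the 16 ISA lines.  */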

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Initial irq handlers.
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};

int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	kstat.irqs[cpu][irq]++;
	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	do {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		else
			__cli();

		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void
disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!irq_desc[irq].depth++) {
		irq_desc[irq].status |= IRQ_DISABLED;
		irq_desc[irq].handler->disable(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void
disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}

void
enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	switch (irq_desc[irq].depth) {
	case 1: {
		unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
		irq_desc[irq].status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			irq_desc[irq].status = status | IRQ_REPLAY;
			hw_resend_irq(irq_desc[irq].handler, irq); /* noop */
		}
		irq_desc[irq].handler->enable(irq);
		/* fall-through */
	}
	default:
		irq_desc[irq].depth--;
		break;
	case 0:
		printk("enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
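
/*
 * Illustrative sketch (not part of the original file): disable_irq() and
 * enable_irq() nest via the depth counter above, so a line stays masked
 * until the final enable.  The irq number below is arbitrary.
 */
#if 0
	disable_irq(5);		/* depth 0 -> 1, line masked */
	disable_irq(5);		/* depth 1 -> 2, still masked */
	enable_irq(5);		/* depth 2 -> 1, still masked */
	enable_irq(5);		/* depth 1 -> 0, line unmasked again */
#endif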

int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically.
	 */
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to. */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* Add new interrupt at end of irq queue. */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].depth = 0;
		irq_desc[irq].status &= ~IRQ_DISABLED;
		irq_desc[irq].handler->startup(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	    unsigned long irqflags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction * action;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (IS_RESERVED_IRQ(irq))
		return -EINVAL;
	if (!handler)
		return -EINVAL;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at %p) called us without a dev_id!\n",
			       devname, __builtin_return_address(0));
	}
#endif

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

void
free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk("Trying to free IRQ%d\n", irq);
		return;
	}
	if (IS_RESERVED_IRQ(irq)) {
		printk("Trying to free reserved IRQ %d\n", irq);
		return;
	}
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &irq_desc[irq].action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries. */
			*pp = action->next;
			if (!irq_desc[irq].action) {
				irq_desc[irq].status |= IRQ_DISABLED;
				irq_desc[irq].handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&irq_controller_lock, flags);

			/* Wait to make sure it's not being used on another CPU. */
			while (irq_desc[irq].status & IRQ_INPROGRESS)
				barrier();
			kfree(action);
			return;
		}
		printk("Trying to free already-free IRQ%d\n", irq);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
		return;
	}
}
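
/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically use request_irq()/free_irq() above.  The irq number, device
 * name, handler and dev_id below are hypothetical.
 */
#if 0
static int example_dev;		/* unique cookie for shared-irq matching */

static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* acknowledge and service the (hypothetical) device here */
}

static int example_attach(void)
{
	/* SA_SHIRQ handlers must pass a unique, non-NULL dev_id */
	return request_irq(9, example_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
			   "example", &example_dev);
}

static void example_detach(void)
{
	free_irq(9, &example_dev);	/* same dev_id as passed to request_irq */
}
#endif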

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

#ifdef __SMP__
	p += sprintf(p, "           ");
	for (i = 0; i < smp_num_cpus; i++)
		p += sprintf(p, "CPU%d       ", i);
	for (i = 0; i < smp_num_cpus; i++)
		p += sprintf(p, "TRY%d       ", i);
	*p++ = '\n';
#endif

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ", i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				     kstat.irqs[cpu_logical_map(j)][i]);
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10lu ",
				     irq_attempt(cpu_logical_map(j), i));
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, " %c%s",
			     (action->flags & SA_INTERRUPT) ? '+' : ' ',
			     action->name);

		for (action = action->next; action; action = action->next) {
			p += sprintf(p, ", %c%s",
				     (action->flags & SA_INTERRUPT) ? '+' : ' ',
				     action->name);
		}
		*p++ = '\n';
	}
#if CONFIG_SMP
	p += sprintf(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10lu ",
			     cpu_data[cpu_logical_map(j)].smp_local_irq_count);
	p += sprintf(p, "\n");
#endif
	return p - buf;
}

#ifdef __SMP__
/* Who has global_irq_lock. */
int global_irq_holder = NO_PROC_ID;

/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);

static void *previous_irqholder = NULL;

#define MAXCOUNT 100000000

static void show(char * str, void *where);

static inline void
wait_on_irq(int cpu, void *where)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone.  Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !spin_is_locked(&global_bh_lock))
				break;
		}

		/* Duh, we have to loop.  Release the lock to avoid deadlocks. */
		spin_unlock(&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq", where);
				count = MAXCOUNT;
			}
			__sti();
			udelay(1); /* make sure to run pending irqs */
			__cli();

			if (atomic_read(&global_irq_count))
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu)
			    && spin_is_locked(&global_bh_lock))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}

static inline void
get_irqlock(int cpu, void* where)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* Do we already hold the lock? */
		if (cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it.  Wait. */
		spin_lock(&global_irq_lock);
	}

	/*
	 * Ok, we got the lock bit.
	 * But that's actually just the easy part.. Now
	 * we need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu, where);

	/*
	 * Finally.
	 */
#if DEBUG_SPINLOCK
	global_irq_lock.task = current;
	global_irq_lock.previous = where;
#endif
	global_irq_holder = cpu;
	previous_irqholder = where;
}

void
__global_cli(void)
{
	int cpu = smp_processor_id();
	void *where = __builtin_return_address(0);

	/*
	 * Maximize ipl.  If ipl was previously 0 and if this thread
	 * is not in an irq, then take global_irq_lock.
	 */
	if (swpipl(7) == 0 && !local_irq_count(cpu))
		get_irqlock(cpu, where);
}

void
__global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long
__global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (!(flags & 7));
	/* default to local */
	retval = 2 + local_enabled;

	/* Check for global flags if we're not in an interrupt. */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}

void
__global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%p)\n",
		       flags, __builtin_return_address(0));
	}
}
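
/*
 * Illustrative sketch (not part of the original file): the 0-3 encoding
 * documented above lets a caller save the current state, force a global
 * cli, and later restore whatever was in effect before.
 */
#if 0
	unsigned long flags;

	flags = __global_save_flags();	/* 0/1 = global cli/sti, 2/3 = local cli/sti */
	__global_cli();			/* raise ipl, take global_irq_lock if needed */
	/* ... touch state shared with interrupt handlers on other CPUs ... */
	__global_restore_flags(flags);	/* return to the previously saved state */
#endif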

static void
show(char * str, void *where)
{
#if 0
	int i;
	unsigned long *stack;
#endif
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d: %p\n", str, cpu, where);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       cpu_data[0].irq_count,
	       cpu_data[1].irq_count);

	printk("bh:   %d [%d %d]\n",
	       spin_is_locked(&global_bh_lock) ? 1 : 0,
	       cpu_data[0].bh_count,
	       cpu_data[1].bh_count);
#if 0
	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
#endif
}

/*
 * From its use, I infer that synchronize_irq() stalls a thread until
 * the effects of a command to an external device are known to have
 * taken hold.  Typically, the command is to stop sending interrupts.
 * The strategy here is to wait until there is at most one processor
 * (this one) in an irq.  The memory barrier serializes the write to
 * the device and the subsequent accesses of global_irq_count.
 * --jmartin
 */
#define DEBUG_SYNCHRONIZE_IRQ 0

void
synchronize_irq(void)
{
#if 0
	/* Joe's version.  */
	int cpu = smp_processor_id();
	int local_count;
	int global_count;
	int countdown = 1 << 24;
	void *where = __builtin_return_address(0);

	mb();
	do {
		local_count = local_irq_count(cpu);
		global_count = atomic_read(&global_irq_count);
		if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
			printk("%d:%d/%d\n", cpu, local_count, global_count);
			show("synchronize_irq", where);
			break;
		}
	} while (global_count != local_count);
#else
	/* Jay's version.  */
	if (atomic_read(&global_irq_count)) {
		cli();
		sti();
	}
#endif
}
#endif /* __SMP__ */

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void
handle_irq(int irq, struct pt_regs * regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU (or is disabled).
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc;
	struct irqaction * action;
	unsigned int status;

	if ((unsigned) irq >= ACTUAL_NR_IRQS) {
		printk("device_interrupt: illegal interrupt %d\n", irq);
		return;
	}

	irq_attempt(cpu, irq)++;
	desc = irq_desc + irq;
	spin_lock_irq(&irq_controller_lock); /* mask also the RTC */
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;
	spin_unlock(&irq_controller_lock);

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		return;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler.  But the code here only handles the _second_
	 * instance of the irq, not the third or fourth.  So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		handle_IRQ_event(irq, regs, action);
		spin_lock(&irq_controller_lock);

		if (!(desc->status & IRQ_PENDING)
		    || (desc->status & IRQ_LEVEL))
			break;
		desc->status &= ~IRQ_PENDING;
		spin_unlock(&irq_controller_lock);
	}
	desc->status &= ~IRQ_INPROGRESS;
	if (!(desc->status & IRQ_DISABLED))
		desc->handler->end(irq);
	spin_unlock(&irq_controller_lock);
}

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long
probe_irq_on(void)
{
	int i;
	unsigned long delay;
	unsigned long val;

	/* Something may have generated an irq long ago and we want to
	   flush such a longstanding irq before considering it as spurious. */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS - 1; i >= 0; i--)
		if (!irq_desc[i].action)
			if (irq_desc[i].handler->startup(i))
				irq_desc[i].status |= IRQ_PENDING;
	spin_unlock_irq(&irq_controller_lock);

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/* Next, enable any unassigned irqs (we must startup again here
	   because if a longstanding irq happened in the previous stage,
	   it may have masked itself). */
	spin_lock_irq(&irq_controller_lock);
	for (i = NR_IRQS - 1; i >= 0; i--) {
		if (!irq_desc[i].action) {
			irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (irq_desc[i].handler->startup(i))
				irq_desc[i].status |= IRQ_PENDING;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * Wait for spurious interrupts to trigger.
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts.
	 */
	val = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		/* It triggered already - consider it spurious. */
		if (!(status & IRQ_WAITING)) {
			irq_desc[i].status = status & ~IRQ_AUTODETECT;
			irq_desc[i].handler->shutdown(i);
			continue;
		}

		if (i < 64)
			val |= 1UL << i;
	}
	spin_unlock_irq(&irq_controller_lock);

	return val;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (!(status & IRQ_WAITING))
			mask |= 1 << i;

		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	return mask & val;
}

/*
 * Get the result of the IRQ probe..  A negative result means that
 * we have several candidates (but we return the lowest-numbered
 * one).
 */
int
probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		unsigned int status = irq_desc[i].status;

		if (!(status & IRQ_AUTODETECT))
			continue;

		if (!(status & IRQ_WAITING)) {
			if (!nr_irqs)
				irq_found = i;
			nr_irqs++;
		}
		irq_desc[i].status = status & ~IRQ_AUTODETECT;
		irq_desc[i].handler->shutdown(i);
	}
	spin_unlock_irq(&irq_controller_lock);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
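
/*
 * Illustrative sketch (not part of the original file): the classic ISA
 * autoprobe sequence built from the helpers above.  The function that makes
 * the card raise its interrupt is hypothetical.
 */
#if 0
static int example_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();			/* arm all unassigned irqs */
	example_trigger_interrupt();		/* hypothetical: make the device interrupt */
	udelay(100);				/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);		/* >0: the irq, 0: none, <0: several candidates */
	return irq;
}
#endif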

/*
 * The main interrupt entry point.
 */

asmlinkage void
do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
	  unsigned long a3, unsigned long a4, unsigned long a5,
	  struct pt_regs regs)
{
	switch (type) {
	case 0:
#ifdef __SMP__
		handle_ipi(&regs);
		return;
#else
		printk("Interprocessor interrupt? You must be kidding\n");
#endif
		break;
	case 1:
#ifdef __SMP__
		cpu_data[smp_processor_id()].smp_local_irq_count++;
		smp_percpu_timer_interrupt(&regs);
		if (smp_processor_id() == smp_boot_cpuid)
#endif
			handle_irq(RTC_IRQ, &regs);
		return;
	case 2:
		alpha_mv.machine_check(vector, la_ptr, &regs);
		return;
	case 3:
		alpha_mv.device_interrupt(vector, &regs);
		return;
	case 4:
		perf_irq(vector, &regs);
		return;
	default:
		printk("Hardware intr %ld %lx? Huh?\n", type, vector);
	}
	printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
}

void __init
init_IRQ(void)
{
	wrent(entInt, 0);
	alpha_mv.init_irq();
}

#define MCHK_K_TPERR		0x0080
#define MCHK_K_TCPERR		0x0082
#define MCHK_K_HERR		0x0084
#define MCHK_K_ECC_C		0x0086
#define MCHK_K_ECC_NC		0x0088
#define MCHK_K_OS_BUGCHECK	0x008A
#define MCHK_K_PAL_BUGCHECK	0x0090

#ifndef __SMP__
struct mcheck_info __mcheck_info;
#endif

void
process_mcheck_info(unsigned long vector, unsigned long la_ptr,
		    struct pt_regs *regs, const char *machine,
		    int expected)
{
	struct el_common *mchk_header;
	const char *reason;

	/*
	 * See if the machine check is due to a badaddr() and if so,
	 * ignore it.
	 */

#if DEBUG_MCHECK > 0
	printk(KERN_CRIT "%s machine check %s\n", machine,
	       expected ? "expected." : "NOT expected!!!");
#endif

	if (expected) {
		int cpu = smp_processor_id();
		mcheck_expected(cpu) = 0;
		mcheck_taken(cpu) = 1;
		return;
	}

	mchk_header = (struct el_common *)la_ptr;

	printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%lx\n",
	       machine, vector, regs->pc, mchk_header->code);

	switch ((unsigned int) mchk_header->code) {
	/* Machine check reasons.  Defined according to PALcode sources. */
	case 0x80: reason = "tag parity error"; break;
	case 0x82: reason = "tag control parity error"; break;
	case 0x84: reason = "generic hard error"; break;
	case 0x86: reason = "correctable ECC error"; break;
	case 0x88: reason = "uncorrectable ECC error"; break;
	case 0x8A: reason = "OS-specific PAL bugcheck"; break;
	case 0x90: reason = "callsys in kernel mode"; break;
	case 0x96: reason = "i-cache read retryable error"; break;
	case 0x98: reason = "processor detected hard error"; break;

	/* System specific (these are for Alcor, at least): */
	case 0x202: reason = "system detected hard error"; break;
	case 0x203: reason = "system detected uncorrectable ECC error"; break;
	case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
	case 0x205: reason = "parity error detected by CIA"; break;
	case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
	case 0x207: reason = "non-existent memory error"; break;
	case 0x208: reason = "MCHK_K_DCSR"; break;
	case 0x209: reason = "PCI SERR detected"; break;
	case 0x20b: reason = "PCI data parity error detected"; break;
	case 0x20d: reason = "PCI address parity error detected"; break;
	case 0x20f: reason = "PCI master abort error"; break;
	case 0x211: reason = "PCI target abort error"; break;
	case 0x213: reason = "scatter/gather PTE invalid error"; break;
	case 0x215: reason = "flash ROM write error"; break;
	case 0x217: reason = "IOA timeout detected"; break;
	case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
	case 0x21b: reason = "EISA fail-safe timer timeout"; break;
	case 0x21d: reason = "EISA bus time-out"; break;
	case 0x21f: reason = "EISA software generated NMI"; break;
	case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
	default: reason = "unknown"; break;
	}

	printk(KERN_CRIT "machine check type: %s%s\n",
	       reason, mchk_header->retry ? " (retryable)" : "");

	dik_show_regs(regs, NULL);

#if DEBUG_MCHECK > 1
	{
		/* Dump the logout area to give all info. */
		unsigned long *ptr = (unsigned long *)la_ptr;
		long i;

		for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
			printk(KERN_CRIT "   +%8lx %016lx %016lx\n",
			       i * sizeof(long), ptr[i], ptr[i+1]);
		}
	}
#endif
}