/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/ptrace.h>
16 #include <linux/errno.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/signal.h>
19 #include <linux/sched.h>
20 #include <linux/interrupt.h>
21 #include <linux/malloc.h>
22 #include <linux/random.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/irq.h>
27 #include <asm/system.h>
29 #include <asm/bitops.h>
30 #include <asm/machvec.h>
34 #define vulp volatile unsigned long *
35 #define vuip volatile unsigned int *
37 /* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
38 in the per-cpu structure for cache reasons. */
40 int __local_irq_count
;
42 unsigned long __irq_attempt
[NR_IRQS
];
46 # error Unable to handle more than 128 irq levels.
49 #ifdef CONFIG_ALPHA_GENERIC
50 #define ACTUAL_NR_IRQS alpha_mv.nr_irqs
52 #define ACTUAL_NR_IRQS NR_IRQS
55 /* Reserved interrupts. These must NEVER be requested by any driver!
56 IRQ 2 used by hw cascade */
57 #define IS_RESERVED_IRQ(irq) ((irq)==2)
61 * The ack_irq routine used by 80% of the systems.
void
common_ack_irq(unsigned long irq)
{
	/* Only the two cascaded 8259 PICs (ISA irqs 0-15) need an ack;
	   anything higher is handled by the platform's own controller. */
	if (irq < 16) {
		/* Ack the interrupt making it the lowest priority */
		/* First the slave .. */
		if (irq > 7) {
			outb(0xE0 | (irq - 8), 0xa0);
			irq = 2;	/* ack the cascade line on the master */
		}
		/* .. then the master */
		outb(0xE0 | irq, 0x20);
	}
}
81 static void dummy_perf(unsigned long vector
, struct pt_regs
*regs
)
83 printk(KERN_CRIT
"Performance counter interrupt!\n");
86 void (*perf_irq
)(unsigned long, struct pt_regs
*) = dummy_perf
;
89 * Dispatch device interrupts.
92 /* Handle ISA interrupt via the PICs. */
94 #if defined(CONFIG_ALPHA_GENERIC)
95 # define IACK_SC alpha_mv.iack_sc
96 #elif defined(CONFIG_ALPHA_APECS)
97 # define IACK_SC APECS_IACK_SC
98 #elif defined(CONFIG_ALPHA_LCA)
99 # define IACK_SC LCA_IACK_SC
100 #elif defined(CONFIG_ALPHA_CIA)
101 # define IACK_SC CIA_IACK_SC
102 #elif defined(CONFIG_ALPHA_PYXIS)
103 # define IACK_SC PYXIS_IACK_SC
104 #elif defined(CONFIG_ALPHA_TSUNAMI)
105 # define IACK_SC TSUNAMI_IACK_SC
106 #elif defined(CONFIG_ALPHA_POLARIS)
107 # define IACK_SC POLARIS_IACK_SC
108 #elif defined(CONFIG_ALPHA_IRONGATE)
109 # define IACK_SC IRONGATE_IACK_SC
111 /* This is bogus but necessary to get it to compile on all platforms. */
116 isa_device_interrupt(unsigned long vector
, struct pt_regs
* regs
)
120 * Generate a PCI interrupt acknowledge cycle. The PIC will
121 * respond with the interrupt vector of the highest priority
122 * interrupt that is pending. The PALcode sets up the
123 * interrupts vectors such that irq level L generates vector L.
125 int j
= *(vuip
) IACK_SC
;
128 if (!(inb(0x20) & 0x80)) {
129 /* It's only a passive release... */
138 * It seems to me that the probability of two or more *device*
139 * interrupts occurring at almost exactly the same time is
140 * pretty low. So why pay the price of checking for
141 * additional interrupts here if the common case can be
142 * handled so much easier?
145 * The first read of gives you *all* interrupting lines.
146 * Therefore, read the mask register and and out those lines
147 * not enabled. Note that some documentation has 21 and a1
148 * write only. This is not true.
150 pic
= inb(0x20) | (inb(0xA0) << 8); /* read isr */
151 pic
&= ~alpha_irq_mask
; /* apply mask */
152 pic
&= 0xFFFB; /* mask out cascade & hibits */
157 handle_irq(j
, j
, regs
);
/* Handle interrupts from the SRM, assuming no additional weirdness.  */

void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq;

	/* SRM console vectors start at 0x800 and step by 0x10 per irq. */
	irq = (vector - 0x800) >> 4;
	handle_irq(irq, regs);
}
175 * Special irq handlers.
/*
 * Special irq handlers.
 */

/* Deliberate no-op handler, for irqs that must be claimed but ignored. */
void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
 * Initial irq handlers.  These are installed in no_irq_type below and
 * serve as safe placeholders until a real controller claims the irq.
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/* An irq arrived on a vector nobody registered a controller for. */
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none
196 struct hw_interrupt_type no_irq_type
= {
206 spinlock_t irq_controller_lock
= SPIN_LOCK_UNLOCKED
;
207 irq_desc_t irq_desc
[NR_IRQS
] __cacheline_aligned
=
208 { [0 ... NR_IRQS
-1] = { 0, &no_irq_type
, }};
210 int handle_IRQ_event(unsigned int irq
, struct pt_regs
* regs
, struct irqaction
* action
)
213 int cpu
= smp_processor_id();
215 kstat
.irqs
[cpu
][irq
]++;
218 status
= 1; /* Force the "do bottom halves" bit */
221 if (!(action
->flags
& SA_INTERRUPT
))
226 status
|= action
->flags
;
227 action
->handler(irq
, action
->dev_id
, regs
);
228 action
= action
->next
;
230 if (status
& SA_SAMPLE_RANDOM
)
231 add_interrupt_randomness(irq
);
240 * Generic enable/disable code: this just calls
241 * down into the PIC-specific version for the actual
242 * hardware disable after having gotten the irq
246 disable_irq_nosync(unsigned int irq
)
250 spin_lock_irqsave(&irq_controller_lock
, flags
);
251 if (!irq_desc
[irq
].depth
++) {
252 irq_desc
[irq
].status
|= IRQ_DISABLED
;
253 irq_desc
[irq
].handler
->disable(irq
);
255 spin_unlock_irqrestore(&irq_controller_lock
, flags
);
259 * Synchronous version of the above, making sure the IRQ is
260 * no longer running on any other IRQ..
263 disable_irq(unsigned int irq
)
265 disable_irq_nosync(irq
);
267 if (!local_irq_count(smp_processor_id())) {
270 } while (irq_desc
[irq
].status
& IRQ_INPROGRESS
);
275 enable_irq(unsigned int irq
)
279 spin_lock_irqsave(&irq_controller_lock
, flags
);
280 switch (irq_desc
[irq
].depth
) {
282 unsigned int status
= irq_desc
[irq
].status
& ~IRQ_DISABLED
;
283 irq_desc
[irq
].status
= status
;
284 if ((status
& (IRQ_PENDING
| IRQ_REPLAY
)) == IRQ_PENDING
) {
285 irq_desc
[irq
].status
= status
| IRQ_REPLAY
;
286 hw_resend_irq(irq_desc
[irq
].handler
,irq
); /* noop */
288 irq_desc
[irq
].handler
->enable(irq
);
292 irq_desc
[irq
].depth
--;
295 printk("enable_irq() unbalanced from %p\n",
296 __builtin_return_address(0));
298 spin_unlock_irqrestore(&irq_controller_lock
, flags
);
302 setup_irq(unsigned int irq
, struct irqaction
* new)
305 struct irqaction
*old
, **p
;
309 * Some drivers like serial.c use request_irq() heavily,
310 * so we have to be careful not to interfere with a
313 if (new->flags
& SA_SAMPLE_RANDOM
) {
315 * This function might sleep, we want to call it first,
316 * outside of the atomic block.
317 * Yes, this might clear the entropy pool if the wrong
318 * driver is attempted to be loaded, without actually
319 * installing a new handler, but is this really a problem,
320 * only the sysadmin is able to do this.
322 rand_initialize_irq(irq
);
326 * The following block of code has to be executed atomically
328 spin_lock_irqsave(&irq_controller_lock
,flags
);
329 p
= &irq_desc
[irq
].action
;
330 if ((old
= *p
) != NULL
) {
331 /* Can't share interrupts unless both agree to */
332 if (!(old
->flags
& new->flags
& SA_SHIRQ
)) {
333 spin_unlock_irqrestore(&irq_controller_lock
,flags
);
337 /* add new interrupt at end of irq queue */
348 irq_desc
[irq
].depth
= 0;
349 irq_desc
[irq
].status
&= ~IRQ_DISABLED
;
350 irq_desc
[irq
].handler
->startup(irq
);
352 spin_unlock_irqrestore(&irq_controller_lock
,flags
);
357 request_irq(unsigned int irq
, void (*handler
)(int, void *, struct pt_regs
*),
358 unsigned long irqflags
, const char * devname
, void *dev_id
)
361 struct irqaction
* action
;
363 if (irq
>= ACTUAL_NR_IRQS
)
365 if (IS_RESERVED_IRQ(irq
))
372 * Sanity-check: shared interrupts should REALLY pass in
373 * a real dev-ID, otherwise we'll have trouble later trying
374 * to figure out which interrupt is which (messes up the
375 * interrupt freeing logic etc).
377 if (irqflags
& SA_SHIRQ
) {
379 printk("Bad boy: %s (at %p) called us without a dev_id!\n",
380 devname
, __builtin_return_address(0));
384 action
= (struct irqaction
*)
385 kmalloc(sizeof(struct irqaction
), GFP_KERNEL
);
389 action
->handler
= handler
;
390 action
->flags
= irqflags
;
392 action
->name
= devname
;
394 action
->dev_id
= dev_id
;
396 retval
= setup_irq(irq
, action
);
403 free_irq(unsigned int irq
, void *dev_id
)
405 struct irqaction
**p
;
408 if (irq
>= ACTUAL_NR_IRQS
) {
409 printk("Trying to free IRQ%d\n",irq
);
412 if (IS_RESERVED_IRQ(irq
)) {
413 printk("Trying to free reserved IRQ %d\n", irq
);
416 spin_lock_irqsave(&irq_controller_lock
,flags
);
417 p
= &irq_desc
[irq
].action
;
419 struct irqaction
* action
= *p
;
421 struct irqaction
**pp
= p
;
423 if (action
->dev_id
!= dev_id
)
426 /* Found it - now remove it from the list of entries */
428 if (!irq_desc
[irq
].action
) {
429 irq_desc
[irq
].status
|= IRQ_DISABLED
;
430 irq_desc
[irq
].handler
->shutdown(irq
);
432 spin_unlock_irqrestore(&irq_controller_lock
,flags
);
434 /* Wait to make sure it's not being used on another CPU */
435 while (irq_desc
[irq
].status
& IRQ_INPROGRESS
)
440 printk("Trying to free free IRQ%d\n",irq
);
441 spin_unlock_irqrestore(&irq_controller_lock
,flags
);
446 int get_irq_list(char *buf
)
449 struct irqaction
* action
;
453 p
+= sprintf(p
, " ");
454 for (i
= 0; i
< smp_num_cpus
; i
++)
455 p
+= sprintf(p
, "CPU%d ", i
);
456 for (i
= 0; i
< smp_num_cpus
; i
++)
457 p
+= sprintf(p
, "TRY%d ", i
);
461 for (i
= 0; i
< NR_IRQS
; i
++) {
462 action
= irq_desc
[i
].action
;
465 p
+= sprintf(p
, "%3d: ",i
);
467 p
+= sprintf(p
, "%10u ", kstat_irqs(i
));
469 for (j
= 0; j
< smp_num_cpus
; j
++)
470 p
+= sprintf(p
, "%10u ",
471 kstat
.irqs
[cpu_logical_map(j
)][i
]);
472 for (j
= 0; j
< smp_num_cpus
; j
++)
473 p
+= sprintf(p
, "%10lu ",
474 irq_attempt(cpu_logical_map(j
), i
));
476 p
+= sprintf(p
, " %14s", irq_desc
[i
].handler
->typename
);
477 p
+= sprintf(p
, " %c%s",
478 (action
->flags
& SA_INTERRUPT
)?'+':' ',
481 for (action
=action
->next
; action
; action
= action
->next
) {
482 p
+= sprintf(p
, ", %c%s",
483 (action
->flags
& SA_INTERRUPT
)?'+':' ',
489 p
+= sprintf(p
, "LOC: ");
490 for (j
= 0; j
< smp_num_cpus
; j
++)
491 p
+= sprintf(p
, "%10lu ",
492 cpu_data
[cpu_logical_map(j
)].smp_local_irq_count
);
493 p
+= sprintf(p
, "\n");
499 /* Who has global_irq_lock. */
500 int global_irq_holder
= NO_PROC_ID
;
502 /* This protects IRQ's. */
503 spinlock_t global_irq_lock
= SPIN_LOCK_UNLOCKED
;
505 /* Global IRQ locking depth. */
506 atomic_t global_irq_count
= ATOMIC_INIT(0);
508 static void *previous_irqholder
= NULL
;
510 #define MAXCOUNT 100000000
512 static void show(char * str
, void *where
);
515 wait_on_irq(int cpu
, void *where
)
517 int count
= MAXCOUNT
;
522 * Wait until all interrupts are gone. Wait
523 * for bottom half handlers unless we're
524 * already executing in one..
526 if (!atomic_read(&global_irq_count
)) {
527 if (local_bh_count(cpu
)
528 || !spin_is_locked(&global_bh_lock
))
532 /* Duh, we have to loop. Release the lock to avoid deadlocks */
533 spin_unlock(&global_irq_lock
);
537 show("wait_on_irq", where
);
541 udelay(1); /* make sure to run pending irqs */
544 if (atomic_read(&global_irq_count
))
546 if (spin_is_locked(&global_irq_lock
))
548 if (!local_bh_count(cpu
)
549 && spin_is_locked(&global_bh_lock
))
551 if (spin_trylock(&global_irq_lock
))
558 get_irqlock(int cpu
, void* where
)
560 if (!spin_trylock(&global_irq_lock
)) {
561 /* Do we already hold the lock? */
562 if (cpu
== global_irq_holder
)
564 /* Uhhuh.. Somebody else got it. Wait. */
565 spin_lock(&global_irq_lock
);
569 * Ok, we got the lock bit.
570 * But that's actually just the easy part.. Now
571 * we need to make sure that nobody else is running
572 * in an interrupt context.
574 wait_on_irq(cpu
, where
);
580 global_irq_lock
.task
= current
;
581 global_irq_lock
.previous
= where
;
583 global_irq_holder
= cpu
;
584 previous_irqholder
= where
;
590 int cpu
= smp_processor_id();
591 void *where
= __builtin_return_address(0);
594 * Maximize ipl. If ipl was previously 0 and if this thread
595 * is not in an irq, then take global_irq_lock.
597 if (swpipl(7) == 0 && !local_irq_count(cpu
))
598 get_irqlock(cpu
, where
);
604 int cpu
= smp_processor_id();
606 if (!local_irq_count(cpu
))
607 release_irqlock(cpu
);
612 * SMP flags value to restore to:
619 __global_save_flags(void)
624 int cpu
= smp_processor_id();
627 local_enabled
= (!(flags
& 7));
628 /* default to local */
629 retval
= 2 + local_enabled
;
631 /* Check for global flags if we're not in an interrupt. */
632 if (!local_irq_count(cpu
)) {
635 if (global_irq_holder
== cpu
)
642 __global_restore_flags(unsigned long flags
)
658 printk("global_restore_flags: %08lx (%p)\n",
659 flags
, __builtin_return_address(0));
664 show(char * str
, void *where
)
668 unsigned long *stack
;
670 int cpu
= smp_processor_id();
672 printk("\n%s, CPU %d: %p\n", str
, cpu
, where
);
673 printk("irq: %d [%d %d]\n",
674 atomic_read(&global_irq_count
),
675 cpu_data
[0].irq_count
,
676 cpu_data
[1].irq_count
);
678 printk("bh: %d [%d %d]\n",
679 spin_is_locked(&global_bh_lock
) ? 1 : 0,
680 cpu_data
[0].bh_count
,
681 cpu_data
[1].bh_count
);
683 stack
= (unsigned long *) &str
;
684 for (i
= 40; i
; i
--) {
685 unsigned long x
= *++stack
;
686 if (x
> (unsigned long) &init_task_union
&&
687 x
< (unsigned long) &vsprintf
) {
688 printk("<[%08lx]> ", x
);
695 * From its use, I infer that synchronize_irq() stalls a thread until
696 * the effects of a command to an external device are known to have
697 * taken hold. Typically, the command is to stop sending interrupts.
698 * The strategy here is wait until there is at most one processor
699 * (this one) in an irq. The memory barrier serializes the write to
700 * the device and the subsequent accesses of global_irq_count.
703 #define DEBUG_SYNCHRONIZE_IRQ 0
706 synchronize_irq(void)
710 int cpu
= smp_processor_id();
713 int countdown
= 1<<24;
714 void *where
= __builtin_return_address(0);
718 local_count
= local_irq_count(cpu
);
719 global_count
= atomic_read(&global_irq_count
);
720 if (DEBUG_SYNCHRONIZE_IRQ
&& (--countdown
== 0)) {
721 printk("%d:%d/%d\n", cpu
, local_count
, global_count
);
722 show("synchronize_irq", where
);
725 } while (global_count
!= local_count
);
728 if (atomic_read(&global_irq_count
)) {
737 * do_IRQ handles all normal device IRQ's (the special
738 * SMP cross-CPU interrupts have their own specific
742 handle_irq(int irq
, struct pt_regs
* regs
)
745 * We ack quickly, we don't want the irq controller
746 * thinking we're snobs just because some other CPU has
747 * disabled global interrupts (we have already done the
748 * INT_ACK cycles, it's too late to try to pretend to the
749 * controller that we aren't taking the interrupt).
751 * 0 return value means that this irq is already being
752 * handled by some other CPU. (or is disabled)
754 int cpu
= smp_processor_id();
756 struct irqaction
* action
;
759 if ((unsigned) irq
> ACTUAL_NR_IRQS
) {
760 printk("device_interrupt: illegal interrupt %d\n", irq
);
764 irq_attempt(cpu
, irq
)++;
765 desc
= irq_desc
+ irq
;
766 spin_lock_irq(&irq_controller_lock
); /* mask also the RTC */
767 desc
->handler
->ack(irq
);
769 REPLAY is when Linux resends an IRQ that was dropped earlier
770 WAITING is used by probe to mark irqs that are being tested
772 status
= desc
->status
& ~(IRQ_REPLAY
| IRQ_WAITING
);
773 status
|= IRQ_PENDING
; /* we _want_ to handle it */
776 * If the IRQ is disabled for whatever reason, we cannot
777 * use the action we have.
780 if (!(status
& (IRQ_DISABLED
| IRQ_INPROGRESS
))) {
781 action
= desc
->action
;
782 status
&= ~IRQ_PENDING
; /* we commit to handling */
783 status
|= IRQ_INPROGRESS
; /* we are handling it */
785 desc
->status
= status
;
786 spin_unlock(&irq_controller_lock
);
789 * If there is no IRQ handler or it was disabled, exit early.
790 Since we set PENDING, if another processor is handling
791 a different instance of this same irq, the other processor
792 will take care of it.
798 * Edge triggered interrupts need to remember
800 * This applies to any hw interrupts that allow a second
801 * instance of the same irq to arrive while we are in do_IRQ
802 * or in the handler. But the code here only handles the _second_
803 * instance of the irq, not the third or fourth. So it is mostly
804 * useful for irq hardware that does not mask cleanly in an
808 handle_IRQ_event(irq
, regs
, action
);
809 spin_lock(&irq_controller_lock
);
811 if (!(desc
->status
& IRQ_PENDING
)
812 || (desc
->status
& IRQ_LEVEL
))
814 desc
->status
&= ~IRQ_PENDING
;
815 spin_unlock(&irq_controller_lock
);
817 desc
->status
&= ~IRQ_INPROGRESS
;
818 if (!(desc
->status
& IRQ_DISABLED
))
819 desc
->handler
->end(irq
);
820 spin_unlock(&irq_controller_lock
);
824 * IRQ autodetection code..
826 * This depends on the fact that any interrupt that
827 * comes in on to an unassigned handler will get stuck
828 * with "IRQ_WAITING" cleared and the interrupt
838 /* Something may have generated an irq long ago and we want to
839 flush such a longstanding irq before considering it as spurious. */
840 spin_lock_irq(&irq_controller_lock
);
841 for (i
= NR_IRQS
-1; i
>= 0; i
--)
842 if (!irq_desc
[i
].action
)
843 if(irq_desc
[i
].handler
->startup(i
))
844 irq_desc
[i
].status
|= IRQ_PENDING
;
845 spin_unlock_irq(&irq_controller_lock
);
847 /* Wait for longstanding interrupts to trigger. */
848 for (delay
= jiffies
+ HZ
/50; time_after(delay
, jiffies
); )
849 /* about 20ms delay */ synchronize_irq();
851 /* enable any unassigned irqs (we must startup again here because
852 if a longstanding irq happened in the previous stage, it may have
853 masked itself) first, enable any unassigned irqs. */
854 spin_lock_irq(&irq_controller_lock
);
855 for (i
= NR_IRQS
-1; i
>= 0; i
--) {
856 if (!irq_desc
[i
].action
) {
857 irq_desc
[i
].status
|= IRQ_AUTODETECT
| IRQ_WAITING
;
858 if(irq_desc
[i
].handler
->startup(i
))
859 irq_desc
[i
].status
|= IRQ_PENDING
;
862 spin_unlock_irq(&irq_controller_lock
);
865 * Wait for spurious interrupts to trigger
867 for (delay
= jiffies
+ HZ
/10; time_after(delay
, jiffies
); )
868 /* about 100ms delay */ synchronize_irq();
871 * Now filter out any obviously spurious interrupts
874 spin_lock_irq(&irq_controller_lock
);
875 for (i
=0; i
<NR_IRQS
; i
++) {
876 unsigned int status
= irq_desc
[i
].status
;
878 if (!(status
& IRQ_AUTODETECT
))
881 /* It triggered already - consider it spurious. */
882 if (!(status
& IRQ_WAITING
)) {
883 irq_desc
[i
].status
= status
& ~IRQ_AUTODETECT
;
884 irq_desc
[i
].handler
->shutdown(i
);
891 spin_unlock_irq(&irq_controller_lock
);
897 * Return a mask of triggered interrupts (this
898 * can handle only legacy ISA interrupts).
900 unsigned int probe_irq_mask(unsigned long val
)
906 spin_lock_irq(&irq_controller_lock
);
907 for (i
= 0; i
< 16; i
++) {
908 unsigned int status
= irq_desc
[i
].status
;
910 if (!(status
& IRQ_AUTODETECT
))
913 if (!(status
& IRQ_WAITING
))
916 irq_desc
[i
].status
= status
& ~IRQ_AUTODETECT
;
917 irq_desc
[i
].handler
->shutdown(i
);
919 spin_unlock_irq(&irq_controller_lock
);
925 * Get the result of the IRQ probe.. A negative result means that
926 * we have several candidates (but we return the lowest-numbered
931 probe_irq_off(unsigned long val
)
933 int i
, irq_found
, nr_irqs
;
937 spin_lock_irq(&irq_controller_lock
);
938 for (i
=0; i
<NR_IRQS
; i
++) {
939 unsigned int status
= irq_desc
[i
].status
;
941 if (!(status
& IRQ_AUTODETECT
))
944 if (!(status
& IRQ_WAITING
)) {
949 irq_desc
[i
].status
= status
& ~IRQ_AUTODETECT
;
950 irq_desc
[i
].handler
->shutdown(i
);
952 spin_unlock_irq(&irq_controller_lock
);
955 irq_found
= -irq_found
;
961 * The main interrupt entry point.
965 do_entInt(unsigned long type
, unsigned long vector
, unsigned long la_ptr
,
966 unsigned long a3
, unsigned long a4
, unsigned long a5
,
975 printk("Interprocessor interrupt? You must be kidding\n");
980 cpu_data
[smp_processor_id()].smp_local_irq_count
++;
981 smp_percpu_timer_interrupt(®s
);
982 if (smp_processor_id() == smp_boot_cpuid
)
984 handle_irq(RTC_IRQ
, ®s
);
987 alpha_mv
.machine_check(vector
, la_ptr
, ®s
);
990 alpha_mv
.device_interrupt(vector
, ®s
);
993 perf_irq(vector
, ®s
);
996 printk("Hardware intr %ld %lx? Huh?\n", type
, vector
);
998 printk("PC = %016lx PS=%04lx\n", regs
.pc
, regs
.ps
);
1005 alpha_mv
.init_irq();
1011 #define MCHK_K_TPERR 0x0080
1012 #define MCHK_K_TCPERR 0x0082
1013 #define MCHK_K_HERR 0x0084
1014 #define MCHK_K_ECC_C 0x0086
1015 #define MCHK_K_ECC_NC 0x0088
1016 #define MCHK_K_OS_BUGCHECK 0x008A
1017 #define MCHK_K_PAL_BUGCHECK 0x0090
1020 struct mcheck_info __mcheck_info
;
1024 process_mcheck_info(unsigned long vector
, unsigned long la_ptr
,
1025 struct pt_regs
*regs
, const char *machine
,
1028 struct el_common
*mchk_header
;
1032 * See if the machine check is due to a badaddr() and if so,
1036 #if DEBUG_MCHECK > 0
1037 printk(KERN_CRIT
"%s machine check %s\n", machine
,
1038 expected
? "expected." : "NOT expected!!!");
1042 int cpu
= smp_processor_id();
1043 mcheck_expected(cpu
) = 0;
1044 mcheck_taken(cpu
) = 1;
1048 mchk_header
= (struct el_common
*)la_ptr
;
1050 printk(KERN_CRIT
"%s machine check: vector=0x%lx pc=0x%lx code=0x%lx\n",
1051 machine
, vector
, regs
->pc
, mchk_header
->code
);
1053 switch ((unsigned int) mchk_header
->code
) {
1054 /* Machine check reasons. Defined according to PALcode sources. */
1055 case 0x80: reason
= "tag parity error"; break;
1056 case 0x82: reason
= "tag control parity error"; break;
1057 case 0x84: reason
= "generic hard error"; break;
1058 case 0x86: reason
= "correctable ECC error"; break;
1059 case 0x88: reason
= "uncorrectable ECC error"; break;
1060 case 0x8A: reason
= "OS-specific PAL bugcheck"; break;
1061 case 0x90: reason
= "callsys in kernel mode"; break;
1062 case 0x96: reason
= "i-cache read retryable error"; break;
1063 case 0x98: reason
= "processor detected hard error"; break;
1065 /* System specific (these are for Alcor, at least): */
1066 case 0x202: reason
= "system detected hard error"; break;
1067 case 0x203: reason
= "system detected uncorrectable ECC error"; break;
1068 case 0x204: reason
= "SIO SERR occurred on PCI bus"; break;
1069 case 0x205: reason
= "parity error detected by CIA"; break;
1070 case 0x206: reason
= "SIO IOCHK occurred on ISA bus"; break;
1071 case 0x207: reason
= "non-existent memory error"; break;
1072 case 0x208: reason
= "MCHK_K_DCSR"; break;
1073 case 0x209: reason
= "PCI SERR detected"; break;
1074 case 0x20b: reason
= "PCI data parity error detected"; break;
1075 case 0x20d: reason
= "PCI address parity error detected"; break;
1076 case 0x20f: reason
= "PCI master abort error"; break;
1077 case 0x211: reason
= "PCI target abort error"; break;
1078 case 0x213: reason
= "scatter/gather PTE invalid error"; break;
1079 case 0x215: reason
= "flash ROM write error"; break;
1080 case 0x217: reason
= "IOA timeout detected"; break;
1081 case 0x219: reason
= "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
1082 case 0x21b: reason
= "EISA fail-safe timer timeout"; break;
1083 case 0x21d: reason
= "EISA bus time-out"; break;
1084 case 0x21f: reason
= "EISA software generated NMI"; break;
1085 case 0x221: reason
= "unexpected ev5 IRQ[3] interrupt"; break;
1086 default: reason
= "unknown"; break;
1089 printk(KERN_CRIT
"machine check type: %s%s\n",
1090 reason
, mchk_header
->retry
? " (retryable)" : "");
1092 dik_show_regs(regs
, NULL
);
1094 #if DEBUG_MCHECK > 1
1096 /* Dump the logout area to give all info. */
1097 unsigned long *ptr
= (unsigned long *)la_ptr
;
1099 for (i
= 0; i
< mchk_header
->size
/ sizeof(long); i
+= 2) {
1100 printk(KERN_CRIT
" +%8lx %016lx %016lx\n",
1101 i
*sizeof(long), ptr
[i
], ptr
[i
+1]);