Linux 2.2.0pre2 (December 31 1998)
[davej-history.git] / arch / ppc / kernel / irq.c
blobf4a7c714389c1dfc1a340df53019cfdb182690fa
1 /*
2 * $Id: irq.c,v 1.91 1998/12/28 10:28:47 paulus Exp $
4 * arch/ppc/kernel/irq.c
6 * Derived from arch/i386/kernel/irq.c
7 * Copyright (C) 1992 Linus Torvalds
8 * Adapted from arch/i386 by Gary Thomas
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Cort Dougan
12 * Adapted for Power Macintosh by Paul Mackerras
13 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 * This file contains the code used by various IRQ handling routines:
17 * asking for different IRQ's should be done through these routines
18 * instead of just grabbing them. Thus setups with different IRQ numbers
19 * shouldn't result in any weird surprises, and installing new handlers
20 * should be easier.
22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
24 * mask register (of which only 16 are defined), hence the weird shifting
25 * and complement of the cached_irq_mask. I want to be able to stuff
26 * this right into the SIU SMASK register.
27 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
28 * to reduce code space and undefined function references.
32 #include <linux/ptrace.h>
33 #include <linux/errno.h>
34 #include <linux/kernel_stat.h>
35 #include <linux/signal.h>
36 #include <linux/sched.h>
37 #include <linux/ioport.h>
38 #include <linux/interrupt.h>
39 #include <linux/timex.h>
40 #include <linux/config.h>
41 #include <linux/init.h>
42 #include <linux/malloc.h>
43 #include <linux/openpic.h>
44 #include <linux/pci.h>
46 #include <asm/bitops.h>
47 #include <asm/hydra.h>
48 #include <asm/system.h>
49 #include <asm/io.h>
50 #include <asm/pgtable.h>
51 #include <asm/irq.h>
52 #include <asm/bitops.h>
53 #include <asm/gg2.h>
54 #include <asm/cache.h>
55 #include <asm/prom.h>
56 #include <asm/amigaints.h>
57 #include <asm/amigahw.h>
58 #include <asm/amigappc.h>
59 #ifdef CONFIG_8xx
60 #include <asm/8xx_immap.h>
61 #include <asm/mbx.h>
62 #endif
64 extern void process_int(unsigned long vec, struct pt_regs *fp);
65 extern void apus_init_IRQ(void);
66 extern void amiga_disable_irq(unsigned int irq);
67 extern void amiga_enable_irq(unsigned int irq);
68 static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
69 static volatile unsigned char *chrp_int_ack_special;
70 extern volatile unsigned long ipi_count;
71 static void pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base);
73 #ifdef CONFIG_APUS
74 /* Rename a few functions. Requires the CONFIG_APUS protection. */
75 #define request_irq nop_ppc_request_irq
76 #define free_irq nop_ppc_free_irq
77 #define get_irq_list nop_get_irq_list
78 #endif
79 #ifndef CONFIG_8xx
80 void (*mask_and_ack_irq)(int irq_nr);
81 void (*mask_irq)(unsigned int irq_nr);
82 void (*unmask_irq)(unsigned int irq_nr);
83 #else /* CONFIG_8xx */
84 /* init_IRQ() happens too late for the MBX because we initialize the
85 * CPM early and it calls request_irq() before we have these function
86 * pointers initialized.
88 #define mask_and_ack_irq(irq) mbx_mask_irq(irq)
89 #define mask_irq(irq) mbx_mask_irq(irq)
90 #define unmask_irq(irq) mbx_unmask_irq(irq)
91 #endif /* CONFIG_8xx */
93 #define VEC_SPUR (24)
94 #undef SHOW_IRQ
95 #undef SHOW_GATWICK_IRQS
96 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
97 #define cached_21 (((char *)(cached_irq_mask))[3])
98 #define cached_A1 (((char *)(cached_irq_mask))[2])
99 #define PREP_IRQ_MASK (((unsigned int)cached_A1)<<8) | (unsigned int)cached_21
101 unsigned int local_bh_count[NR_CPUS];
102 unsigned int local_irq_count[NR_CPUS];
103 int max_irqs;
104 int max_real_irqs;
105 static struct irqaction *irq_action[NR_IRQS];
106 static int spurious_interrupts = 0;
107 static unsigned int cached_irq_mask[NR_MASK_WORDS];
108 unsigned int lost_interrupts[NR_MASK_WORDS];
109 atomic_t n_lost_interrupts;
111 /* pmac */
/* Register layout of one bank (32 sources) of the PowerMac interrupt
 * controller, as accessed by the pmac_* routines below:
 *   flag   - pending-interrupt bits (read in do_IRQ)
 *   enable - per-source enable mask (1 = enabled)
 *   ack    - write-1-to-ack
 *   level  - current level of the input lines
 */
112 struct pmac_irq_hw {
113 unsigned int flag;
114 unsigned int enable;
115 unsigned int ack;
116 unsigned int level;
119 /* XXX these addresses should be obtained from the device tree */
/* Default physical addresses; init_IRQ() replaces these with ioremap'ed
 * addresses taken from the mac-io node(s) when available. */
120 volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
121 (struct pmac_irq_hw *) 0xf3000020,
122 (struct pmac_irq_hw *) 0xf3000010,
123 (struct pmac_irq_hw *) 0xf4000020,
124 (struct pmac_irq_hw *) 0xf4000010,
127 /* This is the interrupt used on the main controller for the secondary
128 controller. Happens on PowerBooks G3 Series (a second mac-io)
129 -- BenH
/* -999 is a sentinel meaning "no secondary controller present"; it is
 * set to a real irq line by init_IRQ() when a second mac-io is found. */
131 static int second_irq = -999;
133 /* Returns the number of 0's to the left of the most significant 1 bit */
/* Thin wrapper around the PowerPC "count leading zeros word" instruction.
 * NOTE(review): per the PowerPC ISA, cntlzw of 0 yields 32 — callers in
 * do_IRQ() only invoke this with a non-zero "bits" value. */
134 static inline int cntlzw(int bits)
136 int lz;
138 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (bits));
139 return lz;
/* Execute a PowerPC "sync": orders all preceding storage accesses before
 * any following ones. Used here to make sure MMIO writes have reached the
 * interrupt controller before interrupts are re-enabled. */
142 static inline void sync(void)
144 asm volatile ("sync");
147 /* nasty hack for shared irq's since we need to do kmalloc calls but
148 * can't very early in the boot when we need to do a request irq.
149 * this needs to be removed.
150 * -- Cort
152 static char cache_bitmask = 0;
153 static struct irqaction malloc_cache[4];
154 extern int mem_init_done;
156 void *irq_kmalloc(size_t size, int pri)
158 unsigned int i;
159 if ( mem_init_done )
160 return kmalloc(size,pri);
161 for ( i = 0; i <= 3 ; i++ )
162 if ( ! ( cache_bitmask & (1<<i) ) )
164 cache_bitmask |= (1<<i);
165 return (void *)(&malloc_cache[i]);
167 return 0;
170 void irq_kfree(void *ptr)
172 unsigned int i;
173 for ( i = 0 ; i <= 3 ; i++ )
174 if ( ptr == &malloc_cache[i] )
176 cache_bitmask &= ~(1<<i);
177 return;
179 kfree(ptr);
182 #ifndef CONFIG_8xx
/* Mask "irq_nr" in the cached 8259 mask and issue a specific EOI.
 * irqs 0-7 live on the master PIC (ports 0x20/0x21), irqs 8-15 on the
 * slave (0xA0/0xA1); a slave irq also needs an EOI for the cascade line
 * (irq 2) on the master. The dummy inb delays between port accesses. */
183 void i8259_mask_and_ack_irq(int irq_nr)
185 /* spin_lock(&irq_controller_lock);*/
186 cached_irq_mask[0] |= 1 << irq_nr;
187 if (irq_nr > 7) {
188 inb(0xA1); /* DUMMY */
189 outb(cached_A1,0xA1);
190 outb(0x62,0x20); /* Specific EOI to cascade */
191 /*outb(0x20,0xA0);*/
192 outb(0x60|(irq_nr-8), 0xA0); /* specific eoi */
193 } else {
194 inb(0x21); /* DUMMY */
195 outb(cached_21,0x21);
196 /*outb(0x20,0x20);*/
197 outb(0x60|irq_nr,0x20); /* specific eoi */
200 /* spin_unlock(&irq_controller_lock);*/
/* Mask and acknowledge an interrupt on the PowerMac controller.
 * On pmac a set bit in cached_irq_mask means *enabled*, so masking
 * clears the bit. Any pending "lost interrupt" replay for this source
 * is cancelled too. The ack is written both before and after updating
 * the enable register — presumably to close a race with the controller;
 * do not reorder these MMIO writes. */
203 void __pmac pmac_mask_and_ack_irq(int irq_nr)
205 unsigned long bit = 1UL << (irq_nr & 0x1f);
206 int i = irq_nr >> 5;
208 if ((unsigned)irq_nr >= max_irqs)
209 return;
210 /*spin_lock(&irq_controller_lock);*/
212 clear_bit(irq_nr, cached_irq_mask);
213 if (test_and_clear_bit(irq_nr, lost_interrupts))
214 atomic_dec(&n_lost_interrupts);
215 out_le32(&pmac_irq_hw[i]->ack, bit);
216 out_le32(&pmac_irq_hw[i]->enable, cached_irq_mask[i]);
217 out_le32(&pmac_irq_hw[i]->ack, bit);
218 /* make sure ack gets to controller before we enable interrupts */
219 sync();
221 /*spin_unlock(&irq_controller_lock);*/
222 /*if ( irq_controller_lock.lock )
223 panic("irq controller lock still held in mask and ack\n");*/
226 void __openfirmware chrp_mask_and_ack_irq(int irq_nr)
228 /* spinlocks are done by i8259_mask_and_ack() - Cort */
229 if (is_8259_irq(irq_nr))
230 i8259_mask_and_ack_irq(irq_nr);
234 static void i8259_set_irq_mask(int irq_nr)
236 if (irq_nr > 7) {
237 outb(cached_A1,0xA1);
238 } else {
239 outb(cached_21,0x21);
/* Write the cached enable mask for irq_nr's bank out to the PowerMac
 * controller, and work around a hardware quirk: re-enabling a source
 * whose line is already asserted does not latch a new event, so such an
 * interrupt would be lost. Detect that case (enabled + level high +
 * flag clear) and queue a replay via lost_interrupts, which do_IRQ()
 * folds into its pending bits. */
243 static void __pmac pmac_set_irq_mask(int irq_nr)
245 unsigned long bit = 1UL << (irq_nr & 0x1f);
246 int i = irq_nr >> 5;
248 if ((unsigned)irq_nr >= max_irqs)
249 return;
251 /* enable unmasked interrupts */
252 out_le32(&pmac_irq_hw[i]->enable, cached_irq_mask[i]);
255 * Unfortunately, setting the bit in the enable register
256 * when the device interrupt is already on *doesn't* set
257 * the bit in the flag register or request another interrupt.
259 if ((bit & cached_irq_mask[i])
260 && (ld_le32(&pmac_irq_hw[i]->level) & bit)
261 && !(ld_le32(&pmac_irq_hw[i]->flag) & bit)) {
262 if (!test_and_set_bit(irq_nr, lost_interrupts))
263 atomic_inc(&n_lost_interrupts);
268 * These have to be protected by the spinlock
269 * before being called.
271 static void i8259_mask_irq(unsigned int irq_nr)
273 cached_irq_mask[0] |= 1 << irq_nr;
274 i8259_set_irq_mask(irq_nr);
277 static void i8259_unmask_irq(unsigned int irq_nr)
279 cached_irq_mask[0] &= ~(1 << irq_nr);
280 i8259_set_irq_mask(irq_nr);
/* Disable a PowerMac irq. Note the polarity: on pmac a set bit in
 * cached_irq_mask means enabled, so masking clears it. The sync()
 * makes sure the MMIO write reached the controller before returning. */
283 static void __pmac pmac_mask_irq(unsigned int irq_nr)
285 clear_bit(irq_nr, cached_irq_mask);
286 pmac_set_irq_mask(irq_nr);
287 sync();
/* Enable a PowerMac irq (set bit = enabled); pmac_set_irq_mask() also
 * handles replaying an interrupt that would otherwise be lost. */
290 static void __pmac pmac_unmask_irq(unsigned int irq_nr)
292 set_bit(irq_nr, cached_irq_mask);
293 pmac_set_irq_mask(irq_nr);
296 static void __openfirmware chrp_mask_irq(unsigned int irq_nr)
298 if (is_8259_irq(irq_nr))
299 i8259_mask_irq(irq_nr);
300 else
301 openpic_disable_irq(irq_to_openpic(irq_nr));
304 static void __openfirmware chrp_unmask_irq(unsigned int irq_nr)
306 if (is_8259_irq(irq_nr))
307 i8259_unmask_irq(irq_nr);
308 else
309 openpic_enable_irq(irq_to_openpic(irq_nr));
311 #else /* CONFIG_8xx */
312 static void mbx_mask_irq(unsigned int irq_nr)
314 cached_irq_mask[0] &= ~(1 << (31-irq_nr));
315 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask =
316 cached_irq_mask[0];
319 static void mbx_unmask_irq(unsigned int irq_nr)
321 cached_irq_mask[0] |= (1 << (31-irq_nr));
322 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask =
323 cached_irq_mask[0];
325 #endif /* CONFIG_8xx */
/* Disable an irq and wait for any handler currently running on another
 * CPU to complete before returning. */
void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}
/* Re-enable an irq previously disabled with disable_irq(). */
void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}
/* Format the /proc/interrupts text into "buf": one header row of CPU
 * labels, one row per active irq (count, controller name, handler
 * names), plus IPI and spurious-interrupt totals. Returns the number
 * of characters written. */
346 int get_irq_list(char *buf)
348 int i, len = 0, j;
349 struct irqaction * action;
351 len += sprintf(buf+len, " ");
352 for (j=0; j<smp_num_cpus; j++)
353 len += sprintf(buf+len, "CPU%d ",j);
354 *(char *)(buf+len++) = '\n';
356 for (i = 0 ; i < NR_IRQS ; i++) {
357 action = irq_action[i];
/* skip unused irqs, but always show the Gatwick cascade line */
358 if ((!action || !action->handler) && (i != second_irq))
359 continue;
360 len += sprintf(buf+len, "%3d: ", i);
361 #ifdef __SMP__
362 for (j = 0; j < smp_num_cpus; j++)
363 len += sprintf(buf+len, "%10u ",
364 kstat.irqs[cpu_logical_map(j)][i]);
365 #else
366 len += sprintf(buf+len, "%10u ", kstat_irqs(i));
367 #endif /* __SMP__ */
/* name the controller this irq belongs to, per machine type */
368 switch( _machine )
370 case _MACH_prep:
371 len += sprintf(buf+len, " 82c59 ");
372 break;
373 case _MACH_Pmac:
374 if (i < 64)
375 len += sprintf(buf+len, " PMAC-PIC ");
376 else
377 len += sprintf(buf+len, " GATWICK ");
378 break;
379 case _MACH_chrp:
380 if ( is_8259_irq(i) )
381 len += sprintf(buf+len, " 82c59 ");
382 else
383 len += sprintf(buf+len, " OpenPIC ");
384 break;
385 case _MACH_mbx:
386 len += sprintf(buf+len, " MPC8xx ");
387 break;
390 if (i != second_irq) {
391 len += sprintf(buf+len, " %s",action->name);
392 for (action=action->next; action; action = action->next) {
393 len += sprintf(buf+len, ", %s", action->name);
395 len += sprintf(buf+len, "\n");
396 } else
397 len += sprintf(buf+len, " Gatwick secondary IRQ controller\n");
399 #ifdef __SMP__
400 /* should this be per processor send/receive? */
401 len += sprintf(buf+len, "IPI: %10lu", ipi_count);
402 for ( i = 0 ; i <= smp_num_cpus-1; i++ )
403 len += sprintf(buf+len," ");
404 len += sprintf(buf+len, " interprocessor messages received\n");
405 #endif
406 len += sprintf(buf+len, "BAD: %10u",spurious_interrupts);
407 for ( i = 0 ; i <= smp_num_cpus-1; i++ )
408 len += sprintf(buf+len," ");
409 len += sprintf(buf+len, " spurious or short\n");
410 return len;
415 * Global interrupt locks for SMP. Allow interrupts to come in on any
416 * CPU, yet make cli/sti act globally to protect critical regions..
418 #ifdef __SMP__
419 unsigned char global_irq_holder = NO_PROC_ID;
420 unsigned volatile int global_irq_lock;
421 atomic_t global_irq_count;
423 atomic_t global_bh_count;
424 atomic_t global_bh_lock;
/* Debug dump used when a CPU spins too long on the global irq/bh
 * locks: prints the irq/bh counters and then walks up this stack
 * printing any word that looks like a kernel text address (the
 * init_task_union..vsprintf bounds are a heuristic filter). */
426 static void show(char * str)
428 int i;
429 unsigned long *stack;
430 int cpu = smp_processor_id();
432 printk("\n%s, CPU %d:\n", str, cpu);
433 printk("irq: %d [%d %d]\n",
434 atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
435 printk("bh: %d [%d %d]\n",
436 atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
437 stack = (unsigned long *) &str;
438 for (i = 40; i ; i--) {
439 unsigned long x = *++stack;
440 if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
441 printk("<[%08lx]> ", x);
446 #define MAXCOUNT 100000000
/* Busy-wait until no CPU is executing a bottom half. If we spin for
 * MAXCOUNT iterations, dump diagnostics once and keep waiting. */
447 static inline void wait_on_bh(void)
449 int count = MAXCOUNT;
450 do {
451 if (!--count) {
452 show("wait_on_bh");
453 count = ~0;
455 /* nothing .. wait for the other bh's to go away */
456 } while (atomic_read(&global_bh_count) != 0);
/* Called with the global irq lock held: wait until no interrupt handler
 * (and, unless we are in one ourselves, no bottom half) runs anywhere.
 * If we must wait, the lock is dropped and interrupts are briefly
 * re-enabled each iteration to avoid deadlock, then the lock is
 * re-acquired — the exact __sti/__cli/test ordering is deliberate. */
460 static inline void wait_on_irq(int cpu)
462 int count = MAXCOUNT;
464 for (;;) {
467 * Wait until all interrupts are gone. Wait
468 * for bottom half handlers unless we're
469 * already executing in one..
471 if (!atomic_read(&global_irq_count)) {
472 if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
473 break;
476 /* Duh, we have to loop. Release the lock to avoid deadlocks */
477 clear_bit(0,&global_irq_lock);
479 for (;;) {
480 if (!--count) {
481 show("wait_on_irq");
482 count = ~0;
484 __sti();
485 /* don't worry about the lock race Linus found
486 * on intel here. -- Cort
488 __cli();
489 if (atomic_read(&global_irq_count))
490 continue;
491 if (global_irq_lock)
492 continue;
493 if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
494 continue;
495 if (!test_and_set_bit(0,&global_irq_lock))
496 break;
502 * This is called when we want to synchronize with
503 * bottom half handlers. We need to wait until
504 * no other CPU is executing any bottom half handler.
506 * Don't wait if we're already running in an interrupt
507 * context or are inside a bh handler.
509 void synchronize_bh(void)
511 if (atomic_read(&global_bh_count) && !in_interrupt())
512 wait_on_bh();
517 * This is called when we want to synchronize with
518 * interrupts. We may for example tell a device to
519 * stop sending interrupts: but to make sure there
520 * are no interrupts that are executing on another
521 * CPU we need to call this function.
523 void synchronize_irq(void)
525 if (atomic_read(&global_irq_count)) {
526 /* Stupid approach */
527 cli();
528 sti();
/* Acquire the global irq lock for "cpu" (recursive: returns at once if
 * this CPU already holds it), then wait until no other CPU is in an
 * interrupt handler. The inner test_bit loop spins without the bus
 * traffic of test_and_set; diagnostics fire after MAXCOUNT spins. */
532 static inline void get_irqlock(int cpu)
534 unsigned int loops = MAXCOUNT;
536 if (test_and_set_bit(0,&global_irq_lock)) {
537 /* do we already hold the lock? */
538 if ((unsigned char) cpu == global_irq_holder)
539 return;
540 /* Uhhuh.. Somebody else got it. Wait.. */
541 do {
542 do {
543 if (loops-- == 0) {
544 printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
545 #ifdef CONFIG_XMON
546 xmon(0);
547 #endif
549 } while (test_bit(0,&global_irq_lock));
550 } while (test_and_set_bit(0,&global_irq_lock));
553 * We also need to make sure that nobody else is running
554 * in an interrupt context.
556 wait_on_irq(cpu);
559 * Ok, finally..
561 global_irq_holder = cpu;
565 * A global "cli()" while in an interrupt context
566 * turns into just a local cli(). Interrupts
567 * should use spinlocks for the (very unlikely)
568 * case that they ever want to protect against
569 * each other.
571 * If we already have local interrupts disabled,
572 * this will not turn a local disable into a
573 * global one (problems with spinlocks: this makes
574 * save_flags+cli+sti usable inside a spinlock).
576 void __global_cli(void)
578 unsigned int flags;
580 __save_flags(flags);
581 if (flags & (1 << 15)) {
582 int cpu = smp_processor_id();
583 __cli();
584 if (!local_irq_count[cpu])
585 get_irqlock(cpu);
589 void __global_sti(void)
591 int cpu = smp_processor_id();
593 if (!local_irq_count[cpu])
594 release_irqlock(cpu);
595 __sti();
599 * SMP flags value to restore to:
600 * 0 - global cli
601 * 1 - global sti
602 * 2 - local cli
603 * 3 - local sti
605 unsigned long __global_save_flags(void)
607 int retval;
608 int local_enabled;
609 unsigned long flags;
611 __save_flags(flags);
612 local_enabled = (flags >> 15) & 1;
613 /* default to local */
614 retval = 2 + local_enabled;
616 /* check for global flags if we're not in an interrupt */
617 if (!local_irq_count[smp_processor_id()]) {
618 if (local_enabled)
619 retval = 1;
620 if (global_irq_holder == (unsigned char) smp_processor_id())
621 retval = 0;
623 return retval;
/* Restore interrupt state from a __global_save_flags() value:
 * 0 = global cli, 1 = global sti, 2 = local cli, 3 = local sti.
 * Anything else indicates a caller bug and is reported (together with
 * the word below "flags" on the stack, as a crude return-address hint). */
void __global_restore_flags(unsigned long flags)
{
	if (flags == 0)
		__global_cli();
	else if (flags == 1)
		__global_sti();
	else if (flags == 2)
		__cli();
	else if (flags == 3)
		__sti();
	else
		printk("global_restore_flags: %08lx (%08lx)\n",
		       flags, (&flags)[-1]);
}
647 #endif /* __SMP__ */
/* Main interrupt entry point, called from the assembly glue in head.S.
 * Finds which irq is pending (in a machine-specific way), masks and
 * acks it, runs the chain of registered handlers, then unmasks.
 * "isfake" marks a software-generated (do_fake_interrupt) entry. */
649 asmlinkage void do_IRQ(struct pt_regs *regs, int isfake)
651 int irq;
652 unsigned long bits;
653 struct irqaction *action;
654 int cpu = smp_processor_id();
655 int status;
656 int openpic_eoi_done = 0;
658 /* save the HID0 in case dcache was off - see idle.c
659 * this hack should leave for a better solution -- Cort */
660 unsigned dcache_locked;
662 dcache_locked = unlock_dcache();
663 hardirq_enter(cpu);
664 #ifndef CONFIG_8xx
665 #ifdef __SMP__
/* Secondary CPUs only ever take IPIs here; they never touch the
 * interrupt controller. */
666 if ( cpu != 0 )
668 if (!isfake)
670 extern void smp_message_recv(void);
671 #ifdef CONFIG_XMON
672 static int xmon_2nd;
673 if (xmon_2nd)
674 xmon(regs);
675 #endif
676 smp_message_recv();
677 goto out;
679 /* could be here due to a do_fake_interrupt call but we don't
680 mess with the controller from the second cpu -- Cort */
681 goto out;
/* spin (bounded, with diagnostics) if another CPU holds the global
 * irq lock; holding it ourselves here would be a bug. */
685 unsigned int loops = MAXCOUNT;
686 while (test_bit(0, &global_irq_lock)) {
687 if (smp_processor_id() == global_irq_holder) {
688 printk("uh oh, interrupt while we hold global irq lock!\n");
689 #ifdef CONFIG_XMON
690 xmon(0);
691 #endif
692 break;
694 if (loops-- == 0) {
695 printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
696 #ifdef CONFIG_XMON
697 xmon(0);
698 #endif
702 #endif /* __SMP__ */
/* Machine-specific scan for the highest-priority pending irq. */
704 switch ( _machine )
706 case _MACH_Pmac:
/* scan each 32-bit bank from the top; lost_interrupts holds
 * software-replayed events (see pmac_set_irq_mask). */
707 for (irq = max_real_irqs - 1; irq > 0; irq -= 32) {
708 int i = irq >> 5;
709 bits = ld_le32(&pmac_irq_hw[i]->flag)
710 | lost_interrupts[i];
711 if (bits == 0)
712 continue;
713 irq -= cntlzw(bits);
714 break;
717 /* Here, we handle interrupts coming from Gatwick,
718 * normal interrupt code will take care of acking and
719 * masking the irq on Gatwick itself but we ack&mask
720 * the Gatwick main interrupt on Heathrow now. It's
721 * unmasked later, after interrupt handling. -- BenH
723 if (irq == second_irq) {
724 mask_and_ack_irq(second_irq);
725 for (irq = max_irqs - 1; irq > max_real_irqs; irq -= 32) {
726 int i = irq >> 5;
727 bits = ld_le32(&pmac_irq_hw[i]->flag)
728 | lost_interrupts[i];
729 if (bits == 0)
730 continue;
731 irq -= cntlzw(bits);
732 break;
734 /* If not found, on exit, irq is 63 (128-1-32-32).
735 * We set it to -1 and revalidate second controller
737 if (irq < max_real_irqs) {
738 irq = -1;
739 unmask_irq(second_irq);
741 #ifdef SHOW_GATWICK_IRQS
742 printk("Gatwick irq %d (i:%d, bits:0x%08lx\n", irq, i, bits);
743 #endif
746 break;
747 case _MACH_chrp:
748 irq = openpic_irq(0);
749 if (irq == IRQ_8259_CASCADE)
752 * This magic address generates a PCI IACK cycle.
754 * This should go in the above mask/ack code soon. -- Cort
756 irq = *chrp_int_ack_special;
758 * Acknowledge as soon as possible to allow i8259
759 * interrupt nesting
761 openpic_eoi(0);
762 openpic_eoi_done = 1;
764 else if (irq >= OPENPIC_VEC_TIMER)
767 * OpenPIC interrupts >64 will be used for other purposes
768 * like interprocessor interrupts and hardware errors
770 if (irq == OPENPIC_VEC_SPURIOUS) {
772 * Spurious interrupts should never be
773 * acknowledged
775 spurious_interrupts++;
776 openpic_eoi_done = 1;
777 } else {
779 * Here we should process IPI timer
780 * for now the interrupt is dismissed.
783 goto out;
785 break;
786 case _MACH_prep:
/* poll the 8259s: OCW3 0x0C = read in-service/poll */
787 outb(0x0C, 0x20);
788 irq = inb(0x20) & 7;
789 if (irq == 2)
791 retry_cascade:
792 outb(0x0C, 0xA0);
793 irq = inb(0xA0);
794 /* if no intr left */
795 if ( !(irq & 128 ) )
796 goto out;
797 irq = (irq&7) + 8;
799 bits = 1UL << irq;
800 break;
801 #ifdef CONFIG_APUS
802 case _MACH_apus:
/* APUS dispatches via the IPL emulation registers and
 * process_int(); it exits through apus_out, not the common path. */
804 int old_level, new_level;
806 old_level = ~(regs->mq) & IPLEMU_IPLMASK;
807 new_level = (~(regs->mq) >> 3) & IPLEMU_IPLMASK;
809 if (new_level == 0)
811 goto apus_out;
814 APUS_WRITE(APUS_IPL_EMU, IPLEMU_IPLMASK);
815 APUS_WRITE(APUS_IPL_EMU, (IPLEMU_SETRESET
816 | (~(new_level) & IPLEMU_IPLMASK)));
817 APUS_WRITE(APUS_IPL_EMU, IPLEMU_DISABLEINT);
819 process_int (VEC_SPUR+new_level, regs);
821 APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_DISABLEINT);
822 APUS_WRITE(APUS_IPL_EMU, IPLEMU_IPLMASK);
823 APUS_WRITE(APUS_IPL_EMU, (IPLEMU_SETRESET
824 | (~(old_level) & IPLEMU_IPLMASK)));
826 apus_out:
827 hardirq_exit(cpu);
828 APUS_WRITE(APUS_IPL_EMU, IPLEMU_DISABLEINT);
829 goto out2;
831 #endif
834 if (irq < 0) {
835 /* we get here with Gatwick but the 'bogus' isn't correct in that case -- Cort */
836 if ( irq != second_irq )
838 printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
839 irq, regs->nip);
840 spurious_interrupts++;
842 goto out;
845 #else /* CONFIG_8xx */
846 /* For MPC8xx, read the SIVEC register and shift the bits down
847 * to get the irq number.
849 bits = ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec;
850 irq = bits >> 26;
851 #endif /* CONFIG_8xx */
/* Common path: mask+ack, run the handler chain, then unmask. */
852 mask_and_ack_irq(irq);
853 status = 0;
854 action = irq_action[irq];
855 kstat.irqs[cpu][irq]++;
856 if (action && action->handler) {
/* fast handlers (SA_INTERRUPT) run with interrupts off */
857 if (!(action->flags & SA_INTERRUPT))
858 __sti();
859 do {
860 status |= action->flags;
861 action->handler(irq, action->dev_id, regs);
862 action = action->next;
863 } while ( action );
864 __cli();
865 unmask_irq(irq);
866 } else {
867 #ifndef CONFIG_8xx
868 if ( irq == 7 ) /* i8259 gives us irq 7 on 'short' intrs */
869 #endif
870 spurious_interrupts++;
/* no handler registered: keep the source masked */
871 disable_irq( irq );
874 /* This was a gatwick sub-interrupt, we re-enable them on Heathrow
875 now */
876 if (_machine == _MACH_Pmac && irq >= max_real_irqs)
877 unmask_irq(second_irq);
879 /* make sure we don't miss any cascade intrs due to eoi-ing irq 2 */
880 #ifndef CONFIG_8xx
881 if ( is_prep && (irq > 7) )
882 goto retry_cascade;
883 /* do_bottom_half is called if necessary from int_return in head.S */
884 out:
885 if (_machine == _MACH_chrp && !openpic_eoi_done)
886 openpic_eoi(0);
887 #endif /* CONFIG_8xx */
888 hardirq_exit(cpu);
890 #ifdef CONFIG_APUS
891 out2:
892 #endif
893 /* restore the HID0 in case dcache was off - see idle.c
894 * this hack should leave for a better solution -- Cort */
895 lock_dcache(dcache_locked);
898 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
899 unsigned long irqflags, const char * devname, void *dev_id)
901 struct irqaction *old, **p, *action;
902 unsigned long flags;
904 #ifdef SHOW_IRQ
905 printk("request_irq(): irq %d handler %08x name %s dev_id %04x\n",
906 irq,(int)handler,devname,(int)dev_id);
907 #endif /* SHOW_IRQ */
909 if (irq >= NR_IRQS)
910 return -EINVAL;
912 /* Cannot allocate second controller IRQ */
913 if (irq == second_irq)
914 return -EBUSY;
916 if (!handler)
918 /* Free */
919 for (p = irq + irq_action; (action = *p) != NULL; p = &action->next)
921 /* Found it - now free it */
922 save_flags(flags);
923 cli();
924 *p = action->next;
925 restore_flags(flags);
926 irq_kfree(action);
927 return 0;
929 return -ENOENT;
932 action = (struct irqaction *)
933 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
934 if (!action)
935 return -ENOMEM;
936 save_flags(flags);
937 cli();
939 action->handler = handler;
940 action->flags = irqflags;
941 action->mask = 0;
942 action->name = devname;
943 action->dev_id = dev_id;
944 action->next = NULL;
945 enable_irq(irq);
946 p = irq_action + irq;
948 if ((old = *p) != NULL) {
949 /* Can't share interrupts unless both agree to */
950 if (!(old->flags & action->flags & SA_SHIRQ))
951 return -EBUSY;
952 /* add new interrupt at end of irq queue */
953 do {
954 p = &old->next;
955 old = *p;
956 } while (old);
958 *p = action;
960 restore_flags(flags);
961 return 0;
964 void free_irq(unsigned int irq, void *dev_id)
966 request_irq(irq, NULL, 0, NULL, dev_id);
/* IRQ autoprobing is not supported on this architecture; report an
 * empty probe mask. */
unsigned long probe_irq_on (void)
{
	return 0UL;
}
/* Autoprobe stub: nothing was probed (see probe_irq_on), so there is
 * never an irq to report regardless of the mask passed in. */
int probe_irq_off (unsigned long irqs)
{
	return 0;
}
979 #ifndef CONFIG_8xx
/* Program the cascaded pair of 8259 PICs with the standard four-byte
 * ICW1-ICW4 initialization sequence, apply the cached masks, and claim
 * the cascade line (irq 2) so the slave's interrupts get through.
 * The outb order within each sequence is mandated by the chip. */
980 __initfunc(static void i8259_init(void))
982 /* init master interrupt controller */
983 outb(0x11, 0x20); /* Start init sequence */
984 outb(0x00, 0x21); /* Vector base */
985 outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
986 outb(0x01, 0x21); /* Select 8086 mode */
987 outb(0xFF, 0x21); /* Mask all */
989 /* init slave interrupt controller */
990 outb(0x11, 0xA0); /* Start init sequence */
991 outb(0x08, 0xA1); /* Vector base */
992 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
993 outb(0x01, 0xA1); /* Select 8086 mode */
994 outb(0xFF, 0xA1); /* Mask all */
995 outb(cached_A1, 0xA1);
996 outb(cached_21, 0x21);
997 if (request_irq(2, no_action, SA_INTERRUPT, "cascade", NULL) != 0)
998 panic("Could not allocate cascade IRQ!");
999 enable_irq(2); /* Enable cascade interrupt */
1001 #endif /* CONFIG_8xx */
1003 /* On MBX8xx, the interrupt control (SIEL) was set by EPPC-bug. External
1004 * interrupts can be either edge or level triggered, but there is no
1005 * reason for us to change the EPPC-bug values (it would not work if we did).
/* Boot-time interrupt setup: selects the mask/ack/unmask function
 * pointers for the detected machine type, maps and quiets the
 * controllers, and (on pmac) discovers the secondary Gatwick
 * controller from the device tree. */
1007 __initfunc(void init_IRQ(void))
1009 extern void xmon_irq(int, void *, struct pt_regs *);
1010 int i;
1011 struct device_node *irqctrler;
1012 unsigned long addr;
1013 struct device_node *np;
1015 #ifndef CONFIG_8xx
1016 switch (_machine)
1018 case _MACH_Pmac:
1019 mask_and_ack_irq = pmac_mask_and_ack_irq;
1020 mask_irq = pmac_mask_irq;
1021 unmask_irq = pmac_unmask_irq;
1023 /* G3 powermacs have 64 interrupts, G3 Series PowerBook have 128,
1024 others have 32 */
1025 max_irqs = max_real_irqs = 32;
1026 irqctrler = find_devices("mac-io");
1027 if (irqctrler)
1029 max_real_irqs = 64;
1030 if (irqctrler->next)
1031 max_irqs = 128;
1032 else
1033 max_irqs = 64;
1036 /* get addresses of first controller */
1037 if (irqctrler) {
1038 if (irqctrler->n_addrs > 0) {
1039 addr = (unsigned long)
1040 ioremap(irqctrler->addrs[0].address, 0x40);
/* banks are laid out high-to-low in the register space */
1041 for (i = 0; i < 2; ++i)
1042 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
1043 (addr + (2 - i) * 0x10);
1046 /* get addresses of second controller */
1047 irqctrler = (irqctrler->next) ? irqctrler->next : NULL;
1048 if (irqctrler && irqctrler->n_addrs > 0) {
1049 addr = (unsigned long)
1050 ioremap(irqctrler->addrs[0].address, 0x40);
1051 for (i = 2; i < 4; ++i)
1052 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
1053 (addr + (4 - i) * 0x10);
1057 /* disable all interrupts in all controllers */
1058 for (i = 0; i * 32 < max_irqs; ++i)
1059 out_le32(&pmac_irq_hw[i]->enable, 0);
1062 /* get interrupt line of secondary interrupt controller */
1063 if (irqctrler) {
1064 second_irq = irqctrler->intrs[0].line;
1065 printk(KERN_INFO "irq: secondary controller on irq %d\n",
1066 (int)second_irq);
1067 if (device_is_compatible(irqctrler, "gatwick"))
1068 pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
1069 enable_irq(second_irq);
1071 printk("System has %d possible interrupts\n", max_irqs);
1072 if (max_irqs != max_real_irqs)
1073 printk(KERN_DEBUG "%d interrupts on main controller\n",
1074 max_real_irqs);
1076 #ifdef CONFIG_XMON
1077 request_irq(20, xmon_irq, 0, "NMI", 0);
1078 #endif /* CONFIG_XMON */
1079 break;
1080 case _MACH_chrp:
1081 mask_and_ack_irq = chrp_mask_and_ack_irq;
1082 mask_irq = chrp_mask_irq;
1083 unmask_irq = chrp_unmask_irq;
/* address that generates a PCI interrupt-acknowledge cycle,
 * read by do_IRQ() to fetch the 8259 vector */
1085 if ( !(np = find_devices("pci") ) )
1086 printk("Cannot find pci to get ack address\n");
1087 else
1089 chrp_int_ack_special = (volatile unsigned char *)
1090 (*(unsigned long *)get_property(np,
1091 "8259-interrupt-acknowledge", NULL));
1093 openpic_init(1);
1094 i8259_init();
1095 cached_irq_mask[0] = cached_irq_mask[1] = ~0UL;
1096 #ifdef CONFIG_XMON
1097 request_irq(openpic_to_irq(HYDRA_INT_ADB_NMI),
1098 xmon_irq, 0, "NMI", 0);
1099 #endif /* CONFIG_XMON */
1100 break;
1101 case _MACH_prep:
1102 mask_and_ack_irq = i8259_mask_and_ack_irq;
1103 mask_irq = i8259_mask_irq;
1104 unmask_irq = i8259_unmask_irq;
1105 cached_irq_mask[0] = ~0UL;
1107 i8259_init();
1109 * According to the Carolina spec from ibm irqs 0,1,2, and 8
1110 * must be edge triggered. Also, the pci intrs must be level
1111 * triggered and _only_ isa intrs can be level sensitive
1112 * which are 3-7,9-12,14-15. 13 is special - it can be level.
1114 * power on default is 0's in both regs - all edge.
1116 * These edge/level control regs allow edge/level status
1117 * to be decided on a irq basis instead of on a PIC basis.
1118 * It's still pretty ugly.
1119 * - Cort
/* NOTE(review): irq_mode1/irq_mode2 are computed but never
 * written to the edge/level control registers here — the write
 * appears to be missing or done elsewhere; confirm. */
1122 unsigned char irq_mode1 = 0, irq_mode2 = 0;
1123 irq_mode1 = 0; /* to get rid of compiler warnings */
1125 * On Carolina, irq 15 and 13 must be level (scsi/ide/net).
1127 if ( _prep_type == _PREP_IBM )
1128 irq_mode2 |= 0xa0;
1130 break;
1131 #ifdef CONFIG_APUS
1132 case _MACH_apus:
1133 mask_irq = amiga_disable_irq;
1134 unmask_irq = amiga_enable_irq;
1135 apus_init_IRQ();
1136 break;
1137 #endif
1139 #endif /* CONFIG_8xx */
1142 /* This routine will fix some missing interrupt values in the device tree
1143 * on the gatwick mac-io controller used by some PowerBooks
1145 static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
1147 struct device_node *node;
1148 static struct interrupt_info int_pool[4];
1150 memset(int_pool, 0, sizeof(int_pool));
1151 node = gw->child;
1152 while(node)
1154 /* Fix SCC */
1155 if (strcasecmp(node->name, "escc") == 0)
1156 if (node->child && node->child->n_intrs == 0)
1158 node->child->n_intrs = 1;
1159 node->child->intrs = &int_pool[0];
1160 int_pool[0].line = 15+irq_base;
1161 printk(KERN_INFO "irq: fixed SCC on second controller (%d)\n",
1162 int_pool[0].line);
1164 /* Fix media-bay & left SWIM */
1165 if (strcasecmp(node->name, "media-bay") == 0)
1167 struct device_node* ya_node;
1169 if (node->n_intrs == 0)
1171 node->n_intrs = 1;
1172 node->intrs = &int_pool[1];
1173 int_pool[1].line = 29+irq_base;
1174 printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
1175 int_pool[1].line);
1177 ya_node = node->child;
1178 while(ya_node)
1180 if ((strcasecmp(ya_node->name, "floppy") == 0) &&
1181 ya_node->n_intrs == 0)
1183 ya_node->n_intrs = 2;
1184 ya_node->intrs = &int_pool[2];
1185 int_pool[2].line = 19+irq_base;
1186 int_pool[3].line = 1+irq_base;
1187 printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
1188 int_pool[2].line, int_pool[3].line);
1190 ya_node = ya_node->sibling;
1193 node = node->sibling;