2 * $Id: irq.c,v 1.91 1998/12/28 10:28:47 paulus Exp $
4 * arch/ppc/kernel/irq.c
6 * Derived from arch/i386/kernel/irq.c
7 * Copyright (C) 1992 Linus Torvalds
8 * Adapted from arch/i386 by Gary Thomas
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Cort Dougan
12 * Adapted for Power Macintosh by Paul Mackerras
13 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 * This file contains the code used by various IRQ handling routines:
17 * asking for different IRQ's should be done through these routines
18 * instead of just grabbing them. Thus setups with different IRQ numbers
19 * shouldn't result in any weird surprises, and installing new handlers
22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
24 * mask register (of which only 16 are defined), hence the weird shifting
25 * and complement of the cached_irq_mask.  I want to be able to stuff
26 * this right into the SIU SMASK register.
27 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
28 * to reduce code space and undefined function references.
32 #include <linux/ptrace.h>
33 #include <linux/errno.h>
34 #include <linux/kernel_stat.h>
35 #include <linux/signal.h>
36 #include <linux/sched.h>
37 #include <linux/ioport.h>
38 #include <linux/interrupt.h>
39 #include <linux/timex.h>
40 #include <linux/config.h>
41 #include <linux/init.h>
42 #include <linux/malloc.h>
43 #include <linux/openpic.h>
44 #include <linux/pci.h>
46 #include <asm/bitops.h>
47 #include <asm/hydra.h>
48 #include <asm/system.h>
50 #include <asm/pgtable.h>
52 #include <asm/bitops.h>
54 #include <asm/cache.h>
56 #include <asm/amigaints.h>
57 #include <asm/amigahw.h>
58 #include <asm/amigappc.h>
60 #include <asm/8xx_immap.h>
64 extern void process_int(unsigned long vec
, struct pt_regs
*fp
);
65 extern void apus_init_IRQ(void);
66 extern void amiga_disable_irq(unsigned int irq
);
67 extern void amiga_enable_irq(unsigned int irq
);
/* Dummy interrupt handler: installed on lines (e.g. the i8259 cascade)
 * that must be claimed but need no actual servicing. */
static void no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	/* intentionally empty */
}
69 static volatile unsigned char *chrp_int_ack_special
;
70 extern volatile unsigned long ipi_count
;
71 static void pmac_fix_gatwick_interrupts(struct device_node
*gw
, int irq_base
);
74 /* Rename a few functions. Requires the CONFIG_APUS protection. */
75 #define request_irq nop_ppc_request_irq
76 #define free_irq nop_ppc_free_irq
77 #define get_irq_list nop_get_irq_list
80 void (*mask_and_ack_irq
)(int irq_nr
);
81 void (*mask_irq
)(unsigned int irq_nr
);
82 void (*unmask_irq
)(unsigned int irq_nr
);
83 #else /* CONFIG_8xx */
84 /* init_IRQ() happens too late for the MBX because we initialize the
85 * CPM early and it calls request_irq() before we have these function
86 * pointers initialized.
88 #define mask_and_ack_irq(irq) mbx_mask_irq(irq)
89 #define mask_irq(irq) mbx_mask_irq(irq)
90 #define unmask_irq(irq) mbx_unmask_irq(irq)
91 #endif /* CONFIG_8xx */
95 #undef SHOW_GATWICK_IRQS
96 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
97 #define cached_21 (((char *)(cached_irq_mask))[3])
98 #define cached_A1 (((char *)(cached_irq_mask))[2])
99 #define PREP_IRQ_MASK (((unsigned int)cached_A1)<<8) | (unsigned int)cached_21
101 unsigned int local_bh_count
[NR_CPUS
];
102 unsigned int local_irq_count
[NR_CPUS
];
105 static struct irqaction
*irq_action
[NR_IRQS
];
106 static int spurious_interrupts
= 0;
107 static unsigned int cached_irq_mask
[NR_MASK_WORDS
];
108 unsigned int lost_interrupts
[NR_MASK_WORDS
];
109 atomic_t n_lost_interrupts
;
119 /* XXX these addresses should be obtained from the device tree */
120 volatile struct pmac_irq_hw
*pmac_irq_hw
[4] = {
121 (struct pmac_irq_hw
*) 0xf3000020,
122 (struct pmac_irq_hw
*) 0xf3000010,
123 (struct pmac_irq_hw
*) 0xf4000020,
124 (struct pmac_irq_hw
*) 0xf4000010,
127 /* This is the interrupt used on the main controller for the secondary
128 controller. Happens on PowerBooks G3 Series (a second mac-io)
131 static int second_irq
= -999;
133 /* Returns the number of 0's to the left of the most significant 1 bit */
134 static inline int cntlzw(int bits
)
138 asm ("cntlzw %0,%1" : "=r" (lz
) : "r" (bits
));
142 static inline void sync(void)
144 asm volatile ("sync");
147 /* nasty hack for shared irq's since we need to do kmalloc calls but
148 * can't very early in the boot when we need to do a request irq.
149 * this needs to be removed.
152 static char cache_bitmask
= 0;
153 static struct irqaction malloc_cache
[4];
154 extern int mem_init_done
;
156 void *irq_kmalloc(size_t size
, int pri
)
160 return kmalloc(size
,pri
);
161 for ( i
= 0; i
<= 3 ; i
++ )
162 if ( ! ( cache_bitmask
& (1<<i
) ) )
164 cache_bitmask
|= (1<<i
);
165 return (void *)(&malloc_cache
[i
]);
170 void irq_kfree(void *ptr
)
173 for ( i
= 0 ; i
<= 3 ; i
++ )
174 if ( ptr
== &malloc_cache
[i
] )
176 cache_bitmask
&= ~(1<<i
);
183 void i8259_mask_and_ack_irq(int irq_nr
)
185 /* spin_lock(&irq_controller_lock);*/
186 cached_irq_mask
[0] |= 1 << irq_nr
;
188 inb(0xA1); /* DUMMY */
189 outb(cached_A1
,0xA1);
190 outb(0x62,0x20); /* Specific EOI to cascade */
192 outb(0x60|(irq_nr
-8), 0xA0); /* specific eoi */
194 inb(0x21); /* DUMMY */
195 outb(cached_21
,0x21);
197 outb(0x60|irq_nr
,0x20); /* specific eoi */
200 /* spin_unlock(&irq_controller_lock);*/
203 void __pmac
pmac_mask_and_ack_irq(int irq_nr
)
205 unsigned long bit
= 1UL << (irq_nr
& 0x1f);
208 if ((unsigned)irq_nr
>= max_irqs
)
210 /*spin_lock(&irq_controller_lock);*/
212 clear_bit(irq_nr
, cached_irq_mask
);
213 if (test_and_clear_bit(irq_nr
, lost_interrupts
))
214 atomic_dec(&n_lost_interrupts
);
215 out_le32(&pmac_irq_hw
[i
]->ack
, bit
);
216 out_le32(&pmac_irq_hw
[i
]->enable
, cached_irq_mask
[i
]);
217 out_le32(&pmac_irq_hw
[i
]->ack
, bit
);
218 /* make sure ack gets to controller before we enable interrupts */
221 /*spin_unlock(&irq_controller_lock);*/
222 /*if ( irq_controller_lock.lock )
223 panic("irq controller lock still held in mask and ack\n");*/
226 void __openfirmware
chrp_mask_and_ack_irq(int irq_nr
)
228 /* spinlocks are done by i8259_mask_and_ack() - Cort */
229 if (is_8259_irq(irq_nr
))
230 i8259_mask_and_ack_irq(irq_nr
);
234 static void i8259_set_irq_mask(int irq_nr
)
237 outb(cached_A1
,0xA1);
239 outb(cached_21
,0x21);
243 static void __pmac
pmac_set_irq_mask(int irq_nr
)
245 unsigned long bit
= 1UL << (irq_nr
& 0x1f);
248 if ((unsigned)irq_nr
>= max_irqs
)
251 /* enable unmasked interrupts */
252 out_le32(&pmac_irq_hw
[i
]->enable
, cached_irq_mask
[i
]);
255 * Unfortunately, setting the bit in the enable register
256 * when the device interrupt is already on *doesn't* set
257 * the bit in the flag register or request another interrupt.
259 if ((bit
& cached_irq_mask
[i
])
260 && (ld_le32(&pmac_irq_hw
[i
]->level
) & bit
)
261 && !(ld_le32(&pmac_irq_hw
[i
]->flag
) & bit
)) {
262 if (!test_and_set_bit(irq_nr
, lost_interrupts
))
263 atomic_inc(&n_lost_interrupts
);
268 * These have to be protected by the spinlock
269 * before being called.
271 static void i8259_mask_irq(unsigned int irq_nr
)
273 cached_irq_mask
[0] |= 1 << irq_nr
;
274 i8259_set_irq_mask(irq_nr
);
277 static void i8259_unmask_irq(unsigned int irq_nr
)
279 cached_irq_mask
[0] &= ~(1 << irq_nr
);
280 i8259_set_irq_mask(irq_nr
);
283 static void __pmac
pmac_mask_irq(unsigned int irq_nr
)
285 clear_bit(irq_nr
, cached_irq_mask
);
286 pmac_set_irq_mask(irq_nr
);
290 static void __pmac
pmac_unmask_irq(unsigned int irq_nr
)
292 set_bit(irq_nr
, cached_irq_mask
);
293 pmac_set_irq_mask(irq_nr
);
296 static void __openfirmware
chrp_mask_irq(unsigned int irq_nr
)
298 if (is_8259_irq(irq_nr
))
299 i8259_mask_irq(irq_nr
);
301 openpic_disable_irq(irq_to_openpic(irq_nr
));
304 static void __openfirmware
chrp_unmask_irq(unsigned int irq_nr
)
306 if (is_8259_irq(irq_nr
))
307 i8259_unmask_irq(irq_nr
);
309 openpic_enable_irq(irq_to_openpic(irq_nr
));
311 #else /* CONFIG_8xx */
312 static void mbx_mask_irq(unsigned int irq_nr
)
314 cached_irq_mask
[0] &= ~(1 << (31-irq_nr
));
315 ((immap_t
*)IMAP_ADDR
)->im_siu_conf
.sc_simask
=
319 static void mbx_unmask_irq(unsigned int irq_nr
)
321 cached_irq_mask
[0] |= (1 << (31-irq_nr
));
322 ((immap_t
*)IMAP_ADDR
)->im_siu_conf
.sc_simask
=
325 #endif /* CONFIG_8xx */
/* Disable irq_nr via the per-platform mask_irq hook, then wait for any
 * in-flight handler on another CPU to finish.
 * NOTE(review): the mask_irq()/synchronize_irq() calls were lost in
 * extraction (only the commented-out locking survived); reconstructed. */
void disable_irq(unsigned int irq_nr)
{
	/*unsigned long flags;*/

	/* spin_lock_irqsave(&irq_controller_lock, flags);*/
	mask_irq(irq_nr);
	/* spin_unlock_irqrestore(&irq_controller_lock, flags);*/
	synchronize_irq();
}
/* Re-enable irq_nr via the per-platform unmask_irq hook.
 * NOTE(review): the unmask_irq() call was lost in extraction;
 * reconstructed. */
void enable_irq(unsigned int irq_nr)
{
	/*unsigned long flags;*/

	/* spin_lock_irqsave(&irq_controller_lock, flags);*/
	unmask_irq(irq_nr);
	/* spin_unlock_irqrestore(&irq_controller_lock, flags);*/
}
/*
 * get_irq_list(): format the /proc/interrupts listing into buf and
 * return its length (per-irq counts, controller name, handler names,
 * plus IPI and spurious counters).
 * NOTE(review): this chunk is a lossy extraction -- declarations,
 * braces and the #ifdef/#else structure between the numbered fragments
 * are missing, so the surviving text is preserved verbatim below; it
 * cannot compile as-is.  The leading numbers are original-file line
 * numbers left behind by the extraction.
 */
346 int get_irq_list(char *buf
)
349 struct irqaction
* action
;
351 len
+= sprintf(buf
+len
, " ");
352 for (j
=0; j
<smp_num_cpus
; j
++)
353 len
+= sprintf(buf
+len
, "CPU%d ",j
);
354 *(char *)(buf
+len
++) = '\n';
356 for (i
= 0 ; i
< NR_IRQS
; i
++) {
357 action
= irq_action
[i
];
358 if ((!action
|| !action
->handler
) && (i
!= second_irq
))
360 len
+= sprintf(buf
+len
, "%3d: ", i
);
362 for (j
= 0; j
< smp_num_cpus
; j
++)
363 len
+= sprintf(buf
+len
, "%10u ",
364 kstat
.irqs
[cpu_logical_map(j
)][i
]);
366 len
+= sprintf(buf
+len
, "%10u ", kstat_irqs(i
));
/* NOTE(review): the fragments below were presumably selected by a
 * switch on the machine type / #ifdef CONFIG_8xx -- structure lost. */
371 len
+= sprintf(buf
+len
, " 82c59 ");
375 len
+= sprintf(buf
+len
, " PMAC-PIC ");
377 len
+= sprintf(buf
+len
, " GATWICK ");
380 if ( is_8259_irq(i
) )
381 len
+= sprintf(buf
+len
, " 82c59 ");
383 len
+= sprintf(buf
+len
, " OpenPIC ");
386 len
+= sprintf(buf
+len
, " MPC8xx ");
390 if (i
!= second_irq
) {
391 len
+= sprintf(buf
+len
, " %s",action
->name
);
392 for (action
=action
->next
; action
; action
= action
->next
) {
393 len
+= sprintf(buf
+len
, ", %s", action
->name
);
395 len
+= sprintf(buf
+len
, "\n");
397 len
+= sprintf(buf
+len
, " Gatwick secondary IRQ controller\n");
400 /* should this be per processor send/receive? */
401 len
+= sprintf(buf
+len
, "IPI: %10lu", ipi_count
);
402 for ( i
= 0 ; i
<= smp_num_cpus
-1; i
++ )
403 len
+= sprintf(buf
+len
," ");
404 len
+= sprintf(buf
+len
, " interprocessor messages received\n");
406 len
+= sprintf(buf
+len
, "BAD: %10u",spurious_interrupts
);
407 for ( i
= 0 ; i
<= smp_num_cpus
-1; i
++ )
408 len
+= sprintf(buf
+len
," ");
409 len
+= sprintf(buf
+len
, " spurious or short\n");
415 * Global interrupt locks for SMP. Allow interrupts to come in on any
416 * CPU, yet make cli/sti act globally to protect critical regions..
419 unsigned char global_irq_holder
= NO_PROC_ID
;
420 unsigned volatile int global_irq_lock
;
421 atomic_t global_irq_count
;
423 atomic_t global_bh_count
;
424 atomic_t global_bh_lock
;
426 static void show(char * str
)
429 unsigned long *stack
;
430 int cpu
= smp_processor_id();
432 printk("\n%s, CPU %d:\n", str
, cpu
);
433 printk("irq: %d [%d %d]\n",
434 atomic_read(&global_irq_count
), local_irq_count
[0], local_irq_count
[1]);
435 printk("bh: %d [%d %d]\n",
436 atomic_read(&global_bh_count
), local_bh_count
[0], local_bh_count
[1]);
437 stack
= (unsigned long *) &str
;
438 for (i
= 40; i
; i
--) {
439 unsigned long x
= *++stack
;
440 if (x
> (unsigned long) &init_task_union
&& x
< (unsigned long) &vsprintf
) {
441 printk("<[%08lx]> ", x
);
446 #define MAXCOUNT 100000000
447 static inline void wait_on_bh(void)
449 int count
= MAXCOUNT
;
455 /* nothing .. wait for the other bh's to go away */
456 } while (atomic_read(&global_bh_count
) != 0);
460 static inline void wait_on_irq(int cpu
)
462 int count
= MAXCOUNT
;
467 * Wait until all interrupts are gone. Wait
468 * for bottom half handlers unless we're
469 * already executing in one..
471 if (!atomic_read(&global_irq_count
)) {
472 if (local_bh_count
[cpu
] || !atomic_read(&global_bh_count
))
476 /* Duh, we have to loop. Release the lock to avoid deadlocks */
477 clear_bit(0,&global_irq_lock
);
485 /* don't worry about the lock race Linus found
486 * on intel here. -- Cort
489 if (atomic_read(&global_irq_count
))
493 if (!local_bh_count
[cpu
] && atomic_read(&global_bh_count
))
495 if (!test_and_set_bit(0,&global_irq_lock
))
502 * This is called when we want to synchronize with
503 * bottom half handlers. We need to wait until
504 * no other CPU is executing any bottom half handler.
506 * Don't wait if we're already running in an interrupt
507 * context or are inside a bh handler.
509 void synchronize_bh(void)
511 if (atomic_read(&global_bh_count
) && !in_interrupt())
517 * This is called when we want to synchronize with
518 * interrupts. We may for example tell a device to
519 * stop sending interrupts: but to make sure there
520 * are no interrupts that are executing on another
521 * CPU we need to call this function.
523 void synchronize_irq(void)
525 if (atomic_read(&global_irq_count
)) {
526 /* Stupid approach */
532 static inline void get_irqlock(int cpu
)
534 unsigned int loops
= MAXCOUNT
;
536 if (test_and_set_bit(0,&global_irq_lock
)) {
537 /* do we already hold the lock? */
538 if ((unsigned char) cpu
== global_irq_holder
)
540 /* Uhhuh.. Somebody else got it. Wait.. */
544 printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu
, global_irq_holder
);
549 } while (test_bit(0,&global_irq_lock
));
550 } while (test_and_set_bit(0,&global_irq_lock
));
553 * We also need to make sure that nobody else is running
554 * in an interrupt context.
561 global_irq_holder
= cpu
;
565 * A global "cli()" while in an interrupt context
566 * turns into just a local cli(). Interrupts
567 * should use spinlocks for the (very unlikely)
568 * case that they ever want to protect against
571 * If we already have local interrupts disabled,
572 * this will not turn a local disable into a
573 * global one (problems with spinlocks: this makes
574 * save_flags+cli+sti usable inside a spinlock).
576 void __global_cli(void)
581 if (flags
& (1 << 15)) {
582 int cpu
= smp_processor_id();
584 if (!local_irq_count
[cpu
])
589 void __global_sti(void)
591 int cpu
= smp_processor_id();
593 if (!local_irq_count
[cpu
])
594 release_irqlock(cpu
);
599 * SMP flags value to restore to:
605 unsigned long __global_save_flags(void)
612 local_enabled
= (flags
>> 15) & 1;
613 /* default to local */
614 retval
= 2 + local_enabled
;
616 /* check for global flags if we're not in an interrupt */
617 if (!local_irq_count
[smp_processor_id()]) {
620 if (global_irq_holder
== (unsigned char) smp_processor_id())
/* Restore from a __global_save_flags() value (see encoding above).
 * NOTE(review): only the default-case printk survived extraction; the
 * switch is reconstructed from the documented 0-3 encoding. */
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}
/*
 * do_IRQ(): first-level interrupt dispatch, entered from head.S with
 * the saved register frame.  The surviving fragments show per-platform
 * decode paths (PowerMac controller scan incl. Gatwick secondary,
 * CHRP OpenPIC + 8259 cascade via a PCI IACK read, APUS IPL emulation,
 * MPC8xx SIVEC read), then the common mask/ack, handler-chain walk and
 * per-platform re-enable/EOI tail.
 * NOTE(review): this chunk is a lossy extraction -- declarations,
 * braces and most of the #ifdef/switch structure between the numbered
 * fragments are missing; the text is preserved verbatim and cannot
 * compile as-is.  Leading numbers are original-file line numbers.
 */
649 asmlinkage
void do_IRQ(struct pt_regs
*regs
, int isfake
)
653 struct irqaction
*action
;
654 int cpu
= smp_processor_id();
656 int openpic_eoi_done
= 0;
658 /* save the HID0 in case dcache was off - see idle.c
659 * this hack should leave for a better solution -- Cort */
660 unsigned dcache_locked
;
662 dcache_locked
= unlock_dcache();
670 extern void smp_message_recv(void);
679 /* could be here due to a do_fake_interrupt call but we don't
680 mess with the controller from the second cpu -- Cort */
685 unsigned int loops
= MAXCOUNT
;
686 while (test_bit(0, &global_irq_lock
)) {
687 if (smp_processor_id() == global_irq_holder
) {
688 printk("uh oh, interrupt while we hold global irq lock!\n");
695 printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder
);
707 for (irq
= max_real_irqs
- 1; irq
> 0; irq
-= 32) {
709 bits
= ld_le32(&pmac_irq_hw
[i
]->flag
)
710 | lost_interrupts
[i
];
717 /* Here, we handle interrupts coming from Gatwick,
718 * normal interrupt code will take care of acking and
719 * masking the irq on Gatwick itself but we ack&mask
720 * the Gatwick main interrupt on Heathrow now. It's
721 * unmasked later, after interrupt handling. -- BenH
723 if (irq
== second_irq
) {
724 mask_and_ack_irq(second_irq
);
725 for (irq
= max_irqs
- 1; irq
> max_real_irqs
; irq
-= 32) {
727 bits
= ld_le32(&pmac_irq_hw
[i
]->flag
)
728 | lost_interrupts
[i
];
734 /* If not found, on exit, irq is 63 (128-1-32-32).
735 * We set it to -1 and revalidate second controller
737 if (irq
< max_real_irqs
) {
739 unmask_irq(second_irq
);
741 #ifdef SHOW_GATWICK_IRQS
742 printk("Gatwick irq %d (i:%d, bits:0x%08lx\n", irq
, i
, bits
);
748 irq
= openpic_irq(0);
749 if (irq
== IRQ_8259_CASCADE
)
752 * This magic address generates a PCI IACK cycle.
754 * This should go in the above mask/ack code soon. -- Cort
756 irq
= *chrp_int_ack_special
;
758 * Acknowledge as soon as possible to allow i8259
762 openpic_eoi_done
= 1;
764 else if (irq
>= OPENPIC_VEC_TIMER
)
767 * OpenPIC interrupts >64 will be used for other purposes
768 * like interprocessor interrupts and hardware errors
770 if (irq
== OPENPIC_VEC_SPURIOUS
) {
772 * Spurious interrupts should never be
775 spurious_interrupts
++;
776 openpic_eoi_done
= 1;
779 * Here we should process IPI timer
780 * for now the interrupt is dismissed.
794 /* if no intr left */
804 int old_level
, new_level
;
806 old_level
= ~(regs
->mq
) & IPLEMU_IPLMASK
;
807 new_level
= (~(regs
->mq
) >> 3) & IPLEMU_IPLMASK
;
814 APUS_WRITE(APUS_IPL_EMU
, IPLEMU_IPLMASK
);
815 APUS_WRITE(APUS_IPL_EMU
, (IPLEMU_SETRESET
816 | (~(new_level
) & IPLEMU_IPLMASK
)));
817 APUS_WRITE(APUS_IPL_EMU
, IPLEMU_DISABLEINT
);
819 process_int (VEC_SPUR
+new_level
, regs
);
821 APUS_WRITE(APUS_IPL_EMU
, IPLEMU_SETRESET
| IPLEMU_DISABLEINT
);
822 APUS_WRITE(APUS_IPL_EMU
, IPLEMU_IPLMASK
);
823 APUS_WRITE(APUS_IPL_EMU
, (IPLEMU_SETRESET
824 | (~(old_level
) & IPLEMU_IPLMASK
)));
828 APUS_WRITE(APUS_IPL_EMU
, IPLEMU_DISABLEINT
);
835 /* we get here with Gatwick but the 'bogus' isn't correct in that case -- Cort */
836 if ( irq
!= second_irq
)
838 printk(KERN_DEBUG
"Bogus interrupt %d from PC = %lx\n",
840 spurious_interrupts
++;
845 #else /* CONFIG_8xx */
846 /* For MPC8xx, read the SIVEC register and shift the bits down
847 * to get the irq number.
849 bits
= ((immap_t
*)IMAP_ADDR
)->im_siu_conf
.sc_sivec
;
851 #endif /* CONFIG_8xx */
852 mask_and_ack_irq(irq
);
854 action
= irq_action
[irq
];
855 kstat
.irqs
[cpu
][irq
]++;
856 if (action
&& action
->handler
) {
857 if (!(action
->flags
& SA_INTERRUPT
))
860 status
|= action
->flags
;
861 action
->handler(irq
, action
->dev_id
, regs
);
862 action
= action
->next
;
868 if ( irq
== 7 ) /* i8259 gives us irq 7 on 'short' intrs */
870 spurious_interrupts
++;
874 /* This was a gatwick sub-interrupt, we re-enable them on Heathrow
876 if (_machine
== _MACH_Pmac
&& irq
>= max_real_irqs
)
877 unmask_irq(second_irq
);
879 /* make sure we don't miss any cascade intrs due to eoi-ing irq 2 */
881 if ( is_prep
&& (irq
> 7) )
883 /* do_bottom_half is called if necessary from int_return in head.S */
885 if (_machine
== _MACH_chrp
&& !openpic_eoi_done
)
887 #endif /* CONFIG_8xx */
893 /* restore the HID0 in case dcache was off - see idle.c
894 * this hack should leave for a better solution -- Cort */
895 lock_dcache(dcache_locked
);
/*
 * request_irq(): install (or, with handler == NULL, remove -- see
 * free_irq below) an interrupt handler for 'irq'.  The surviving
 * fragments show: refusal of the Gatwick secondary line, the
 * remove-by-dev_id walk, allocation of the irqaction via irq_kmalloc,
 * field initialization, and the SA_SHIRQ sharing check when a handler
 * already exists.
 * NOTE(review): lossy extraction -- error paths, braces and several
 * statements between the numbered fragments are missing; text kept
 * verbatim.  Leading numbers are original-file line numbers.
 */
898 int request_irq(unsigned int irq
, void (*handler
)(int, void *, struct pt_regs
*),
899 unsigned long irqflags
, const char * devname
, void *dev_id
)
901 struct irqaction
*old
, **p
, *action
;
905 printk("request_irq(): irq %d handler %08x name %s dev_id %04x\n",
906 irq
,(int)handler
,devname
,(int)dev_id
);
907 #endif /* SHOW_IRQ */
912 /* Cannot allocate second controller IRQ */
913 if (irq
== second_irq
)
919 for (p
= irq
+ irq_action
; (action
= *p
) != NULL
; p
= &action
->next
)
921 /* Found it - now free it */
925 restore_flags(flags
);
932 action
= (struct irqaction
*)
933 irq_kmalloc(sizeof(struct irqaction
), GFP_KERNEL
);
939 action
->handler
= handler
;
940 action
->flags
= irqflags
;
942 action
->name
= devname
;
943 action
->dev_id
= dev_id
;
946 p
= irq_action
+ irq
;
948 if ((old
= *p
) != NULL
) {
949 /* Can't share interrupts unless both agree to */
950 if (!(old
->flags
& action
->flags
& SA_SHIRQ
))
952 /* add new interrupt at end of irq queue */
960 restore_flags(flags
);
964 void free_irq(unsigned int irq
, void *dev_id
)
966 request_irq(irq
, NULL
, 0, NULL
, dev_id
);
/* IRQ autoprobing is not supported on this platform.
 * NOTE(review): body lost in extraction; reconstructed as the stub
 * return -- confirm against the original. */
unsigned long probe_irq_on (void)
{
	return 0;
}
/* IRQ autoprobing is not supported on this platform.
 * NOTE(review): body lost in extraction; reconstructed as the stub
 * return -- confirm against the original. */
int probe_irq_off (unsigned long irqs)
{
	return 0;
}
980 __initfunc(static void i8259_init(void))
982 /* init master interrupt controller */
983 outb(0x11, 0x20); /* Start init sequence */
984 outb(0x00, 0x21); /* Vector base */
985 outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */
986 outb(0x01, 0x21); /* Select 8086 mode */
987 outb(0xFF, 0x21); /* Mask all */
989 /* init slave interrupt controller */
990 outb(0x11, 0xA0); /* Start init sequence */
991 outb(0x08, 0xA1); /* Vector base */
992 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
993 outb(0x01, 0xA1); /* Select 8086 mode */
994 outb(0xFF, 0xA1); /* Mask all */
995 outb(cached_A1
, 0xA1);
996 outb(cached_21
, 0x21);
997 if (request_irq(2, no_action
, SA_INTERRUPT
, "cascade", NULL
) != 0)
998 panic("Could not allocate cascade IRQ!");
999 enable_irq(2); /* Enable cascade interrupt */
1001 #endif /* CONFIG_8xx */
/*
 * init_IRQ(): boot-time interrupt setup.  The surviving fragments show
 * per-machine branches that install the mask/unmask/mask_and_ack
 * function pointers and initialize the controllers: PowerMac
 * (including a Gatwick secondary mac-io), CHRP (OpenPIC + 8259 with a
 * PCI IACK address from the device tree), PReP (8259 + Carolina
 * edge/level setup) and APUS (Amiga hooks).
 * NOTE(review): lossy extraction -- the machine-type switch, braces
 * and many statements between the numbered fragments are missing; text
 * kept verbatim and cannot compile as-is.  Leading numbers are
 * original-file line numbers.
 */
1003 /* On MBX8xx, the interrupt control (SIEL) was set by EPPC-bug. External
1004 * interrupts can be either edge or level triggered, but there is no
1005 * reason for us to change the EPPC-bug values (it would not work if we did).
1007 __initfunc(void init_IRQ(void))
1009 extern void xmon_irq(int, void *, struct pt_regs
*);
1011 struct device_node
*irqctrler
;
1013 struct device_node
*np
;
1019 mask_and_ack_irq
= pmac_mask_and_ack_irq
;
1020 mask_irq
= pmac_mask_irq
;
1021 unmask_irq
= pmac_unmask_irq
;
1023 /* G3 powermacs have 64 interrupts, G3 Series PowerBook have 128,
1025 max_irqs
= max_real_irqs
= 32;
1026 irqctrler
= find_devices("mac-io");
1030 if (irqctrler
->next
)
1036 /* get addresses of first controller */
1038 if (irqctrler
->n_addrs
> 0) {
1039 addr
= (unsigned long)
1040 ioremap(irqctrler
->addrs
[0].address
, 0x40);
1041 for (i
= 0; i
< 2; ++i
)
1042 pmac_irq_hw
[i
] = (volatile struct pmac_irq_hw
*)
1043 (addr
+ (2 - i
) * 0x10);
1046 /* get addresses of second controller */
1047 irqctrler
= (irqctrler
->next
) ? irqctrler
->next
: NULL
;
1048 if (irqctrler
&& irqctrler
->n_addrs
> 0) {
1049 addr
= (unsigned long)
1050 ioremap(irqctrler
->addrs
[0].address
, 0x40);
1051 for (i
= 2; i
< 4; ++i
)
1052 pmac_irq_hw
[i
] = (volatile struct pmac_irq_hw
*)
1053 (addr
+ (4 - i
) * 0x10);
1057 /* disable all interrupts in all controllers */
1058 for (i
= 0; i
* 32 < max_irqs
; ++i
)
1059 out_le32(&pmac_irq_hw
[i
]->enable
, 0);
1062 /* get interrupt line of secondary interrupt controller */
1064 second_irq
= irqctrler
->intrs
[0].line
;
1065 printk(KERN_INFO
"irq: secondary controller on irq %d\n",
1067 if (device_is_compatible(irqctrler
, "gatwick"))
1068 pmac_fix_gatwick_interrupts(irqctrler
, max_real_irqs
);
1069 enable_irq(second_irq
);
1071 printk("System has %d possible interrupts\n", max_irqs
);
1072 if (max_irqs
!= max_real_irqs
)
1073 printk(KERN_DEBUG
"%d interrupts on main controller\n",
1077 request_irq(20, xmon_irq
, 0, "NMI", 0);
1078 #endif /* CONFIG_XMON */
1081 mask_and_ack_irq
= chrp_mask_and_ack_irq
;
1082 mask_irq
= chrp_mask_irq
;
1083 unmask_irq
= chrp_unmask_irq
;
1085 if ( !(np
= find_devices("pci") ) )
1086 printk("Cannot find pci to get ack address\n");
1089 chrp_int_ack_special
= (volatile unsigned char *)
1090 (*(unsigned long *)get_property(np
,
1091 "8259-interrupt-acknowledge", NULL
));
1095 cached_irq_mask
[0] = cached_irq_mask
[1] = ~0UL;
1097 request_irq(openpic_to_irq(HYDRA_INT_ADB_NMI
),
1098 xmon_irq
, 0, "NMI", 0);
1099 #endif /* CONFIG_XMON */
1102 mask_and_ack_irq
= i8259_mask_and_ack_irq
;
1103 mask_irq
= i8259_mask_irq
;
1104 unmask_irq
= i8259_unmask_irq
;
1105 cached_irq_mask
[0] = ~0UL;
1109 * According to the Carolina spec from ibm irqs 0,1,2, and 8
1110 * must be edge triggered. Also, the pci intrs must be level
1111 * triggered and _only_ isa intrs can be level sensitive
1112 * which are 3-7,9-12,14-15. 13 is special - it can be level.
1114 * power on default is 0's in both regs - all edge.
1116 * These edge/level control regs allow edge/level status
1117 * to be decided on a irq basis instead of on a PIC basis.
1118 * It's still pretty ugly.
1122 unsigned char irq_mode1
= 0, irq_mode2
= 0;
1123 irq_mode1
= 0; /* to get rid of compiler warnings */
1125 * On Carolina, irq 15 and 13 must be level (scsi/ide/net).
1127 if ( _prep_type
== _PREP_IBM
)
1133 mask_irq
= amiga_disable_irq
;
1134 unmask_irq
= amiga_enable_irq
;
1139 #endif /* CONFIG_8xx */
/*
 * pmac_fix_gatwick_interrupts(): patch missing interrupt properties
 * into the Gatwick mac-io device-tree nodes (SCC child, media-bay,
 * floppy) using a small static interrupt_info pool, offset by
 * irq_base (the secondary controller's base irq).
 * NOTE(review): lossy extraction -- the node-walk loop braces and
 * several statements between the numbered fragments are missing; text
 * kept verbatim.  Leading numbers are original-file line numbers.
 */
1142 /* This routine will fix some missing interrupt values in the device tree
1143 * on the gatwick mac-io controller used by some PowerBooks
1145 static void __init
pmac_fix_gatwick_interrupts(struct device_node
*gw
, int irq_base
)
1147 struct device_node
*node
;
1148 static struct interrupt_info int_pool
[4];
1150 memset(int_pool
, 0, sizeof(int_pool
));
1155 if (strcasecmp(node
->name
, "escc") == 0)
1156 if (node
->child
&& node
->child
->n_intrs
== 0)
1158 node
->child
->n_intrs
= 1;
1159 node
->child
->intrs
= &int_pool
[0];
1160 int_pool
[0].line
= 15+irq_base
;
1161 printk(KERN_INFO
"irq: fixed SCC on second controller (%d)\n",
1164 /* Fix media-bay & left SWIM */
1165 if (strcasecmp(node
->name
, "media-bay") == 0)
1167 struct device_node
* ya_node
;
1169 if (node
->n_intrs
== 0)
1172 node
->intrs
= &int_pool
[1];
1173 int_pool
[1].line
= 29+irq_base
;
1174 printk(KERN_INFO
"irq: fixed media-bay on second controller (%d)\n",
1177 ya_node
= node
->child
;
1180 if ((strcasecmp(ya_node
->name
, "floppy") == 0) &&
1181 ya_node
->n_intrs
== 0)
1183 ya_node
->n_intrs
= 2;
1184 ya_node
->intrs
= &int_pool
[2];
1185 int_pool
[2].line
= 19+irq_base
;
1186 int_pool
[3].line
= 1+irq_base
;
1187 printk(KERN_INFO
"irq: fixed floppy on second controller (%d,%d)\n",
1188 int_pool
[2].line
, int_pool
[3].line
);
1190 ya_node
= ya_node
->sibling
;
1193 node
= node
->sibling
;