/*
 * $Id: irq.c,v 1.109 1999/09/05 11:56:31 paulus Exp $
 *
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complementing of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
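/*
 * A minimal sketch (not from the original source) of the 8xx convention
 * described above: IRQ0 sits in the most-significant position of the
 * 32-bit SIU mask word, so enabling irq_nr means setting bit (31 - irq_nr)
 * in the cached mask, and that cached word can then be written straight
 * into the SIU SIMASK register.  The helper name is illustrative only:
 *
 *	static inline unsigned int siu_mask_bit(unsigned int irq_nr)
 *	{
 *		return 1U << (31 - irq_nr);
 *	}
 */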
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/openpic.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/hydra.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigappc.h>
#include <asm/ptrace.h>

#include "local_irq.h"
extern volatile unsigned long ipi_count;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

volatile unsigned char *chrp_int_ack_special;
#ifdef CONFIG_APUS
/* Rename a few functions. Requires the CONFIG_APUS protection. */
#define request_irq nop_ppc_request_irq
#define free_irq nop_ppc_free_irq
#define get_irq_list nop_get_irq_list
#endif
#define MAXCOUNT 10000000

#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
int ppc_spurious_interrupts = 0;

unsigned int ppc_local_bh_count[NR_CPUS];
unsigned int ppc_local_irq_count[NR_CPUS];
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
/*
 * Nasty hack for shared IRQs: request_irq() can be called very early in
 * boot, before kmalloc() is usable, so fall back to a small static cache
 * of irqaction structures.  This needs to be removed.
 */
static char cache_bitmask = 0;
static struct irqaction malloc_cache[8];
extern int mem_init_done;
void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;

	if (mem_init_done)
		return kmalloc(size, pri);
	/* Early in boot: hand out one of the first four static slots. */
	for (i = 0; i <= 3; i++)
		if (!(cache_bitmask & (1 << i))) {
			cache_bitmask |= (1 << i);
			return (void *)(&malloc_cache[i]);
		}
	return NULL;
}
void irq_kfree(void *ptr)
{
	unsigned int i;

	/* If the pointer came from the static cache, just clear its bit. */
	for (i = 0; i <= 3; i++)
		if (ptr == &malloc_cache[i]) {
			cache_bitmask &= ~(1 << i);
			return;
		}
	kfree(ptr);
}
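/*
 * Illustrative call pattern (not from the original file): allocations made
 * before mem_init_done is set come out of malloc_cache[] above and must be
 * returned through irq_kfree() so the slot's bit in cache_bitmask gets
 * cleared again:
 *
 *	struct irqaction *a = irq_kmalloc(sizeof(*a), GFP_KERNEL);
 *	irq_kfree(a);
 */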
struct irqdesc irq_desc[NR_IRQS] = {{0, 0}, };

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *old, **p, *action;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return -EINVAL;

	if (!handler) {
		/* A NULL handler means: free the action registered for dev_id. */
		for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
			if (action->dev_id != dev_id)
				continue;
			/* Found it - now free it */
			save_flags(flags);
			cli();
			*p = action->next;
			restore_flags(flags);
			irq_kfree(action);
			return 0;
		}
		return -ENOENT;
	}

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (action == NULL)
		return -ENOMEM;

	save_flags(flags);
	cli();
	action->handler = handler;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & action->flags & SA_SHIRQ)) {
			restore_flags(flags);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
	}
	*p = action;
	restore_flags(flags);
	return 0;
}
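/*
 * Usage sketch (illustrative only, not part of this file): every user of a
 * shared line must pass SA_SHIRQ and a dev_id that uniquely identifies it,
 * so the matching free_irq(irq, dev_id) can find the right action later;
 * the handler should first check whether its own device really asserted
 * the line.  "my_intr", "my_dev" and "mydev" are made-up names:
 *
 *	static int my_dev;
 *
 *	static void my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *	}
 *
 *	if (request_irq(irq, my_intr, SA_SHIRQ, "mydev", &my_dev) != 0)
 *		return -EBUSY;
 */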
void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}

/* XXX should implement irq disable depth like on intel */
void disable_irq_nosync(unsigned int irq_nr)
{
	mask_irq(irq_nr);
}

void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}

void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}
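/*
 * For comparison (hedged sketch of the i386 approach mentioned in the XXX
 * above, not code from this file): a per-irq disable depth lets nested
 * disable_irq()/enable_irq() pairs balance, roughly:
 *
 *	if (irq_desc[irq].depth++ == 0)
 *		mask_irq(irq);
 *
 *	if (--irq_desc[irq].depth == 0)
 *		unmask_irq(irq);
 *
 * Per the XXX above, no such depth counting is done here yet.
 */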
int get_irq_list(char *buf)
{
	int i, len = 0, j;
	struct irqaction * action;

	len += sprintf(buf+len, "           ");
	for (j = 0; j < smp_num_cpus; j++)
		len += sprintf(buf+len, "CPU%d       ", j);
	*(char *)(buf+len++) = '\n';

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if (!action || !action->handler)
			continue;
		len += sprintf(buf+len, "%3d: ", i);
#ifdef __SMP__
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf+len, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#endif /* __SMP__ */
		if (irq_desc[i].ctl)
			len += sprintf(buf+len, " %s ", irq_desc[i].ctl->typename);
		len += sprintf(buf+len, "    %s", action->name);
		for (action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s", action->name);
		}
		len += sprintf(buf+len, "\n");
	}
#ifdef __SMP__
	/* should this be per processor send/receive? */
	len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
#endif
	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
	return len;
}
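/*
 * The resulting /proc/interrupts output looks roughly like this (the
 * counts, controller and device names below are illustrative only):
 *
 *            CPU0       CPU1
 *   1:       1234          0   OpenPIC     keyboard
 *  18:       5678          9   OpenPIC     ide0, eth0
 * IPI:         42
 * BAD:          0
 */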
/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status = 0;
	struct irqaction *action;
	int cpu = smp_processor_id();

	mask_and_ack_irq(irq);
	action = irq_desc[irq].action;
	kstat.irqs[cpu][irq]++;
	if (action && action->handler) {
		/* Slow handlers run with interrupts enabled; SA_INTERRUPT
		 * (fast) handlers keep them disabled. */
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		__cli();
		unmask_irq(irq);
	} else {
		/* No handler registered: count it and leave the irq masked. */
		ppc_spurious_interrupts++;
	}
}
asmlinkage void do_IRQ(struct pt_regs *regs, int isfake)
{
	int cpu = smp_processor_id();

	hardirq_enter(cpu);
	ppc_md.do_IRQ(regs, cpu, isfake);
	hardirq_exit(cpu);
}
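/*
 * Board/platform support is expected to point ppc_md.do_IRQ at its own
 * first-level handler, which reads its interrupt controller and feeds each
 * pending interrupt to ppc_irq_dispatch_handler() above.  A hedged sketch
 * (the function names are illustrative):
 *
 *	void __init my_board_setup(void)
 *	{
 *		ppc_md.do_IRQ = my_board_do_IRQ;
 *	}
 */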
unsigned long probe_irq_on (void)
{
	return 0;
}

int probe_irq_off (unsigned long irqs)
{
	return 0;
}
void __init init_IRQ(void)
{
	ppc_md.init_IRQ();
}

unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;
static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq: %d [%d %d]\n",
		atomic_read(&global_irq_count),
		ppc_local_irq_count[0],
		ppc_local_irq_count[1]);
	printk("bh: %d [%d %d]\n",
		atomic_read(&global_bh_count),
		ppc_local_bh_count[0],
		ppc_local_bh_count[1]);
	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;

	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {
		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (ppc_local_bh_count[cpu]
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			/* don't worry about the lock race Linus found
			 * on intel here. -- Cort
			 */
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (!ppc_local_bh_count[cpu]
			    && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	unsigned int loops = MAXCOUNT;

	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				if (!--loops) {
					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
					loops = MAXCOUNT;
				}
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	global_irq_holder = cpu;
}
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & (1 << 15)) {	/* MSR_EE: external interrupts were enabled */
		int cpu = smp_processor_id();
		__cli();
		if (!ppc_local_irq_count[cpu])
			get_irqlock(cpu);
	}
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!ppc_local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> 15) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!ppc_local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
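/*
 * Typical use (hedged sketch): on SMP the generic save_flags()/cli()/
 * restore_flags() macros map onto the __global_ variants, so driver code
 * such as
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();
 *	do_something_atomic();
 *	restore_flags(flags);
 *
 * ends up passing one of the four values listed above through "flags".
 * do_something_atomic() stands in for the caller's critical section.
 */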
/*
 * Fill vals[] with up to max_size saved return addresses walked off the
 * current stack chain; returns the number of entries stored.
 */
int tb(unsigned long vals[], int max_size)
{
	register unsigned long *orig_sp __asm__ ("r1");
	register unsigned long lr __asm__ ("r3");
	unsigned long *sp;
	int i;

	asm volatile ("mflr 3");
	vals[0] = lr;
	sp = (unsigned long *) *orig_sp;
	sp = (unsigned long *) *sp;
	for (i = 1; i < max_size; i++) {
		if (sp == 0)
			break;
		vals[i] = *(sp+1);	/* LR save word in the caller's frame */
		sp = (unsigned long *) *sp;
	}
	return i;
}
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int count, i;

		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
		count = tb(trace, 5);
		for (i = 0; i < count; i++) {
			printk(" %8.8lx", trace[i]);