/*
 * $Id: irq.c,v 1.109 1999/09/05 11:56:31 paulus Exp $
 *
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
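
/*
 * A sketch of what that convention implies (illustrative only; the real
 * 8xx mask manipulation lives with the platform interrupt code, and the
 * register name below is hypothetical): with IRQ0 in the most-significant
 * bit and a set bit meaning "enabled", enabling IRQ n from a cached copy
 * would look roughly like
 *
 *	ppc_cached_irq_mask[0] |= (1 << (31 - n));
 *	out_be32(&siu_smask_reg, ppc_cached_irq_mask[0]);
 *
 * and the complement mentioned above comes in where the cached word tracks
 * disabled sources rather than enabled ones.
 */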

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/openpic.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/hydra.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/gg2.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigappc.h>
#include <asm/ptrace.h>

#include "local_irq.h"

extern volatile unsigned long ipi_count;

void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

volatile unsigned char *chrp_int_ack_special;

#ifdef CONFIG_APUS
/* Rename a few functions. Requires the CONFIG_APUS protection. */
#define request_irq nop_ppc_request_irq
#define free_irq nop_ppc_free_irq
#define get_irq_list nop_get_irq_list
#define VEC_SPUR (24)
#endif

#define MAXCOUNT 10000000

#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

int ppc_spurious_interrupts = 0;

unsigned int ppc_local_bh_count[NR_CPUS];
unsigned int ppc_local_irq_count[NR_CPUS];
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

/* nasty hack for shared irq's since we need to do kmalloc calls but
 * can't very early in the boot when we need to do a request irq.
 * this needs to be removed.
 * -- Cort
 */
static char cache_bitmask = 0;
static struct irqaction malloc_cache[8];
extern int mem_init_done;

void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;
	if ( mem_init_done )
		return kmalloc(size,pri);
	for ( i = 0; i <= 3 ; i++ )
		if ( ! ( cache_bitmask & (1<<i) ) )
		{
			cache_bitmask |= (1<<i);
			return (void *)(&malloc_cache[i]);
		}
	return 0;
}

void irq_kfree(void *ptr)
{
	unsigned int i;
	for ( i = 0 ; i <= 3 ; i++ )
		if ( ptr == &malloc_cache[i] )
		{
			cache_bitmask &= ~(1<<i);
			return;
		}
	kfree(ptr);
}

struct irqdesc irq_desc[NR_IRQS] = {{0, 0}, };

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *old, **p, *action;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
	{
		/* Free */
		for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next)
		{
			/* Found it - now free it */
			save_flags(flags);
			cli();
			*p = action->next;
			restore_flags(flags);
			irq_kfree(action);
			return 0;
		}
		return -ENOENT;
	}

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	save_flags(flags);
	cli();

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;
	enable_irq(irq);

	p = &irq_desc[irq].action;

	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & action->flags & SA_SHIRQ)) {
			/* don't leak the action or leave interrupts disabled */
			irq_kfree(action);
			restore_flags(flags);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
	}
	*p = action;

	restore_flags(flags);
	return 0;
}
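
/*
 * Illustrative use only (hypothetical driver names): a typical caller
 * registers a handler at open/probe time and drops it on close:
 *
 *	static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 *	...
 *	if (request_irq(dev->irq, mydev_interrupt, SA_SHIRQ, "mydev", dev))
 *		return -EBUSY;
 *	...
 *	free_irq(dev->irq, dev);
 *
 * Note that in this implementation passing a NULL handler to request_irq()
 * is what removes a registration, which is all free_irq() below does.
 */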

void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}

/* XXX should implement irq disable depth like on intel */
void disable_irq_nosync(unsigned int irq_nr)
{
	mask_irq(irq_nr);
}

void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}

void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}

int get_irq_list(char *buf)
{
	int i, len = 0, j;
	struct irqaction * action;

	len += sprintf(buf+len, "           ");
	for (j=0; j<smp_num_cpus; j++)
		len += sprintf(buf+len, "CPU%d       ",j);
	*(char *)(buf+len++) = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if ( !action || !action->handler )
			continue;
		len += sprintf(buf+len, "%3d: ", i);
#ifdef __SMP__
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf+len, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#endif /* __SMP__ */
		if ( irq_desc[i].ctl )
			len += sprintf(buf+len, " %s ", irq_desc[i].ctl->typename );
		len += sprintf(buf+len, "    %s", action->name);
		for (action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s", action->name);
		}
		len += sprintf(buf+len, "\n");
	}
#ifdef __SMP__
	/* should this be per processor send/receive? */
	len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
#endif
	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
	return len;
}

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();

	mask_and_ack_irq(irq);
	status = 0;
	action = irq_desc[irq].action;
	kstat.irqs[cpu][irq]++;
	if (action && action->handler) {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while ( action );
		__cli();
		unmask_irq(irq);
	} else {
		ppc_spurious_interrupts++;
		disable_irq( irq );
	}
}

asmlinkage void do_IRQ(struct pt_regs *regs, int isfake)
{
	int cpu = smp_processor_id();

	hardirq_enter(cpu);
	ppc_md.do_IRQ(regs, cpu, isfake);
	hardirq_exit(cpu);
}

unsigned long probe_irq_on (void)
{
	return 0;
}

int probe_irq_off (unsigned long irqs)
{
	return 0;
}

void __init init_IRQ(void)
{
	static int once = 0;

	if ( once )
		return;
	else
		once++;

	ppc_md.init_IRQ();
}

#ifdef __SMP__
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
atomic_t global_bh_lock;

static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       ppc_local_irq_count[0],
	       ppc_local_irq_count[1]);
	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       ppc_local_bh_count[0],
	       ppc_local_bh_count[1]);
	stack = (unsigned long *) &str;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (ppc_local_bh_count[cpu]
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			/* don't worry about the lock race Linus found
			 * on intel here. -- Cort
			 */
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!ppc_local_bh_count[cpu]
			    && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
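
/*
 * Illustrative only (hypothetical names): a driver tearing down state that
 * a bottom half on another CPU may still be touching could do
 *
 *	mydev->bh_enabled = 0;		// stop queueing new bh work
 *	synchronize_bh();		// let any running bottom halves finish
 *	kfree(mydev->bh_state);
 */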

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
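
/*
 * Illustrative shutdown sequence (hypothetical device helpers): quiesce the
 * hardware first, then make sure no handler is still running on another CPU
 * before the resources it uses go away:
 *
 *	mydev_mask_board_interrupts(dev);
 *	synchronize_irq();
 *	free_irq(dev->irq, dev);
 */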

static inline void get_irqlock(int cpu)
{
	unsigned int loops = MAXCOUNT;

	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				if (loops-- == 0) {
					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
#ifdef CONFIG_XMON
					xmon(0);
#endif
				}
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << 15)) {	/* MSR_EE: external interrupts enabled? */
		int cpu = smp_processor_id();
		__cli();
		if (!ppc_local_irq_count[cpu])
			get_irqlock(cpu);
	}
}
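
/*
 * The pattern the comment above is about, for illustration only (not part
 * of this file's interface): code that may be entered with or without
 * interrupts enabled can safely do
 *
 *	unsigned long flags;
 *	save_flags(flags);
 *	cli();
 *	... touch data shared with interrupt handlers ...
 *	restore_flags(flags);
 *
 * and on SMP those macros end up in __global_save_flags(), __global_cli()
 * and __global_restore_flags() below.
 */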

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!ppc_local_irq_count[cpu])
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> 15) & 1;	/* MSR_EE */
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!ppc_local_irq_count[smp_processor_id()]) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
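
/*
 * Worked example (illustrative): outside interrupt context with MSR_EE set,
 * __global_save_flags() returns 1 ("global sti"); after __global_cli() the
 * same CPU, now recorded in global_irq_holder, would get 0.  The usual
 * pairing is simply
 *
 *	unsigned long flags = __global_save_flags();
 *	__global_cli();
 *	...
 *	__global_restore_flags(flags);
 *
 * with __global_restore_flags() switching on the four values listed above.
 */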

/* Grab a rough backtrace: the current LR plus up to max_size-1 return
 * addresses walked from the stack frame chain.  Used only for the
 * diagnostic printk in __global_restore_flags() below.
 */
static int
tb(long vals[],
   int max_size)
{
	register unsigned long *orig_sp __asm__ ("r1");
	register unsigned long lr __asm__ ("r3");
	unsigned long *sp;
	int i;

	asm volatile ("mflr 3");
	vals[0] = lr;
	sp = (unsigned long *) *orig_sp;
	sp = (unsigned long *) *sp;
	for (i=1; i<max_size; i++) {
		if (sp == 0) {
			break;
		}
		vals[i] = *(sp+1);
		sp = (unsigned long *) *sp;
	}

	return i;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int count;
		int i;

		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
		count = tb(trace, 5);
		printk("tb:");
		for(i=0; i<count; i++) {
			printk(" %8.8lx", trace[i]);
		}
		printk("\n");
	}
	}
}
#endif /* __SMP__ */