/*
 * $Id: irq.c,v 1.113 1999/09/17 17:22:56 cort Exp $
 *
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
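/*
 * A minimal sketch (not part of this file) of the bit convention
 * described above: on PPC "bit 0" is the most-significant bit of a
 * 32-bit register, hence the 31-irq_nr shift.  The helper name and
 * the simask register pointer here are hypothetical; the real 8xx
 * PIC code owns the details.
 */
#if 0
static void sketch_8xx_disable_irq(unsigned int irq_nr, volatile unsigned int *simask)
{
	ppc_cached_irq_mask[0] &= ~(1 << (31 - irq_nr));	/* clear the enable bit */
	*simask = ppc_cached_irq_mask[0];			/* push straight to the SIU */
}
#endif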
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/openpic.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/hydra.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/gg2.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigappc.h>
#include <asm/ptrace.h>

#include "local_irq.h"
extern volatile unsigned long ipi_count;

void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

volatile unsigned char *chrp_int_ack_special;

#define MAXCOUNT 10000000

irq_desc_t irq_desc[NR_IRQS];
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
/* Nasty hack for shared irqs: request_irq() needs to kmalloc an
 * irqaction, but it can be called too early in boot for kmalloc to
 * work, so hand out entries from a small static cache instead.
 * This needs to be removed.
 * -- Cort
 */
#define IRQ_KMALLOC_ENTRIES 8
static int cache_bitmask = 0;
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;
void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;

	if ( mem_init_done )
		return kmalloc(size, pri);
	/* Before mem_init(), hand out a free slot from the static cache. */
	for ( i = 0; i < IRQ_KMALLOC_ENTRIES; i++ )
		if ( !(cache_bitmask & (1 << i)) ) {
			cache_bitmask |= (1 << i);
			return (void *)(&malloc_cache[i]);
		}
	return NULL;
}
void irq_kfree(void *ptr)
{
	unsigned int i;

	/* Pointers into the static cache just get their slot bit cleared. */
	for ( i = 0; i < IRQ_KMALLOC_ENTRIES; i++ )
		if ( ptr == &malloc_cache[i] ) {
			cache_bitmask &= ~(1 << i);
			return;
		}
	kfree(ptr);
}
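/*
 * Usage sketch (not compiled): the pair is meant to be transparent to
 * callers, whichever allocator actually backed the pointer.
 */
#if 0
	/* Early in boot (mem_init_done == 0) this takes one of the
	 * IRQ_KMALLOC_ENTRIES static slots; afterwards it is a plain
	 * kmalloc().  irq_kfree() routes the pointer back accordingly. */
	struct irqaction *a = irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	irq_kfree(a);
#endif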
#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
/* Name change so we can catch standard drivers that potentially mess up
 * the internal interrupt controller on 8xx and 8260. Just bear with me,
 * I don't like this either and I am searching for a better solution. For
 * now, this is what I need. -- Dan
 */
int request_8xxirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#elif defined(CONFIG_APUS)
int request_sysirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#else
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#endif
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *old, **p, *action;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler) {
		/* Free: find the action registered with this dev_id */
		for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
			if (action->dev_id != dev_id)
				continue;
			/* Found it - now free it */
			save_flags(flags);
			cli();
			*p = action->next;
			restore_flags(flags);
			irq_kfree(action);
			return 0;
		}
		return -ENOENT;
	}

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	save_flags(flags);
	cli();

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;
	enable_irq(irq);

	p = &irq_desc[irq].action;

	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & action->flags & SA_SHIRQ)) {
			restore_flags(flags);
			irq_kfree(action);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
	}
	*p = action;

	restore_flags(flags);
	return 0;
}
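/*
 * Illustrative driver-side usage (hypothetical names throughout): both
 * parties on a shared line must pass SA_SHIRQ, and a unique non-NULL
 * dev_id so the free path above can find the right action again.
 */
#if 0
static void my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... service the device identified by dev_id ... */
}

static int my_attach(struct my_dev *dev)
{
	return request_irq(dev->irq, my_handler, SA_SHIRQ, "mydev", dev);
}
#endif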
#ifdef CONFIG_APUS
void sys_free_irq(unsigned int irq, void *dev_id)
{
	sys_request_irq(irq, NULL, 0, NULL, dev_id);
}
#else
void free_irq(unsigned int irq, void *dev_id)
{
#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
	request_8xxirq(irq, NULL, 0, NULL, dev_id);
#else
	request_irq(irq, NULL, 0, NULL, dev_id);
#endif
}
#endif
/* XXX should implement irq disable depth like on intel */
void disable_irq_nosync(unsigned int irq_nr)
{
	mask_irq(irq_nr);
}

void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}

void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}
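/*
 * Sketch of the disable-depth scheme the XXX above refers to, roughly
 * as arch/i386 does it: nested disables only unmask once the last
 * enable arrives.  A depth field in irq_desc_t is assumed here, which
 * this tree does not necessarily have.
 */
#if 0
void sketch_disable_irq_nosync(unsigned int irq_nr)
{
	if (irq_desc[irq_nr].depth++ == 0)	/* first disable really masks */
		mask_irq(irq_nr);
}

void sketch_enable_irq(unsigned int irq_nr)
{
	if (--irq_desc[irq_nr].depth == 0)	/* last enable really unmasks */
		unmask_irq(irq_nr);
}
#endif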
int get_irq_list(char *buf)
{
#ifdef CONFIG_APUS
	return apus_get_irq_list(buf);
#else
	int i, len = 0, j;
	struct irqaction * action;

	len += sprintf(buf+len, "           ");
	for (j = 0; j < smp_num_cpus; j++)
		len += sprintf(buf+len, "CPU%d       ", j);
	*(char *)(buf+len++) = '\n';

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if ( !action || !action->handler )
			continue;
		len += sprintf(buf+len, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf+len, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if ( irq_desc[i].handler )
			len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename);
		else
			len += sprintf(buf+len, " None ");
		len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
		len += sprintf(buf+len, " %s", action->name);
		for (action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s", action->name);
		}
		len += sprintf(buf+len, "\n");
	}
#ifdef CONFIG_SMP
	/* should this be per processor send/receive? */
	len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
#endif
	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
	return len;
#endif /* CONFIG_APUS */
}
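/*
 * For reference, the /proc/interrupts text built above comes out
 * roughly like this (names and counts invented for illustration):
 *
 *            CPU0
 *   1:        331  OpenPIC   Level     keyboard
 *  14:      55964  OpenPIC   Level     ide0
 * BAD:          0
 */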
/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();

	mask_and_ack_irq(irq);
	status = 0;
	action = irq_desc[irq].action;
	kstat.irqs[cpu][irq]++;
	if (action && action->handler) {
		/* Run with interrupts on unless the handler asked for them off. */
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while ( action );
		__cli();
		if (irq_desc[irq].handler) {
			if (irq_desc[irq].handler->end)
				irq_desc[irq].handler->end(irq);
			else if (irq_desc[irq].handler->enable)
				irq_desc[irq].handler->enable(irq);
		}
	} else {
		ppc_spurious_interrupts++;
		printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
		disable_irq(irq);
		if (irq_desc[irq].handler && irq_desc[irq].handler->end)
			irq_desc[irq].handler->end(irq);
	}
}
asmlinkage int do_IRQ(struct pt_regs *regs, int isfake)
{
	int cpu = smp_processor_id();
	int irq;

	hardirq_enter( cpu );

	/* every arch is required to have a get_irq -- Cort */
	irq = ppc_md.get_irq( regs );

	if ( irq < 0 ) {
		/* -2 means ignore, already handled */
		if (irq != -2) {
			printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
			       irq, regs->nip);
			ppc_spurious_interrupts++;
		}
		goto out;
	}
	ppc_irq_dispatch_handler( regs, irq );
	if (ppc_md.post_irq)
		ppc_md.post_irq( regs, irq );

out:
	hardirq_exit( cpu );
	return 1; /* lets ret_from_int know we can do checks */
}
unsigned long probe_irq_on (void)
{
	return 0;
}

int probe_irq_off (unsigned long irqs)
{
	return 0;
}
void __init init_IRQ(void)
{
	static int once = 0;

	if ( once )
		return;
	else
		once++;

	ppc_md.init_IRQ();
}
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       local_irq_count(0),
	       local_irq_count(1));
	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       local_bh_count(0),
	       local_bh_count(1));
	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {
		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0, &global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			/* don't worry about the lock race Linus found
			 * on intel here. -- Cort
			 */
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0, &global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
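/*
 * Typical caller pattern this helper supports (the device functions
 * are hypothetical): quiesce the hardware, then wait out any handler
 * still running on another CPU before tearing state down.
 */
#if 0
	mydev_stop_interrupts(dev);	/* device sends no new IRQs */
	synchronize_irq();		/* in-flight handlers drain */
	free_irq(dev->irq, dev);	/* now safe to unregister */
#endif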
static inline void get_irqlock(int cpu)
{
	unsigned int loops = MAXCOUNT;

	if (test_and_set_bit(0, &global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				if (loops-- == 0) {
					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
#ifdef CONFIG_XMON
					xmon(0);
#endif
				}
			} while (test_bit(0, &global_irq_lock));
		} while (test_and_set_bit(0, &global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & (1 << 15)) {	/* MSR_EE: interrupts were enabled */
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> 15) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
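/*
 * Round-trip usage of the encoding above, for illustration:
 */
#if 0
	unsigned long flags = __global_save_flags();	/* 0..3 per the table */
	__global_cli();
	/* ... critical section ... */
	__global_restore_flags(flags);
#endif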
/* Fill vals[] with up to max_size return addresses from the current
 * stack chain; returns the number of entries captured. */
int
tb(long vals[],
   int max_size)
{
	register unsigned long *orig_sp __asm__ ("r1");
	register unsigned long lr __asm__ ("r3");
	unsigned long *sp;
	int i;

	asm volatile ("mflr 3");
	vals[0] = lr;
	sp = (unsigned long *) *orig_sp;
	sp = (unsigned long *) *sp;
	for (i = 1; i < max_size; i++) {
		if (sp == 0) {
			break;
		}
		vals[i] = *(sp+1);
		sp = (unsigned long *) *sp;
	}

	return i;
}
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int count;
		int i;

		printk("global_restore_flags: %08lx (%08lx)\n",
		       flags, (&flags)[-1]);
		count = tb(trace, 5);
		printk("tb:");
		for (i = 0; i < count; i++) {
			printk(" %8.8lx", trace[i]);
		}
		printk("\n");
	}
	}
}
#endif /* CONFIG_SMP */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff };
#define HEX_DIGITS 8

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08x\n", irq_affinity[(int)data]);
}
/* Returns 0 on success (result in *ret) or a negative errno, so the
 * return type is int rather than unsigned. */
static int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a'-10; break;
		case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
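/*
 * Example of the parsing rule above: with a user buffer containing
 * "00e1", "e1", "00E1" or "E1", each of these calls returns 0 and
 * stores 0xe1 (buffer/count stand for the proc write arguments):
 *
 *	unsigned long v;
 *	int err = parse_hex_value(buffer, count, &v);
 */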
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (int) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

#if 0/*CONFIG_SMP*/
	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;
#endif

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}
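/*
 * From user space this is driven with plain hex writes, e.g. (IRQ
 * number and mask invented for illustration):
 *
 *	echo 00000002 > /proc/irq/5/smp_affinity
 *
 * which, on a handler that implements set_affinity, steers IRQ 5 at
 * the CPUs set in the mask.
 */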
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count;
	unsigned long new_value;
	int err;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL))
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	entry->nlink = 1;
	entry->data = (void *)irq;
	entry->read_proc = irq_affinity_read_proc;
	entry->write_proc = irq_affinity_write_proc;

	smp_affinity_entry[irq] = entry;
}
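/*
 * Resulting /proc layout, for reference:
 *
 *	/proc/irq/prof_cpu_mask		(created by init_irq_proc below)
 *	/proc/irq/<n>/smp_affinity	(one per IRQ with a registered handler)
 */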
unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].handler == NULL)
			continue;
		register_irq_proc(i);
	}
}
void no_action(int irq, void *dev, struct pt_regs *regs)
{
}