- Linus: drop support for old-style Makefiles entirely. Big.
[davej-history.git] / arch / ppc / kernel / irq.c
bloba0caa4a4c9ecbfb5187523e52c98efa0308d6a09
1 /*
2 * $Id: irq.c,v 1.113 1999/09/17 17:22:56 cort Exp $
4 * arch/ppc/kernel/irq.c
6 * Derived from arch/i386/kernel/irq.c
7 * Copyright (C) 1992 Linus Torvalds
8 * Adapted from arch/i386 by Gary Thomas
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Cort Dougan
12 * Adapted for Power Macintosh by Paul Mackerras
13 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 * This file contains the code used by various IRQ handling routines:
17 * asking for different IRQ's should be done through these routines
18 * instead of just grabbing them. Thus setups with different IRQ numbers
19 * shouldn't result in any weird surprises, and installing new handlers
20 * should be easier.
22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
24 * mask register (of which only 16 are defined), hence the weird shifting
25  * and complement of the cached_irq_mask.  I want to be able to stuff
26 * this right into the SIU SMASK register.
27  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28 * to reduce code space and undefined function references.
32 #include <linux/ptrace.h>
33 #include <linux/errno.h>
34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h>
37 #include <linux/sched.h>
38 #include <linux/ioport.h>
39 #include <linux/interrupt.h>
40 #include <linux/timex.h>
41 #include <linux/config.h>
42 #include <linux/init.h>
43 #include <linux/malloc.h>
44 #include <linux/openpic.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/irq.h>
48 #include <linux/proc_fs.h>
50 #include <asm/uaccess.h>
51 #include <asm/bitops.h>
52 #include <asm/hydra.h>
53 #include <asm/system.h>
54 #include <asm/io.h>
55 #include <asm/pgtable.h>
56 #include <asm/irq.h>
57 #include <asm/gg2.h>
58 #include <asm/cache.h>
59 #include <asm/prom.h>
60 #include <asm/amigaints.h>
61 #include <asm/amigahw.h>
62 #include <asm/amigappc.h>
63 #include <asm/ptrace.h>
65 #include "local_irq.h"
67 extern volatile unsigned long ipi_count;
68 void enable_irq(unsigned int irq_nr);
69 void disable_irq(unsigned int irq_nr);
71 volatile unsigned char *chrp_int_ack_special;
73 #define MAXCOUNT 10000000
75 irq_desc_t irq_desc[NR_IRQS];
76 int ppc_spurious_interrupts = 0;
77 struct irqaction *ppc_irq_action[NR_IRQS];
78 unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
79 unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
80 atomic_t ppc_n_lost_interrupts;
82 /* nasty hack for shared irq's since we need to do kmalloc calls but
83 * can't very early in the boot when we need to do a request irq.
84 * this needs to be removed.
85 * -- Cort
87 #define IRQ_KMALLOC_ENTRIES 8
88 static int cache_bitmask = 0;
89 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
90 extern int mem_init_done;
92 void *irq_kmalloc(size_t size, int pri)
94 unsigned int i;
95 if ( mem_init_done )
96 return kmalloc(size,pri);
97 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
98 if ( ! ( cache_bitmask & (1<<i) ) )
100 cache_bitmask |= (1<<i);
101 return (void *)(&malloc_cache[i]);
103 return 0;
106 void irq_kfree(void *ptr)
108 unsigned int i;
109 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
110 if ( ptr == &malloc_cache[i] )
112 cache_bitmask &= ~(1<<i);
113 return;
115 kfree(ptr);
118 #if (defined(CONFIG_8xx) || defined(CONFIG_8260))
119 /* Name change so we can catch standard drivers that potentially mess up
120 * the internal interrupt controller on 8xx and 8260. Just bear with me,
121 * I don't like this either and I am searching a better solution. For
122 * now, this is what I need. -- Dan
124 int request_8xxirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
125 #elif defined(CONFIG_APUS)
126 int request_sysirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
127 #else
128 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
129 #endif
130 unsigned long irqflags, const char * devname, void *dev_id)
132 struct irqaction *old, **p, *action;
133 unsigned long flags;
135 if (irq >= NR_IRQS)
136 return -EINVAL;
137 if (!handler)
139 /* Free */
140 p = &irq_desc[irq].action;
141 while ((action = *p) != NULL && action->dev_id != dev_id)
142 p = &action->next;
143 if (action == NULL)
144 return -ENOENT;
146 /* Found it - now free it */
147 save_flags(flags);
148 cli();
149 *p = action->next;
150 if (irq_desc[irq].action == NULL)
151 disable_irq(irq);
152 restore_flags(flags);
153 irq_kfree(action);
154 return 0;
157 action = (struct irqaction *)
158 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
159 if (!action)
160 return -ENOMEM;
162 save_flags(flags);
163 cli();
165 action->handler = handler;
166 action->flags = irqflags;
167 action->mask = 0;
168 action->name = devname;
169 action->dev_id = dev_id;
170 action->next = NULL;
171 enable_irq(irq);
173 p = &irq_desc[irq].action;
175 if ((old = *p) != NULL) {
176 /* Can't share interrupts unless both agree to */
177 if (!(old->flags & action->flags & SA_SHIRQ))
178 return -EBUSY;
179 /* add new interrupt at end of irq queue */
180 do {
181 p = &old->next;
182 old = *p;
183 } while (old);
185 *p = action;
187 restore_flags(flags);
188 return 0;
191 #ifdef CONFIG_APUS
192 void sys_free_irq(unsigned int irq, void *dev_id)
194 sys_request_irq(irq, NULL, 0, NULL, dev_id);
196 #else
197 void free_irq(unsigned int irq, void *dev_id)
199 #if (defined(CONFIG_8xx) || defined(CONFIG_8260))
200 request_8xxirq(irq, NULL, 0, NULL, dev_id);
201 #else
202 request_irq(irq, NULL, 0, NULL, dev_id);
203 #endif
205 #endif
/* XXX should implement irq disable depth like on intel */

/* Mask the interrupt without waiting for in-flight handlers. */
void disable_irq_nosync(unsigned int irq_nr)
{
	mask_irq(irq_nr);
}
/* Mask the interrupt and wait for any running handlers to finish. */
void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}
/* Unmask the interrupt so it can be delivered again. */
void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}
224 int get_irq_list(char *buf)
226 #ifdef CONFIG_APUS
227 return apus_get_irq_list (buf);
228 #else
229 int i, len = 0, j;
230 struct irqaction * action;
232 len += sprintf(buf+len, " ");
233 for (j=0; j<smp_num_cpus; j++)
234 len += sprintf(buf+len, "CPU%d ",j);
235 *(char *)(buf+len++) = '\n';
237 for (i = 0 ; i < NR_IRQS ; i++) {
238 action = irq_desc[i].action;
239 if ( !action || !action->handler )
240 continue;
241 len += sprintf(buf+len, "%3d: ", i);
242 #ifdef CONFIG_SMP
243 for (j = 0; j < smp_num_cpus; j++)
244 len += sprintf(buf+len, "%10u ",
245 kstat.irqs[cpu_logical_map(j)][i]);
246 #else
247 len += sprintf(buf+len, "%10u ", kstat_irqs(i));
248 #endif /* CONFIG_SMP */
249 if ( irq_desc[i].handler )
250 len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename );
251 else
252 len += sprintf(buf+len, " None ");
253 len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
254 len += sprintf(buf+len, " %s",action->name);
255 for (action=action->next; action; action = action->next) {
256 len += sprintf(buf+len, ", %s", action->name);
258 len += sprintf(buf+len, "\n");
260 #ifdef CONFIG_SMP
261 /* should this be per processor send/receive? */
262 len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
263 #endif
264 len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
265 return len;
266 #endif /* CONFIG_APUS */
270 * Eventually, this should take an array of interrupts and an array size
271 * so it can dispatch multiple interrupts.
273 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
275 int status;
276 struct irqaction *action;
277 int cpu = smp_processor_id();
279 mask_and_ack_irq(irq);
280 status = 0;
281 action = irq_desc[irq].action;
282 kstat.irqs[cpu][irq]++;
283 if (action && action->handler) {
284 if (!(action->flags & SA_INTERRUPT))
285 __sti();
286 do {
287 status |= action->flags;
288 action->handler(irq, action->dev_id, regs);
289 action = action->next;
290 } while ( action );
291 __cli();
292 if (irq_desc[irq].handler) {
293 if (irq_desc[irq].handler->end)
294 irq_desc[irq].handler->end(irq);
295 else if (irq_desc[irq].handler->enable)
296 irq_desc[irq].handler->enable(irq);
298 } else {
299 ppc_spurious_interrupts++;
300 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
301 disable_irq(irq);
302 if (irq_desc[irq].handler->end)
303 irq_desc[irq].handler->end(irq);
307 int do_IRQ(struct pt_regs *regs, int isfake)
309 int cpu = smp_processor_id();
310 int irq;
311 hardirq_enter( cpu );
313 /* every arch is required to have a get_irq -- Cort */
314 irq = ppc_md.get_irq( regs );
316 if ( irq < 0 )
318 /* -2 means ignore, already handled */
319 if (irq != -2)
321 printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
322 irq, regs->nip);
323 ppc_spurious_interrupts++;
325 goto out;
327 ppc_irq_dispatch_handler( regs, irq );
328 if (ppc_md.post_irq)
329 ppc_md.post_irq( regs, irq );
331 out:
332 hardirq_exit( cpu );
333 return 1; /* lets ret_from_int know we can do checks */
/* IRQ autoprobing is not supported on PPC; always report "no irqs". */
unsigned long probe_irq_on(void)
{
	return 0;
}
/* IRQ autoprobing is not supported on PPC; always report "none found". */
int probe_irq_off(unsigned long irqs)
{
	return 0;
}
/* IRQ autoprobing is not supported on PPC; always report an empty mask. */
unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}
351 void __init init_IRQ(void)
353 static int once = 0;
355 if ( once )
356 return;
357 else
358 once++;
360 ppc_md.init_IRQ();
363 #ifdef CONFIG_SMP
364 unsigned char global_irq_holder = NO_PROC_ID;
365 unsigned volatile int global_irq_lock;
366 atomic_t global_irq_count;
368 atomic_t global_bh_count;
370 static void show(char * str)
372 int i;
373 unsigned long *stack;
374 int cpu = smp_processor_id();
376 printk("\n%s, CPU %d:\n", str, cpu);
377 printk("irq: %d [%d %d]\n",
378 atomic_read(&global_irq_count),
379 local_irq_count(0),
380 local_irq_count(1));
381 printk("bh: %d [%d %d]\n",
382 atomic_read(&global_bh_count),
383 local_bh_count(0),
384 local_bh_count(1));
385 stack = (unsigned long *) &str;
386 for (i = 40; i ; i--) {
387 unsigned long x = *++stack;
388 if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
389 printk("<[%08lx]> ", x);
394 static inline void wait_on_bh(void)
396 int count = MAXCOUNT;
397 do {
398 if (!--count) {
399 show("wait_on_bh");
400 count = ~0;
402 /* nothing .. wait for the other bh's to go away */
403 } while (atomic_read(&global_bh_count) != 0);
407 static inline void wait_on_irq(int cpu)
409 int count = MAXCOUNT;
411 for (;;) {
414 * Wait until all interrupts are gone. Wait
415 * for bottom half handlers unless we're
416 * already executing in one..
418 if (!atomic_read(&global_irq_count)) {
419 if (local_bh_count(cpu)
420 || !atomic_read(&global_bh_count))
421 break;
424 /* Duh, we have to loop. Release the lock to avoid deadlocks */
425 clear_bit(0,&global_irq_lock);
427 for (;;) {
428 if (!--count) {
429 show("wait_on_irq");
430 count = ~0;
432 __sti();
433 /* don't worry about the lock race Linus found
434 * on intel here. -- Cort
436 __cli();
437 if (atomic_read(&global_irq_count))
438 continue;
439 if (global_irq_lock)
440 continue;
441 if (!local_bh_count(cpu)
442 && atomic_read(&global_bh_count))
443 continue;
444 if (!test_and_set_bit(0,&global_irq_lock))
445 break;
451 * This is called when we want to synchronize with
452 * bottom half handlers. We need to wait until
453 * no other CPU is executing any bottom half handler.
455 * Don't wait if we're already running in an interrupt
456 * context or are inside a bh handler.
458 void synchronize_bh(void)
460 if (atomic_read(&global_bh_count) && !in_interrupt())
461 wait_on_bh();
465 * This is called when we want to synchronize with
466 * interrupts. We may for example tell a device to
467 * stop sending interrupts: but to make sure there
468 * are no interrupts that are executing on another
469 * CPU we need to call this function.
471 void synchronize_irq(void)
473 if (atomic_read(&global_irq_count)) {
474 /* Stupid approach */
475 cli();
476 sti();
480 static inline void get_irqlock(int cpu)
482 unsigned int loops = MAXCOUNT;
484 if (test_and_set_bit(0,&global_irq_lock)) {
485 /* do we already hold the lock? */
486 if ((unsigned char) cpu == global_irq_holder)
487 return;
488 /* Uhhuh.. Somebody else got it. Wait.. */
489 do {
490 do {
491 if (loops-- == 0) {
492 printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
493 #ifdef CONFIG_XMON
494 xmon(0);
495 #endif
497 } while (test_bit(0,&global_irq_lock));
498 } while (test_and_set_bit(0,&global_irq_lock));
501 * We also need to make sure that nobody else is running
502 * in an interrupt context.
504 wait_on_irq(cpu);
507 * Ok, finally..
509 global_irq_holder = cpu;
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	/* bit 15 of the saved MSR is EE (external interrupts enabled) */
	if (flags & (1 << 15)) {
		int cpu = smp_processor_id();

		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
/* Release the global IRQ lock (unless in irq context) and re-enable
 * local interrupts. */
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
547 * SMP flags value to restore to:
548 * 0 - global cli
549 * 1 - global sti
550 * 2 - local cli
551 * 3 - local sti
553 unsigned long __global_save_flags(void)
555 int retval;
556 int local_enabled;
557 unsigned long flags;
559 __save_flags(flags);
560 local_enabled = (flags >> 15) & 1;
561 /* default to local */
562 retval = 2 + local_enabled;
564 /* check for global flags if we're not in an interrupt */
565 if (!local_irq_count(smp_processor_id())) {
566 if (local_enabled)
567 retval = 1;
568 if (global_irq_holder == (unsigned char) smp_processor_id())
569 retval = 0;
571 return retval;
575 tb(long vals[],
576 int max_size)
578 register unsigned long *orig_sp __asm__ ("r1");
579 register unsigned long lr __asm__ ("r3");
580 unsigned long *sp;
581 int i;
583 asm volatile ("mflr 3");
584 vals[0] = lr;
585 sp = (unsigned long *) *orig_sp;
586 sp = (unsigned long *) *sp;
587 for (i=1; i<max_size; i++) {
588 if (sp == 0) {
589 break;
592 vals[i] = *(sp+1);
593 sp = (unsigned long *) *sp;
596 return i;
/*
 * Restore the SMP interrupt state saved by __global_save_flags().
 * Unknown values get a diagnostic with a short backtrace via tb().
 */
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int depth, i;

		printk("global_restore_flags: %08lx (%08lx)\n",
		       flags, (&flags)[-1]);
		depth = tb(trace, 5);
		printk("tb:");
		for (i = 0; i < depth; i++)
			printk(" %8.8lx", trace[i]);
		printk("\n");
	}
	}
}
631 #endif /* CONFIG_SMP */
633 static struct proc_dir_entry * root_irq_dir;
634 static struct proc_dir_entry * irq_dir [NR_IRQS];
635 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
637 unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
639 #define HEX_DIGITS 8
641 static int irq_affinity_read_proc (char *page, char **start, off_t off,
642 int count, int *eof, void *data)
644 if (count < HEX_DIGITS+1)
645 return -EINVAL;
646 return sprintf (page, "%08x\n", irq_affinity[(int)data]);
649 static unsigned int parse_hex_value (const char *buffer,
650 unsigned long count, unsigned long *ret)
652 unsigned char hexnum [HEX_DIGITS];
653 unsigned long value;
654 int i;
656 if (!count)
657 return -EINVAL;
658 if (count > HEX_DIGITS)
659 count = HEX_DIGITS;
660 if (copy_from_user(hexnum, buffer, count))
661 return -EFAULT;
664 * Parse the first 8 characters as a hex string, any non-hex char
665 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
667 value = 0;
669 for (i = 0; i < count; i++) {
670 unsigned int c = hexnum[i];
672 switch (c) {
673 case '0' ... '9': c -= '0'; break;
674 case 'a' ... 'f': c -= 'a'-10; break;
675 case 'A' ... 'F': c -= 'A'-10; break;
676 default:
677 goto out;
679 value = (value << 4) | c;
681 out:
682 *ret = value;
683 return 0;
686 static int irq_affinity_write_proc (struct file *file, const char *buffer,
687 unsigned long count, void *data)
689 int irq = (int) data, full_count = count, err;
690 unsigned long new_value;
692 if (!irq_desc[irq].handler->set_affinity)
693 return -EIO;
695 err = parse_hex_value(buffer, count, &new_value);
697 #if 0/*CONFIG_SMP*/
699 * Do not allow disabling IRQs completely - it's a too easy
700 * way to make the system unusable accidentally :-) At least
701 * one online CPU still has to be targeted.
703 if (!(new_value & cpu_online_map))
704 return -EINVAL;
705 #endif
707 irq_affinity[irq] = new_value;
708 irq_desc[irq].handler->set_affinity(irq, new_value);
710 return full_count;
713 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
714 int count, int *eof, void *data)
716 unsigned long *mask = (unsigned long *) data;
717 if (count < HEX_DIGITS+1)
718 return -EINVAL;
719 return sprintf (page, "%08lx\n", *mask);
/*
 * /proc write handler: parse a hex value and store it as the new
 * profiling CPU mask.  Returns bytes consumed or -errno.
 */
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	/* BUGFIX: full_count and err were declared unsigned long in the
	 * mask's declarator list; a negative errno stored in an unsigned
	 * and returned through int is mangled on 64-bit.  Use int. */
	int full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
736 #define MAX_NAMELEN 10
738 static void register_irq_proc (unsigned int irq)
740 struct proc_dir_entry *entry;
741 char name [MAX_NAMELEN];
743 if (!root_irq_dir || (irq_desc[irq].handler == NULL))
744 return;
746 memset(name, 0, MAX_NAMELEN);
747 sprintf(name, "%d", irq);
749 /* create /proc/irq/1234 */
750 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
752 /* create /proc/irq/1234/smp_affinity */
753 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
755 entry->nlink = 1;
756 entry->data = (void *)irq;
757 entry->read_proc = irq_affinity_read_proc;
758 entry->write_proc = irq_affinity_write_proc;
760 smp_affinity_entry[irq] = entry;
763 unsigned long prof_cpu_mask = -1;
765 void init_irq_proc (void)
767 struct proc_dir_entry *entry;
768 int i;
770 /* create /proc/irq */
771 root_irq_dir = proc_mkdir("irq", 0);
773 /* create /proc/irq/prof_cpu_mask */
774 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
776 entry->nlink = 1;
777 entry->data = (void *)&prof_cpu_mask;
778 entry->read_proc = prof_cpu_mask_read_proc;
779 entry->write_proc = prof_cpu_mask_write_proc;
782 * Create entries for all existing IRQs.
784 for (i = 0; i < NR_IRQS; i++) {
785 if (irq_desc[i].handler == NULL)
786 continue;
787 register_irq_proc(i);
791 void no_action(int irq, void *dev, struct pt_regs *regs)