/*
 * $Id: irq.c,v 1.113 1999/09/17 17:22:56 cort Exp $
 *
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
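
/*
 * Illustration (a sketch, not from the original source): with IRQ0 in
 * the most-significant bit of the 32-bit SIU mask, enabling line
 * irq_nr on the 8xx comes down to something like
 *
 *	ppc_cached_irq_mask[0] |= (1 << (31 - irq_nr));
 *
 * and masking to clearing that same bit, with the cached word then
 * written straight into the SIU SMASK register.  The actual register
 * write lives in board-specific code and is assumed here.
 */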
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/openpic.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/hydra.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigappc.h>
#include <asm/ptrace.h>

#include "local_irq.h"
extern volatile unsigned long ipi_count;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

volatile unsigned char *chrp_int_ack_special;

#define MAXCOUNT 10000000

irq_desc_t irq_desc[NR_IRQS];
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
/* Nasty hack for shared IRQs: we need to make kmalloc calls, but can't
 * do so very early in boot, when request_irq first gets called.
 * This needs to be removed.
 */
#define IRQ_KMALLOC_ENTRIES 8
static int cache_bitmask = 0;
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;
void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;

	if (mem_init_done)
		return kmalloc(size, pri);
	for (i = 0; i < IRQ_KMALLOC_ENTRIES; i++)
		if (!(cache_bitmask & (1 << i))) {
			cache_bitmask |= (1 << i);
			return (void *)(&malloc_cache[i]);
		}
	return NULL;
}
void irq_kfree(void *ptr)
{
	unsigned int i;

	for (i = 0; i < IRQ_KMALLOC_ENTRIES; i++)
		if (ptr == &malloc_cache[i]) {
			cache_bitmask &= ~(1 << i);
			return;
		}
	kfree(ptr);
}
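
/*
 * Note (illustrative): a request_irq() made before mem_init_done is
 * set, e.g. for an interrupt needed during early boot, is satisfied
 * from malloc_cache[] above; irq_kfree() returns such entries to the
 * bitmask rather than handing kfree() memory that kmalloc() never
 * allocated.
 */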
#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
/* Name change so we can catch standard drivers that potentially mess up
 * the internal interrupt controller on 8xx and 8260.  Just bear with me,
 * I don't like this either and I am searching for a better solution.  For
 * now, this is what I need. -- Dan
 */
int request_8xxirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#elif defined(CONFIG_APUS)
int request_sysirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#else
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
#endif
	unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *old, **p, *action;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler) {
		/* Free the irq */
		for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
			/* Found it - now free it */
			save_flags(flags);
			cli();
			*p = action->next;
			restore_flags(flags);
			irq_kfree(action);
			return 0;
		}
		return -ENOENT;
	}

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	save_flags(flags);
	cli();

	p = &irq_desc[irq].action;

	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & action->flags & SA_SHIRQ)) {
			restore_flags(flags);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
	}
	*p = action;

	restore_flags(flags);
	enable_irq(irq);
	return 0;
}
#ifdef CONFIG_APUS
void sys_free_irq(unsigned int irq, void *dev_id)
{
	sys_request_irq(irq, NULL, 0, NULL, dev_id);
}
#else
void free_irq(unsigned int irq, void *dev_id)
{
#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
	request_8xxirq(irq, NULL, 0, NULL, dev_id);
#else
	request_irq(irq, NULL, 0, NULL, dev_id);
#endif
}
#endif
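
/*
 * Usage sketch (illustrative, with hypothetical names my_intr, MY_IRQ
 * and my_dev): a driver sharing an interrupt line passes SA_SHIRQ and
 * a unique dev_id:
 *
 *	static void my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		...
 *	}
 *
 *	if (request_irq(MY_IRQ, my_intr, SA_SHIRQ, "mydev", &my_dev) != 0)
 *		printk("mydev: could not get IRQ %d\n", MY_IRQ);
 *	...
 *	free_irq(MY_IRQ, &my_dev);
 *
 * Without SA_SHIRQ in both the old and new action's flags, the second
 * request on a line fails with -EBUSY.
 */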
/* XXX should implement irq disable depth like on intel */
void disable_irq_nosync(unsigned int irq_nr)
{
	mask_irq(irq_nr);
}

void disable_irq(unsigned int irq_nr)
{
	mask_irq(irq_nr);
	synchronize_irq();
}

void enable_irq(unsigned int irq_nr)
{
	unmask_irq(irq_nr);
}
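
/*
 * Note (illustrative): disable_irq() does not return until any handler
 * for irq_nr still running on another CPU has finished, courtesy of
 * synchronize_irq(); disable_irq_nosync() only masks the line, so a
 * handler may still be in flight when it returns.
 */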
int get_irq_list(char *buf)
{
#ifdef CONFIG_APUS
	return apus_get_irq_list(buf);
#else
	int i, len = 0, j;
	struct irqaction *action;

	len += sprintf(buf+len, "           ");
	for (j = 0; j < smp_num_cpus; j++)
		len += sprintf(buf+len, "CPU%d       ", j);
	*(char *)(buf+len++) = '\n';

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if (!action || !action->handler)
			continue;
		len += sprintf(buf+len, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf+len, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (irq_desc[i].handler)
			len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename);
		else
			len += sprintf(buf+len, "  None      ");
		len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
		len += sprintf(buf+len, "    %s", action->name);
		for (action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s", action->name);
		}
		len += sprintf(buf+len, "\n");
	}
#ifdef CONFIG_SMP
	/* should this be per processor send/receive? */
	len += sprintf(buf+len, "IPI: %10lu\n", ipi_count);
#endif
	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
	return len;
#endif /* CONFIG_APUS */
}
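
/*
 * Sample output (illustrative, values invented) as rendered by the
 * sprintf calls above:
 *
 *	           CPU0       CPU1
 *	  5:      12034          7  OpenPIC   Level     eth0
 *	 14:        996          0  8259      Edge      ide0
 *	IPI:        142
 *	BAD:          0
 */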
/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();

	mask_and_ack_irq(irq);
	status = 0;
	action = irq_desc[irq].action;
	kstat.irqs[cpu][irq]++;
	if (action && action->handler) {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		__cli();
		if (irq_desc[irq].handler) {
			if (irq_desc[irq].handler->end)
				irq_desc[irq].handler->end(irq);
			else if (irq_desc[irq].handler->enable)
				irq_desc[irq].handler->enable(irq);
		}
	} else {
		ppc_spurious_interrupts++;
		printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
		disable_irq(irq);
		if (irq_desc[irq].handler->end)
			irq_desc[irq].handler->end(irq);
	}
}
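
/*
 * Note (illustrative): when the first action was registered without
 * SA_INTERRUPT, the chain runs with interrupts re-enabled (__sti()
 * above), so "slow" handlers can themselves be interrupted; with
 * SA_INTERRUPT the whole chain runs with interrupts off.
 */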
asmlinkage int do_IRQ(struct pt_regs *regs, int isfake)
{
	int cpu = smp_processor_id();
	int irq;

	hardirq_enter(cpu);

	/* every arch is required to have a get_irq -- Cort */
	irq = ppc_md.get_irq(regs);

	if (irq < 0) {
		/* -2 means ignore, already handled */
		if (irq != -2) {
			printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
			       irq, regs->nip);
			ppc_spurious_interrupts++;
		}
		goto out;
	}
	ppc_irq_dispatch_handler(regs, irq);
	if (ppc_md.post_irq)
		ppc_md.post_irq(regs, irq);

out:
	hardirq_exit(cpu);
	return 1; /* lets ret_from_int know we can do checks */
}
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long irqs)
{
	return 0;
}

void __init init_IRQ(void)
{
	static int once = 0;

	if (once)
		return;

	once++;
	ppc_md.init_IRQ();
}
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;

atomic_t global_bh_count;
static void show(char *str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       local_irq_count(0),
	       local_irq_count(1));
	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       local_bh_count(0),
	       local_bh_count(1));
	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {
		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0, &global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			/* don't worry about the lock race Linus found
			 * on intel here. -- Cort
			 */
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0, &global_irq_lock))
				break;
		}
	}
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}
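
/*
 * Usage sketch (illustrative): a driver quiescing its device would
 * first tell the hardware to stop raising interrupts, then call
 * synchronize_irq() before tearing down state that a handler still
 * executing on another CPU might touch.
 */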
static inline void get_irqlock(int cpu)
{
	unsigned int loops = MAXCOUNT;

	if (test_and_set_bit(0, &global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				if (!--loops) {
					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
					loops = MAXCOUNT;
				}
			} while (test_bit(0, &global_irq_lock));
		} while (test_and_set_bit(0, &global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & (1 << 15)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
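
/*
 * Note (illustrative): bit 15 tested above is MSR_EE, the PowerPC
 * external-interrupt-enable bit in the saved MSR, so the irq lock is
 * only taken when interrupts were actually enabled on entry.
 */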
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> 15) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
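
/*
 * Usage sketch (illustrative): the value encodes both scope and state,
 * so a sequence such as
 *
 *	unsigned long flags = __global_save_flags();
 *	__global_cli();
 *	...critical section...
 *	__global_restore_flags(flags);
 *
 * restores whichever of global/local cli/sti was in force beforehand.
 */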
/* Grab a rough backtrace: trace[0] is the LR, then saved LRs walked
 * up the stack frames, for the diagnostic printk below.
 */
static int tb(unsigned long *trace, int max_size)
{
	register unsigned long *orig_sp __asm__ ("r1");
	register unsigned long lr __asm__ ("r3");
	unsigned long *sp;
	int i;

	asm volatile ("mflr 3");
	trace[0] = lr;
	sp = (unsigned long *) *orig_sp;
	sp = (unsigned long *) *sp;
	for (i = 1; i < max_size; i++) {
		if (sp == NULL)
			break;
		trace[i] = sp[1];	/* saved LR in the caller's frame */
		sp = (unsigned long *) *sp;
	}
	return i;
}
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int count, i;

		printk("global_restore_flags: %08lx (%08lx)\n",
		       flags, (&flags)[-1]);
		count = tb(trace, 5);
		for (i = 0; i < count; i++) {
			printk(" %8.8lx", trace[i]);
		}
		printk("\n");
	}
	}
}

#endif /* CONFIG_SMP */
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

unsigned int irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff };

#define HEX_DIGITS 8
static int irq_affinity_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf(page, "%08x\n", irq_affinity[(int)data]);
}
static unsigned int parse_hex_value(const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum[HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
			case '0' ... '9': c -= '0'; break;
			case 'a' ... 'f': c -= 'a'-10; break;
			case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
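
/*
 * Worked example (illustrative): writing "00e1\n" stops parsing at the
 * '\n' (a non-hex character) and yields 0xe1; as the comment above
 * notes, "e1", "00E1" and "E1" all produce the same value.
 */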
static int irq_affinity_write_proc(struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (int) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}
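
/*
 * Usage sketch (illustrative): from user space the mask is written as
 * a hex bitmask of CPUs, e.g.
 *
 *	echo 00000003 > /proc/irq/5/smp_affinity
 *
 * targets IRQ 5 at CPUs 0 and 1, provided the controller implements
 * set_affinity and at least one targeted CPU is online.
 */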
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf(page, "%08lx\n", *mask);
}
static int prof_cpu_mask_write_proc(struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL))
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	entry->nlink = 1;
	entry->data = (void *)irq;
	entry->read_proc = irq_affinity_read_proc;
	entry->write_proc = irq_affinity_write_proc;

	smp_affinity_entry[irq] = entry;
}
unsigned long prof_cpu_mask = -1;

void init_irq_proc(void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].handler == NULL)
			continue;
		register_irq_proc(i);
	}
}
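
/*
 * Resulting /proc layout (illustrative):
 *
 *	/proc/irq/prof_cpu_mask
 *	/proc/irq/5/smp_affinity
 *	/proc/irq/14/smp_affinity
 *
 * with one directory per IRQ that has a registered controller.
 */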
void no_action(int irq, void *dev, struct pt_regs *regs)
{
}