1 /* $Id: irq.c,v 1.94 1999/05/28 14:59:20 anton Exp $
2 * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
3 * Sparc the IRQ's are basically 'cast in stone'
4 * and you are supposed to probe the prom's device
5 * node trees to find out who's got which IRQ.
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
9 * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@metabyte.com)
10 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
11 * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
14 #include <linux/config.h>
15 #include <linux/ptrace.h>
16 #include <linux/errno.h>
17 #include <linux/linkage.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/signal.h>
20 #include <linux/sched.h>
21 #include <linux/interrupt.h>
22 #include <linux/malloc.h>
23 #include <linux/random.h>
24 #include <linux/init.h>
25 #include <linux/smp.h>
26 #include <linux/smp_lock.h>
27 #include <linux/delay.h>
28 #include <linux/tasks.h>
30 #include <asm/ptrace.h>
31 #include <asm/processor.h>
32 #include <asm/system.h>
35 #include <asm/vaddrs.h>
36 #include <asm/timer.h>
37 #include <asm/openprom.h>
38 #include <asm/oplib.h>
39 #include <asm/traps.h>
42 #include <asm/pgtable.h>
43 #include <asm/spinlock.h>
44 #include <asm/hardirq.h>
45 #include <asm/softirq.h>
49 * Dave Redman (djhr@tadpole.co.uk)
51 * IRQ numbers.. These are no longer restricted to 15..
53 * this is done to enable SBUS cards and onboard IO to be masked
54 * correctly. Using the interrupt level isn't good enough.
57 * A device interrupting at sbus level6 and the Floppy both come in
58 * at IRQ11, but enabling and disabling them requires writing to
59 * different bits in the SLAVIO/SEC.
61 * As a result of these changes sun4m machines could now support
62 * directed CPU interrupts using the existing enable/disable irq code
/* NOTE(review): this extraction is fragmentary -- the embedded original
 * line numbers skip values (67, 69, 70, 74, 75), so braces and at least
 * one trailing statement are missing here.  Comments describe only what
 * the visible fragments show. */
/* Panic handler used when no machine-specific IRQ handlers were installed:
 * prints the PROM machine type via prom_printf. */
67 static void irq_panic(void)
69 extern char *cputypval
;
70 prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval
);
/* init_timers defaults to irq_panic (cast to the matching function-pointer
 * type) until a platform init routine replaces it with a real
 * timer-setup function. */
74 void (*init_timers
)(void (*)(int, void *,struct pt_regs
*)) =
75 (void (*)(void (*)(int, void *,struct pt_regs
*))) irq_panic
;
78 * Dave Redman (djhr@tadpole.co.uk)
80 * There used to be extern calls and hard coded values here.. very sucky!
81 * instead, because some of the devices attach very early, I do something
82 * equally sucky but at least we'll never try to free statically allocated
83 * space or call kmalloc before kmalloc_init :(.
85 * In fact it's the timer10 that attaches first.. then timer14
86 * then kmalloc_init is called.. then the tty interrupts attach.
/* Up to MAX_STATIC_ALLOC irqaction structs can be handed out before
 * kmalloc is usable (see the comment above: timer10/timer14 attach
 * before kmalloc_init). */
90 #define MAX_STATIC_ALLOC 4
91 struct irqaction static_irqaction
[MAX_STATIC_ALLOC
];
/* Count of static_irqaction slots already consumed. */
92 int static_irq_count
= 0;
/* Per-IRQ action chains, NR_IRQS+1 entries, all initially empty.
 * NOTE(review): the initializer list below appears truncated by the
 * extraction (no closing brace visible); confirm against the original. */
94 struct irqaction
*irq_action
[NR_IRQS
+1] = {
95 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
96 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
/* Format the /proc interrupt listing into buf: one row per IRQ with
 * per-CPU kstat counts, a '+' marker for SA_INTERRUPT (fast) handlers,
 * and the chained handler names.  Delegates entirely to
 * sun4d_get_irq_list() on sun4d machines.
 * NOTE(review): fragmentary -- local declarations (i, j, len), several
 * braces and the final return are missing from this extraction. */
99 int get_irq_list(char *buf
)
102 struct irqaction
* action
;
/* sun4d has its own layout; hand off wholesale. */
107 if (sparc_cpu_model
== sun4d
) {
108 extern int sun4d_get_irq_list(char *);
110 return sun4d_get_irq_list(buf
);
/* Walk every action chain; empty slots are presumably skipped by a
 * missing check -- TODO confirm against the original. */
112 for (i
= 0 ; i
< (NR_IRQS
+1) ; i
++) {
113 action
= *(i
+ irq_action
);
116 len
+= sprintf(buf
+len
, "%3d: ", i
);
118 len
+= sprintf(buf
+len
, "%10u ", kstat_irqs(i
));
/* One count column per online CPU, in logical-map order. */
120 for (j
= 0; j
< smp_num_cpus
; j
++)
121 len
+= sprintf(buf
+len
, "%10u ",
122 kstat
.irqs
[cpu_logical_map(j
)][i
]);
/* '+' flags a fast (SA_INTERRUPT) handler. */
124 len
+= sprintf(buf
+len
, " %c %s",
125 (action
->flags
& SA_INTERRUPT
) ? '+' : ' ',
/* Remaining handlers sharing this IRQ, comma-separated. */
127 for (action
=action
->next
; action
; action
= action
->next
) {
128 len
+= sprintf(buf
+len
, ",%s %s",
129 (action
->flags
& SA_INTERRUPT
) ? " +" : "",
132 len
+= sprintf(buf
+len
, "\n");
/* Detach the handler matching dev_id from the irq's action chain and
 * free it (unless statically allocated).  Delegates to sun4d_free_irq()
 * on sun4d.  NOTE(review): fragmentary -- braces, early-exit gotos/
 * returns, the save_flags/cli pairing for restore_flags, and the
 * disable-irq path are missing from this extraction. */
137 void free_irq(unsigned int irq
, void *dev_id
)
139 struct irqaction
* action
;
/* tmp trails action while walking a shared chain so the predecessor's
 * ->next can be relinked. */
140 struct irqaction
* tmp
= NULL
;
142 unsigned int cpu_irq
;
144 if (sparc_cpu_model
== sun4d
) {
145 extern void sun4d_free_irq(unsigned int, void *);
147 return sun4d_free_irq(irq
, dev_id
);
/* Mask the irq number down to the per-CPU level index. */
149 cpu_irq
= irq
& NR_IRQS
;
150 action
= *(cpu_irq
+ irq_action
);
151 if (cpu_irq
> 14) { /* 14 irq levels on the sparc */
152 printk("Trying to free bogus IRQ %d\n", irq
);
155 if (!action
->handler
) {
156 printk("Trying to free free IRQ%d\n",irq
);
/* Shared chain: locate the entry whose dev_id matches. */
160 for (; action
; action
= action
->next
) {
161 if (action
->dev_id
== dev_id
)
166 printk("Trying to free free shared IRQ%d\n",irq
);
169 } else if (action
->flags
& SA_SHIRQ
) {
170 printk("Trying to free shared IRQ%d with NULL device ID\n", irq
);
173 if (action
->flags
& SA_STATIC_ALLOC
)
175 /* This interrupt is marked as specially allocated
176 * so it is a bad idea to free it.
178 printk("Attempt to free statically allocated IRQ%d (%s)\n",
/* Unlink: either splice past a mid-chain entry or replace the head. */
185 tmp
->next
= action
->next
;
187 *(cpu_irq
+ irq_action
) = action
->next
;
189 kfree_s(action
, sizeof(struct irqaction
));
/* Presumably disables the level when the chain becomes empty --
 * the body of this if is missing; TODO confirm. */
191 if (!(*(cpu_irq
+ irq_action
)))
194 restore_flags(flags
);
/* NOTE(review): the scalar and array versions of local_bh_count /
 * local_irq_count below are presumably the UP and SMP variants of what
 * was an #ifdef __SMP__ split; the preprocessor lines are missing from
 * this extraction -- confirm against the original. */
198 unsigned int local_bh_count
;
199 unsigned int local_irq_count
;
202 /* SMP interrupt locking on Sparc. */
/* Per-CPU nesting counts for bottom halves and hard interrupts. */
204 unsigned int local_bh_count
[NR_CPUS
];
205 unsigned int local_irq_count
[NR_CPUS
];
207 atomic_t global_bh_lock
= ATOMIC_INIT(0);
208 spinlock_t global_bh_count
= SPIN_LOCK_UNLOCKED
;
210 /* Who has global_irq_lock. */
211 unsigned char global_irq_holder
= NO_PROC_ID
;
213 /* This protects IRQ's. */
214 spinlock_t global_irq_lock
= SPIN_LOCK_UNLOCKED
;
216 /* Global IRQ locking depth. */
217 atomic_t global_irq_count
= ATOMIC_INIT(0);
219 /* This protects BH software state (masks, things like that). */
220 spinlock_t sparc_bh_lock
= SPIN_LOCK_UNLOCKED
;
/* Debug helpers used by show() below. */
222 void smp_show_backtrace_all_cpus(void);
223 void show_backtrace(void);
/* Spin budget for the deadlock-detection loops in the wait helpers. */
225 #define MAXCOUNT 100000000
226 #define VERBOSE_DEBUG_IRQLOCK
/* Debug dump printed when an irq-lock wait loop exhausts MAXCOUNT:
 * global irq count, per-CPU irq/bh counts, and (with
 * VERBOSE_DEBUG_IRQLOCK) backtraces for every CPU.
 * NOTE(review): fragmentary -- braces and some printk closers are
 * missing from this extraction. */
228 static void show(char * str
)
231 int cpu
= smp_processor_id();
233 printk("\n%s, CPU %d:\n", str
, cpu
);
234 printk("irq: %d [ ", atomic_read(&global_irq_count
));
236 for (i
= 0; i
< NR_CPUS
; i
++) {
237 printk("%d ", local_irq_count
[i
]);
241 printk("bh: %d [ ", (spin_is_locked(&global_bh_count
) ? 1 : 0));
/* NOTE(review): this loop iterates i but prints local_bh_count[cpu],
 * so the same CPU's count is printed NR_CPUS times -- looks like it
 * should index [i] to mirror the irq loop above; confirm before fixing. */
243 for (i
= 0; i
< NR_CPUS
; i
++) {
244 printk("%d ", local_bh_count
[cpu
]);
248 #ifdef VERBOSE_DEBUG_IRQLOCK
249 smp_show_backtrace_all_cpus();
/* Spin until no CPU holds global_bh_count, with a MAXCOUNT budget.
 * NOTE(review): the loop body (count decrement / show() call) is
 * missing from this extraction -- only the do/while tail is visible. */
255 static inline void wait_on_bh(void)
257 int count
= MAXCOUNT
;
264 } while(spin_is_locked(&global_bh_count
));
268 * We have to allow irqs to arrive between __sti and __cli
/* Stagger the re-acquire attempts per CPU to avoid lock-step contention. */
270 #define SYNC_OTHER_CORES(x) udelay(x+1)
/* Spin until no other CPU is in an interrupt (and, unless we are in a
 * bh ourselves, until no bh is running), re-taking global_irq_lock only
 * when it is safe.  NOTE(review): fragmentary -- the enclosing loops,
 * the __sti/__cli window, count handling, and break/continue structure
 * are missing from this extraction. */
272 static inline void wait_on_irq(int cpu
)
274 int count
= MAXCOUNT
;
278 * Wait until all interrupts are gone. Wait
279 * for bottom half handlers unless we're
280 * already executing in one..
282 if (!atomic_read(&global_irq_count
)) {
283 if (local_bh_count
[cpu
] || !spin_is_locked(&global_bh_count
))
287 /* Duh, we have to loop. Release the lock to avoid deadlocks */
288 spin_unlock(&global_irq_lock
);
296 SYNC_OTHER_CORES(cpu
);
/* Re-check all three conditions before trying to retake the lock. */
298 if (atomic_read(&global_irq_count
))
300 if (spin_is_locked (&global_irq_lock
))
302 if (!local_bh_count
[cpu
] && spin_is_locked(&global_bh_count
))
304 if (spin_trylock(&global_irq_lock
))
311 * This is called when we want to synchronize with
312 * bottom half handlers. We need to wait until
313 * no other CPU is executing any bottom half handler.
315 * Don't wait if we're already running in an interrupt
316 * context or are inside a bh handler.
/* NOTE(review): the body (presumably a wait_on_bh() call) is missing
 * from this extraction; only the guard condition is visible. */
318 void synchronize_bh(void)
320 if (spin_is_locked (&global_bh_count
) && !in_interrupt())
325 * This is called when we want to synchronize with
326 * interrupts. We may for example tell a device to
327 * stop sending interrupts: but to make sure there
328 * are no interrupts that are executing on another
329 * CPU we need to call this function.
/* NOTE(review): the "stupid approach" body (presumably a cli/sti
 * round-trip) is missing from this extraction. */
331 void synchronize_irq(void)
333 if (atomic_read(&global_irq_count
)) {
334 /* Stupid approach */
/* Acquire global_irq_lock for this CPU, spinning (with a MAXCOUNT
 * budget) if another CPU holds it, then record ourselves as holder.
 * Recursive acquisition by the current holder is a no-op.
 * NOTE(review): fragmentary -- the early return for the recursive case,
 * the count/show() handling, and the wait_on_irq() call implied by the
 * "nobody else is running in an interrupt context" comment are missing
 * from this extraction. */
340 static inline void get_irqlock(int cpu
)
342 int count
= MAXCOUNT
;
344 if (!spin_trylock(&global_irq_lock
)) {
345 /* do we already hold the lock? */
346 if ((unsigned char) cpu
== global_irq_holder
)
348 /* Uhhuh.. Somebody else got it. Wait.. */
350 while (spin_is_locked(&global_irq_lock
)) {
357 } while (!spin_trylock(&global_irq_lock
));
360 * We also have to make sure that nobody else is running
361 * in an interrupt context.
/* Ownership is recorded only after the lock is held. */
368 global_irq_holder
= cpu
;
372 * A global "cli()" while in an interrupt context
373 * turns into just a local cli(). Interrupts
374 * should use spinlocks for the (very unlikely)
375 * case that they ever want to protect against
378 * If we already have local interrupts disabled,
379 * this will not turn a local disable into a
380 * global one (problems with spinlocks: this makes
381 * save_flags+cli+sti usable inside a spinlock).
/* NOTE(review): fragmentary -- the save_flags/__cli preamble and the
 * get_irqlock() call implied by the surrounding code are missing; only
 * the PSR_PIL gate (interrupts were enabled) and the not-in-irq check
 * are visible. */
383 void __global_cli(void)
389 if ((flags
& PSR_PIL
) != PSR_PIL
) {
390 int cpu
= smp_processor_id();
392 if (!local_irq_count
[cpu
])
/* Global sti: release the global irq lock (unless called from within an
 * interrupt, where local_irq_count is nonzero) -- the local __sti that
 * presumably follows is missing from this extraction. */
397 void __global_sti(void)
399 int cpu
= smp_processor_id();
401 if (!local_irq_count
[cpu
])
402 release_irqlock(cpu
);
407 * SMP flags value to restore to:
/* Encode the current interrupt state into the SMP flags convention
 * (the visible fragments show: 2+local_enabled for "local", with a
 * global-holder check refining it).  NOTE(review): fragmentary -- the
 * retval declaration, the load of flags, the local_enabled assignment,
 * and the final return are missing from this extraction. */
413 unsigned long __global_save_flags(void)
416 int local_enabled
= 0;
/* PSR_PIL fully set means local interrupts are disabled. */
421 if ((flags
& PSR_PIL
) != PSR_PIL
)
424 /* default to local */
425 retval
= 2 + local_enabled
;
427 /* check for global flags if we're not in an interrupt */
428 if (!local_irq_count
[smp_processor_id()]) {
431 if (global_irq_holder
== (unsigned char) smp_processor_id())
/* Restore from a value produced by __global_save_flags().  Only the
 * bogus-flags error path is visible here: it grabs the caller's return
 * address from %i7 (SPARC in-register) for the diagnostic.
 * NOTE(review): the switch over valid flag values is missing from this
 * extraction. */
437 void __global_restore_flags(unsigned long flags
)
455 __asm__
__volatile__("mov %%i7, %0" : "=r" (pc
));
456 printk("global_restore_flags: Bogon flags(%08lx) caller %08lx\n", flags
, pc
);
/* Called when an interrupt arrives with no registered handler: dump the
 * trap state, list every handler registered across the first 16 levels,
 * and panic.  NOTE(review): fragmentary -- braces, the declaration of
 * i, and the inner chain-walk structure around the prom_printf are
 * missing from this extraction. */
463 void unexpected_irq(int irq
, void *dev_id
, struct pt_regs
* regs
)
466 struct irqaction
* action
;
467 unsigned int cpu_irq
;
469 cpu_irq
= irq
& NR_IRQS
;
470 action
= *(cpu_irq
+ irq_action
);
472 printk("IO device interrupt, irq = %d\n", irq
);
/* u_regs[14] is the frame pointer (%fp / %i6 slot) on SPARC --
 * matches the "FP=" label. */
473 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs
->pc
,
474 regs
->npc
, regs
->u_regs
[14]);
476 printk("Expecting: ");
477 for (i
= 0; i
< 16; i
++)
479 prom_printf("[%s:%d:0x%x] ", action
->name
,
480 (int) i
, (unsigned int) action
->handler
);
483 panic("bogus interrupt received");
/* Main interrupt entry: mask the level, bump kstat, and run every
 * handler on the irq's action chain (or unexpected_irq() if the chain
 * is empty).  NOTE(review): fragmentary -- irq_enter/irq_exit, the
 * #ifdef __SMP__ guards implied by smp4m_irq_rotate, the do/while
 * around the handler dispatch, and the re-enable of the level are
 * missing from this extraction. */
486 void handler_irq(int irq
, struct pt_regs
* regs
)
488 struct irqaction
* action
;
489 int cpu
= smp_processor_id();
491 extern void smp4m_irq_rotate(int cpu
);
/* Mask this priority level while its handlers run. */
495 disable_pil_irq(irq
);
497 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
499 smp4m_irq_rotate(cpu
);
501 action
= *(irq
+ irq_action
);
502 kstat
.irqs
[cpu
][irq
]++;
504 if (!action
|| !action
->handler
)
505 unexpected_irq(irq
, 0, regs
);
/* Dispatch down the shared chain. */
506 action
->handler(irq
, action
->dev_id
, regs
);
507 action
= action
->next
;
513 #ifdef CONFIG_BLK_DEV_FD
514 extern void floppy_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
);
/* Floppy fast-irq trampoline: mask the level, account the interrupt,
 * and forward to the generic floppy_interrupt().  NOTE(review): the
 * irq_enter/irq_exit and enable_pil_irq bracketing is missing from
 * this extraction. */
516 void sparc_floppy_irq(int irq
, void *dev_id
, struct pt_regs
*regs
)
518 int cpu
= smp_processor_id();
520 disable_pil_irq(irq
);
522 kstat
.irqs
[cpu
][irq
]++;
523 floppy_interrupt(irq
, dev_id
, regs
);
529 /* Fast IRQ's on the Sparc can only have one routine attached to them,
530 * thus no sharing possible.
/* Register a "fast" handler by patching the handler's address directly
 * into the trap table (all CPUs' tables on SMP) via INSTANTIATE.
 * Rejects sharing in either direction and refuses a level that already
 * has an owner.  NOTE(review): fragmentary -- save_flags/cli, the
 * error-return paths, kmalloc's flags argument, the cache-flush implied
 * by the XXX comment, and the SMP #ifdefs around the per-CPU trap
 * tables are missing from this extraction. */
532 int request_fast_irq(unsigned int irq
,
533 void (*handler
)(int, void *, struct pt_regs
*),
534 unsigned long irqflags
, const char *devname
)
536 struct irqaction
*action
;
538 unsigned int cpu_irq
;
540 struct tt_entry
*trap_table
;
/* Secondary CPUs' trap tables, patched alongside the boot CPU's. */
541 extern struct tt_entry trapbase_cpu1
, trapbase_cpu2
, trapbase_cpu3
;
544 cpu_irq
= irq
& NR_IRQS
;
549 action
= *(cpu_irq
+ irq_action
);
551 if(action
->flags
& SA_SHIRQ
)
552 panic("Trying to register fast irq when already shared.\n");
553 if(irqflags
& SA_SHIRQ
)
554 panic("Trying to register fast irq as shared.\n");
556 /* Anyway, someone already owns it so cannot be made fast. */
557 printk("request_fast_irq: Trying to register yet already owned.\n");
563 /* If this is flagged as statically allocated then we use our
564 * private struct which is never freed.
566 if (irqflags
& SA_STATIC_ALLOC
) {
567 if (static_irq_count
< MAX_STATIC_ALLOC
)
568 action
= &static_irqaction
[static_irq_count
++];
570 printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
575 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
579 restore_flags(flags
);
583 /* Dork with trap table if we get this far. */
/* Rewrite the 4-instruction trap slot for this level: save PSR, branch
 * to the handler, read WIM, nop. */
584 #define INSTANTIATE(table) \
585 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
586 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
587 SPARC_BRANCH((unsigned long) handler, \
588 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
589 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
590 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
592 INSTANTIATE(sparc_ttable
)
594 trap_table
= &trapbase_cpu1
; INSTANTIATE(trap_table
)
595 trap_table
= &trapbase_cpu2
; INSTANTIATE(trap_table
)
596 trap_table
= &trapbase_cpu3
; INSTANTIATE(trap_table
)
600 * XXX Correct thing would be to flush only I- and D-cache lines
601 * which contain the handler in question. But as of time of the
602 * writing we have no CPU-neutral interface to fine-grained flushes.
606 action
->handler
= handler
;
607 action
->flags
= irqflags
;
609 action
->name
= devname
;
/* Fast irqs are unshared, so no per-device cookie. */
610 action
->dev_id
= NULL
;
613 *(cpu_irq
+ irq_action
) = action
;
616 restore_flags(flags
);
/* Register a normal (slow) handler on an IRQ level, appending to the
 * chain when both the existing action and the new request carry
 * SA_SHIRQ.  Mixing fast (SA_INTERRUPT) and slow handlers on one level
 * is refused.  Delegates to sun4d_request_irq() on sun4d.
 * NOTE(review): fragmentary -- save_flags/cli, the bogus-irq check,
 * error returns, kmalloc's flags argument, the dev_id requirement for
 * shared irqs, the chain-append via tmp, and enabling of the level are
 * missing from this extraction. */
620 int request_irq(unsigned int irq
,
621 void (*handler
)(int, void *, struct pt_regs
*),
622 unsigned long irqflags
, const char * devname
, void *dev_id
)
624 struct irqaction
* action
, *tmp
= NULL
;
626 unsigned int cpu_irq
;
628 if (sparc_cpu_model
== sun4d
) {
629 extern int sun4d_request_irq(unsigned int,
630 void (*)(int, void *, struct pt_regs
*),
631 unsigned long, const char *, void *);
632 return sun4d_request_irq(irq
, handler
, irqflags
, devname
, dev_id
);
634 cpu_irq
= irq
& NR_IRQS
;
641 action
= *(cpu_irq
+ irq_action
);
/* Sharing allowed only when both sides asked for it; tmp ends up at
 * the tail so the new action can be appended. */
643 if ((action
->flags
& SA_SHIRQ
) && (irqflags
& SA_SHIRQ
)) {
644 for (tmp
= action
; tmp
->next
; tmp
= tmp
->next
);
648 if ((action
->flags
& SA_INTERRUPT
) ^ (irqflags
& SA_INTERRUPT
)) {
649 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq
);
652 action
= NULL
; /* Or else! */
657 /* If this is flagged as statically allocated then we use our
658 * private struct which is never freed.
660 if (irqflags
& SA_STATIC_ALLOC
) {
661 if (static_irq_count
< MAX_STATIC_ALLOC
)
662 action
= &static_irqaction
[static_irq_count
++];
664 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq
, devname
);
668 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
672 restore_flags(flags
);
676 action
->handler
= handler
;
677 action
->flags
= irqflags
;
679 action
->name
= devname
;
681 action
->dev_id
= dev_id
;
686 *(cpu_irq
+ irq_action
) = action
;
689 restore_flags(flags
);
693 /* We really don't need these at all on the Sparc. We only have
694 * stubs here because they are exported to modules.
/* NOTE(review): both stub bodies (trivial returns) are missing from
 * this extraction; only the signatures are visible. */
696 unsigned long probe_irq_on(void)
701 int probe_irq_off(unsigned long mask
)
707 * This could probably be made indirect too and assigned in the CPU
708 * bits of the code. That would be much nicer I think and would also
709 * fit in with the idea of being able to tune your kernel for your machine
710 * by removing unneeded machine and device support.
/* Boot-time IRQ setup: dispatch on sparc_cpu_model to the matching
 * platform init routine, halting via the PROM for unknown machines.
 * NOTE(review): fragmentary -- the case labels (sun4c/sun4m/sun4d),
 * the pcic check around sun4m_pci_init_IRQ, the default branch, and
 * the closing braces are missing from this extraction. */
714 __initfunc(void init_IRQ(void))
716 extern void sun4c_init_IRQ( void );
717 extern void sun4m_init_IRQ( void );
718 extern void sun4d_init_IRQ( void );
720 switch(sparc_cpu_model
) {
730 sun4m_pci_init_IRQ();
748 prom_printf("Cannot initialize IRQ's on this Sun machine...");