Import 2.3.18pre1
[davej-history.git] / arch / sparc / kernel / irq.c
blobadec38c8b310a7a9caa1155ca4c7bc8be7efb54c
/* $Id: irq.c,v 1.96 1999/08/31 06:54:21 davem Exp $
 *  arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
 *                            Sparc the IRQ's are basically 'cast in stone'
 *                            and you are supposed to probe the prom's device
 *                            node trees to find out who's got which IRQ.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1995 Pete A. Zaitcev (zaitcev@metabyte.com)
 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 *  Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
 */
14 #include <linux/config.h>
15 #include <linux/ptrace.h>
16 #include <linux/errno.h>
17 #include <linux/linkage.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/signal.h>
20 #include <linux/sched.h>
21 #include <linux/interrupt.h>
22 #include <linux/malloc.h>
23 #include <linux/random.h>
24 #include <linux/init.h>
25 #include <linux/smp.h>
26 #include <linux/smp_lock.h>
27 #include <linux/delay.h>
28 #include <linux/threads.h>
29 #include <linux/spinlock.h>
31 #include <asm/ptrace.h>
32 #include <asm/processor.h>
33 #include <asm/system.h>
34 #include <asm/psr.h>
35 #include <asm/smp.h>
36 #include <asm/vaddrs.h>
37 #include <asm/timer.h>
38 #include <asm/openprom.h>
39 #include <asm/oplib.h>
40 #include <asm/traps.h>
41 #include <asm/irq.h>
42 #include <asm/io.h>
43 #include <asm/pgtable.h>
44 #include <asm/hardirq.h>
45 #include <asm/softirq.h>
46 #include <asm/pcic.h>
/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15..
 *
 * this is done to enable SBUS cards and onboard IO to be masked
 * correctly. using the interrupt level isn't good enough.
 *
 * For example:
 *   A device interrupting at sbus level6 and the Floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes sun4m machines could now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 */
/* Last-resort handler: reached only if no machine-specific IRQ setup
 * ever replaced the default hooks.  Reports the PROM machine type and
 * drops back into the PROM.
 */
static void irq_panic(void)
{
        extern char *cputypval;

        prom_printf("machine: %s doesn't have irq handlers defined!\n", cputypval);
        prom_halt();
}
74 void (*init_timers)(void (*)(int, void *,struct pt_regs *)) =
75 (void (*)(void (*)(int, void *,struct pt_regs *))) irq_panic;
/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard coded values here.. very sucky!
 * instead, because some of the devices attach very early, I do something
 * equally sucky but at least we'll never try to free statically allocated
 * space or call kmalloc before kmalloc_init :(.
 *
 * In fact it's the timer10 that attaches first.. then timer14
 * then kmalloc_init is called.. then the tty interrupts attach.
 * hmmm....
 */
90 #define MAX_STATIC_ALLOC 4
91 struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92 int static_irq_count = 0;
94 struct irqaction *irq_action[NR_IRQS+1] = {
95 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
96 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
99 int get_irq_list(char *buf)
101 int i, len = 0;
102 struct irqaction * action;
103 #ifdef __SMP__
104 int j;
105 #endif
107 if (sparc_cpu_model == sun4d) {
108 extern int sun4d_get_irq_list(char *);
110 return sun4d_get_irq_list(buf);
112 for (i = 0 ; i < (NR_IRQS+1) ; i++) {
113 action = *(i + irq_action);
114 if (!action)
115 continue;
116 len += sprintf(buf+len, "%3d: ", i);
117 #ifndef __SMP__
118 len += sprintf(buf+len, "%10u ", kstat_irqs(i));
119 #else
120 for (j = 0; j < smp_num_cpus; j++)
121 len += sprintf(buf+len, "%10u ",
122 kstat.irqs[cpu_logical_map(j)][i]);
123 #endif
124 len += sprintf(buf+len, " %c %s",
125 (action->flags & SA_INTERRUPT) ? '+' : ' ',
126 action->name);
127 for (action=action->next; action; action = action->next) {
128 len += sprintf(buf+len, ",%s %s",
129 (action->flags & SA_INTERRUPT) ? " +" : "",
130 action->name);
132 len += sprintf(buf+len, "\n");
134 return len;
137 void free_irq(unsigned int irq, void *dev_id)
139 struct irqaction * action;
140 struct irqaction * tmp = NULL;
141 unsigned long flags;
142 unsigned int cpu_irq;
144 if (sparc_cpu_model == sun4d) {
145 extern void sun4d_free_irq(unsigned int, void *);
147 return sun4d_free_irq(irq, dev_id);
149 cpu_irq = irq & NR_IRQS;
150 action = *(cpu_irq + irq_action);
151 if (cpu_irq > 14) { /* 14 irq levels on the sparc */
152 printk("Trying to free bogus IRQ %d\n", irq);
153 return;
155 if (!action->handler) {
156 printk("Trying to free free IRQ%d\n",irq);
157 return;
159 if (dev_id) {
160 for (; action; action = action->next) {
161 if (action->dev_id == dev_id)
162 break;
163 tmp = action;
165 if (!action) {
166 printk("Trying to free free shared IRQ%d\n",irq);
167 return;
169 } else if (action->flags & SA_SHIRQ) {
170 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
171 return;
173 if (action->flags & SA_STATIC_ALLOC)
175 /* This interrupt is marked as specially allocated
176 * so it is a bad idea to free it.
178 printk("Attempt to free statically allocated IRQ%d (%s)\n",
179 irq, action->name);
180 return;
183 save_and_cli(flags);
184 if (action && tmp)
185 tmp->next = action->next;
186 else
187 *(cpu_irq + irq_action) = action->next;
189 kfree_s(action, sizeof(struct irqaction));
191 if (!(*(cpu_irq + irq_action)))
192 disable_irq(irq);
194 restore_flags(flags);
197 #ifndef __SMP__
198 unsigned int local_bh_count;
199 unsigned int local_irq_count;
201 #else
202 /* SMP interrupt locking on Sparc. */
204 unsigned int local_bh_count[NR_CPUS];
205 unsigned int local_irq_count[NR_CPUS];
207 atomic_t global_bh_lock = ATOMIC_INIT(0);
208 spinlock_t global_bh_count = SPIN_LOCK_UNLOCKED;
210 /* Who has global_irq_lock. */
211 unsigned char global_irq_holder = NO_PROC_ID;
213 /* This protects IRQ's. */
214 spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
216 /* Global IRQ locking depth. */
217 atomic_t global_irq_count = ATOMIC_INIT(0);
219 /* This protects BH software state (masks, things like that). */
220 spinlock_t sparc_bh_lock = SPIN_LOCK_UNLOCKED;
222 void smp_show_backtrace_all_cpus(void);
223 void show_backtrace(void);
225 #define MAXCOUNT 100000000
226 #define VERBOSE_DEBUG_IRQLOCK
228 static void show(char * str)
230 int i;
231 int cpu = smp_processor_id();
233 printk("\n%s, CPU %d:\n", str, cpu);
234 printk("irq: %d [ ", atomic_read(&global_irq_count));
236 for (i = 0; i < NR_CPUS; i++) {
237 printk("%d ", local_irq_count[i]);
239 printk("]\n");
241 printk("bh: %d [ ", (spin_is_locked(&global_bh_count) ? 1 : 0));
243 for (i = 0; i < NR_CPUS; i++) {
244 printk("%d ", local_bh_count[cpu]);
246 printk("]\n");
248 #ifdef VERBOSE_DEBUG_IRQLOCK
249 smp_show_backtrace_all_cpus();
250 #else
251 show_backtrace();
252 #endif
255 static inline void wait_on_bh(void)
257 int count = MAXCOUNT;
258 do {
259 if(!--count) {
260 show("wait_on_bh");
261 count = 0;
263 barrier();
264 } while(spin_is_locked(&global_bh_count));
268 * We have to allow irqs to arrive between __sti and __cli
270 #define SYNC_OTHER_CORES(x) udelay(x+1)
272 static inline void wait_on_irq(int cpu)
274 int count = MAXCOUNT;
276 for (;;) {
278 * Wait until all interrupts are gone. Wait
279 * for bottom half handlers unless we're
280 * already executing in one..
282 if (!atomic_read(&global_irq_count)) {
283 if (local_bh_count[cpu] || !spin_is_locked(&global_bh_count))
284 break;
287 /* Duh, we have to loop. Release the lock to avoid deadlocks */
288 spin_unlock(&global_irq_lock);
290 for (;;) {
291 if (!--count) {
292 show("wait_on_irq");
293 count = ~0;
295 __sti();
296 SYNC_OTHER_CORES(cpu);
297 __cli();
298 if (atomic_read(&global_irq_count))
299 continue;
300 if (spin_is_locked (&global_irq_lock))
301 continue;
302 if (!local_bh_count[cpu] && spin_is_locked(&global_bh_count))
303 continue;
304 if (spin_trylock(&global_irq_lock))
305 break;
311 * This is called when we want to synchronize with
312 * bottom half handlers. We need to wait until
313 * no other CPU is executing any bottom half handler.
315 * Don't wait if we're already running in an interrupt
316 * context or are inside a bh handler.
318 void synchronize_bh(void)
320 if (spin_is_locked (&global_bh_count) && !in_interrupt())
321 wait_on_bh();
325 * This is called when we want to synchronize with
326 * interrupts. We may for example tell a device to
327 * stop sending interrupts: but to make sure there
328 * are no interrupts that are executing on another
329 * CPU we need to call this function.
331 void synchronize_irq(void)
333 if (atomic_read(&global_irq_count)) {
334 /* Stupid approach */
335 cli();
336 sti();
340 static inline void get_irqlock(int cpu)
342 int count = MAXCOUNT;
344 if (!spin_trylock(&global_irq_lock)) {
345 /* do we already hold the lock? */
346 if ((unsigned char) cpu == global_irq_holder)
347 return;
348 /* Uhhuh.. Somebody else got it. Wait.. */
349 do {
350 while (spin_is_locked(&global_irq_lock)) {
351 if (!--count) {
352 show("get_irqlock");
353 count = ~0;
355 barrier();
357 } while (!spin_trylock(&global_irq_lock));
360 * We also to make sure that nobody else is running
361 * in an interrupt context.
363 wait_on_irq(cpu);
366 * Ok, finally..
368 global_irq_holder = cpu;
372 * A global "cli()" while in an interrupt context
373 * turns into just a local cli(). Interrupts
374 * should use spinlocks for the (very unlikely)
375 * case that they ever want to protect against
376 * each other.
378 * If we already have local interrupts disabled,
379 * this will not turn a local disable into a
380 * global one (problems with spinlocks: this makes
381 * save_flags+cli+sti usable inside a spinlock).
383 void __global_cli(void)
385 unsigned int flags;
387 __save_flags(flags);
389 if ((flags & PSR_PIL) != PSR_PIL) {
390 int cpu = smp_processor_id();
391 __cli();
392 if (!local_irq_count[cpu])
393 get_irqlock(cpu);
397 void __global_sti(void)
399 int cpu = smp_processor_id();
401 if (!local_irq_count[cpu])
402 release_irqlock(cpu);
403 __sti();
407 * SMP flags value to restore to:
408 * 0 - global cli
409 * 1 - global sti
410 * 2 - local cli
411 * 3 - local sti
413 unsigned long __global_save_flags(void)
415 int retval;
416 int local_enabled = 0;
417 unsigned long flags;
419 __save_flags(flags);
421 if ((flags & PSR_PIL) != PSR_PIL)
422 local_enabled = 1;
424 /* default to local */
425 retval = 2 + local_enabled;
427 /* check for global flags if we're not in an interrupt */
428 if (!local_irq_count[smp_processor_id()]) {
429 if (local_enabled)
430 retval = 1;
431 if (global_irq_holder == (unsigned char) smp_processor_id())
432 retval = 0;
434 return retval;
/* Inverse of __global_save_flags: re-establish the irq state encoded
 * in flags (see the 0..3 encoding above).  Anything else is a caller
 * bug and is reported with the caller's return address.
 */
void __global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_cli();
                break;
        case 1:
                __global_sti();
                break;
        case 2:
                __cli();
                break;
        case 3:
                __sti();
                break;
        default:
        {
                unsigned long pc;
                /* %i7 holds our caller's return address on sparc. */
                __asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
                printk("global_restore_flags: Bogon flags(%08lx) caller %08lx\n", flags, pc);
        }
        }
}
461 #endif /* __SMP__ */
463 void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
465 int i;
466 struct irqaction * action;
467 unsigned int cpu_irq;
469 cpu_irq = irq & NR_IRQS;
470 action = *(cpu_irq + irq_action);
472 printk("IO device interrupt, irq = %d\n", irq);
473 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
474 regs->npc, regs->u_regs[14]);
475 if (action) {
476 printk("Expecting: ");
477 for (i = 0; i < 16; i++)
478 if (action->handler)
479 prom_printf("[%s:%d:0x%x] ", action->name,
480 (int) i, (unsigned int) action->handler);
482 printk("AIEEE\n");
483 panic("bogus interrupt received");
486 void handler_irq(int irq, struct pt_regs * regs)
488 struct irqaction * action;
489 int cpu = smp_processor_id();
490 #ifdef __SMP__
491 extern void smp4m_irq_rotate(int cpu);
492 #endif
494 irq_enter(cpu, irq);
495 disable_pil_irq(irq);
496 #ifdef __SMP__
497 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
498 if(irq < 10)
499 smp4m_irq_rotate(cpu);
500 #endif
501 action = *(irq + irq_action);
502 kstat.irqs[cpu][irq]++;
503 do {
504 if (!action || !action->handler)
505 unexpected_irq(irq, 0, regs);
506 action->handler(irq, action->dev_id, regs);
507 action = action->next;
508 } while (action);
509 enable_pil_irq(irq);
510 irq_exit(cpu, irq);
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Dedicated entry point for the floppy interrupt: same bookkeeping as
 * handler_irq() but dispatches straight to floppy_interrupt() without
 * walking an action chain.
 */
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        disable_pil_irq(irq);
        irq_enter(cpu, irq);
        kstat.irqs[cpu][irq]++;
        floppy_interrupt(irq, dev_id, regs);
        irq_exit(cpu, irq);
        enable_pil_irq(irq);
}
#endif
529 /* Fast IRQ's on the Sparc can only have one routine attached to them,
530 * thus no sharing possible.
532 int request_fast_irq(unsigned int irq,
533 void (*handler)(int, void *, struct pt_regs *),
534 unsigned long irqflags, const char *devname)
536 struct irqaction *action;
537 unsigned long flags;
538 unsigned int cpu_irq;
539 #ifdef __SMP__
540 struct tt_entry *trap_table;
541 extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
542 #endif
544 cpu_irq = irq & NR_IRQS;
545 if(cpu_irq > 14)
546 return -EINVAL;
547 if(!handler)
548 return -EINVAL;
549 action = *(cpu_irq + irq_action);
550 if(action) {
551 if(action->flags & SA_SHIRQ)
552 panic("Trying to register fast irq when already shared.\n");
553 if(irqflags & SA_SHIRQ)
554 panic("Trying to register fast irq as shared.\n");
556 /* Anyway, someone already owns it so cannot be made fast. */
557 printk("request_fast_irq: Trying to register yet already owned.\n");
558 return -EBUSY;
561 save_and_cli(flags);
563 /* If this is flagged as statically allocated then we use our
564 * private struct which is never freed.
566 if (irqflags & SA_STATIC_ALLOC) {
567 if (static_irq_count < MAX_STATIC_ALLOC)
568 action = &static_irqaction[static_irq_count++];
569 else
570 printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
571 irq, devname);
574 if (action == NULL)
575 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
576 GFP_KERNEL);
578 if (!action) {
579 restore_flags(flags);
580 return -ENOMEM;
583 /* Dork with trap table if we get this far. */
584 #define INSTANTIATE(table) \
585 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
586 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
587 SPARC_BRANCH((unsigned long) handler, \
588 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
589 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
590 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
592 INSTANTIATE(sparc_ttable)
593 #ifdef __SMP__
594 trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
595 trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
596 trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
597 #endif
598 #undef INSTANTIATE
600 * XXX Correct thing whould be to flush only I- and D-cache lines
601 * which contain the handler in question. But as of time of the
602 * writing we have no CPU-neutral interface to fine-grained flushes.
604 flush_cache_all();
606 action->handler = handler;
607 action->flags = irqflags;
608 action->mask = 0;
609 action->name = devname;
610 action->dev_id = NULL;
611 action->next = NULL;
613 *(cpu_irq + irq_action) = action;
615 enable_irq(irq);
616 restore_flags(flags);
617 return 0;
620 int request_irq(unsigned int irq,
621 void (*handler)(int, void *, struct pt_regs *),
622 unsigned long irqflags, const char * devname, void *dev_id)
624 struct irqaction * action, *tmp = NULL;
625 unsigned long flags;
626 unsigned int cpu_irq;
628 if (sparc_cpu_model == sun4d) {
629 extern int sun4d_request_irq(unsigned int,
630 void (*)(int, void *, struct pt_regs *),
631 unsigned long, const char *, void *);
632 return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
634 cpu_irq = irq & NR_IRQS;
635 if(cpu_irq > 14)
636 return -EINVAL;
638 if (!handler)
639 return -EINVAL;
641 action = *(cpu_irq + irq_action);
642 if (action) {
643 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
644 for (tmp = action; tmp->next; tmp = tmp->next);
645 } else {
646 return -EBUSY;
648 if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
649 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
650 return -EBUSY;
652 action = NULL; /* Or else! */
655 save_and_cli(flags);
657 /* If this is flagged as statically allocated then we use our
658 * private struct which is never freed.
660 if (irqflags & SA_STATIC_ALLOC) {
661 if (static_irq_count < MAX_STATIC_ALLOC)
662 action = &static_irqaction[static_irq_count++];
663 else
664 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
667 if (action == NULL)
668 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
669 GFP_KERNEL);
671 if (!action) {
672 restore_flags(flags);
673 return -ENOMEM;
676 action->handler = handler;
677 action->flags = irqflags;
678 action->mask = 0;
679 action->name = devname;
680 action->next = NULL;
681 action->dev_id = dev_id;
683 if (tmp)
684 tmp->next = action;
685 else
686 *(cpu_irq + irq_action) = action;
688 enable_irq(irq);
689 restore_flags(flags);
690 return 0;
/* We really don't need these at all on the Sparc. We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
        return 0;
}

int probe_irq_off(unsigned long mask)
{
        return 0;
}
706 /* djhr
707 * This could probably be made indirect too and assigned in the CPU
708 * bits of the code. That would be much nicer I think and would also
709 * fit in with the idea of being able to tune your kernel for your machine
710 * by removing unrequired machine and device support.
714 void __init init_IRQ(void)
716 extern void sun4c_init_IRQ( void );
717 extern void sun4m_init_IRQ( void );
718 extern void sun4d_init_IRQ( void );
720 switch(sparc_cpu_model) {
721 case sun4c:
722 case sun4:
723 sun4c_init_IRQ();
724 break;
726 case sun4m:
727 #ifdef CONFIG_PCI
728 pcic_probe();
729 if (pci_present()) {
730 sun4m_pci_init_IRQ();
731 break;
733 #endif
734 sun4m_init_IRQ();
735 break;
737 case sun4d:
738 sun4d_init_IRQ();
739 break;
741 case ap1000:
742 #if CONFIG_AP1000
743 ap_init_IRQ();;
744 break;
745 #endif
747 default:
748 prom_printf("Cannot initialize IRQ's on this Sun machine...");
749 break;
751 btfixup();