/* $Id: irq.c,v 1.13 1997/05/27 07:54:28 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
/* Internal flag, should not be visible elsewhere at all. */
#define SA_SYSIO_MASKED 0x100
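/* request_irq() sets this flag for sources routed through a SYSIO interrupt
 * map register and stashes the matching Interrupt Clear register pointer in
 * action->mask; handler_irq() then writes SYSIO_ICLR_IDLE there once the
 * handlers have run.
 */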
/* UPA nodes send interrupt packet to UltraSparc with first data reg value
 * low 5 bits holding the IRQ identifier being delivered. We must translate
 * this into a non-vector IRQ so we can set the softint on this cpu. To
 * make things even more swift we store the complete mask here.
 */
#define NUM_IVECS 2048 /* XXX may need more on sunfire/wildfire */

unsigned long ivector_to_mask[NUM_IVECS];
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC 4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;
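/* Note: this table is indexed by the CPU PIL level computed via ino_to_pil[]
 * below, not by the SYSIO INO number a driver passes to request_irq().
 */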
static struct irqaction *irq_action[NR_IRQS+1] = {
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
int get_irq_list(char *buf)
{
        int i, len = 0;
        struct irqaction *action;

        for(i = 0; i < (NR_IRQS + 1); i++) {
                if(!(action = *(i + irq_action)))
                        continue;
                len += sprintf(buf + len, "%2d: %8d %c %s",
                               i, kstat.interrupts[i],
                               (action->flags & SA_INTERRUPT) ? '+' : ' ',
                               action->name);
                for(action = action->next; action; action = action->next) {
                        len += sprintf(buf+len, ",%s %s",
                                       (action->flags & SA_INTERRUPT) ? " +" : "",
                                       action->name);
                }
                len += sprintf(buf + len, "\n");
        }
        return len;
}
/* INO number to Sparc PIL level. */
static unsigned char ino_to_pil[] = {
        0, 1, 2, 3, 5, 7, 8, 9,         /* SBUS slot 0 */
        0, 1, 2, 3, 5, 7, 8, 9,         /* SBUS slot 1 */
        0, 1, 2, 3, 5, 7, 8, 9,         /* SBUS slot 2 */
        0, 1, 2, 3, 5, 7, 8, 9,         /* SBUS slot 3 */
        3,              /* Onboard SCSI */
        5,              /* Onboard Ethernet */
/*XXX*/ 8,              /* Onboard BPP */
        0,              /* Bogon */
        13,             /* Audio */
/*XXX*/ 15,             /* PowerFail */
        0,              /* Bogon */
        0,              /* Bogon */
        12,             /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
        11,             /* Floppy */
        0,              /* Spare Hardware (bogon for now) */
        0,              /* Keyboard (bogon for now) */
        0,              /* Mouse (bogon for now) */
        0,              /* Serial (bogon for now) */
        0, 0,           /* Bogon, Bogon */
        10,             /* Timer 0 */
        11,             /* Timer 1 */
        0, 0,           /* Bogon, Bogon */
        15,             /* Uncorrectable SBUS Error */
        15,             /* Correctable SBUS Error */
        15,             /* SBUS Error */
/*XXX*/ 0,              /* Power Management (bogon for now) */
};
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define offset(x) ((unsigned long)(&(((struct sysio_regs *)0)->x)))
#define bogon     ((unsigned long) -1)
static unsigned long irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
/* Onboard devices (not relevant/used on SunFire). */
offset(imap_scsi), offset(imap_eth), offset(imap_bpp), bogon,
offset(imap_audio), offset(imap_pfail), bogon, bogon,
offset(imap_kms), offset(imap_flpy), offset(imap_shw),
offset(imap_kbd), offset(imap_ms), offset(imap_ser), bogon, bogon,
offset(imap_tim0), offset(imap_tim1), bogon, bogon,
offset(imap_ue), offset(imap_ce), offset(imap_sberr),
offset(imap_pmgmt),
};

#undef bogon

#define NUM_IRQ_ENTRIES (sizeof(irq_offsets) / sizeof(irq_offsets[0]))
/* Convert an "interrupts" property IRQ level to an SBUS/SYSIO
 * Interrupt Mapping register pointer, or NULL if none exists.
 */
static unsigned int *irq_to_imap(unsigned int irq)
{
        unsigned long offset;
        struct sysio_regs *sregs;

        if((irq == 14) ||
           (irq >= NUM_IRQ_ENTRIES) ||
           ((offset = irq_offsets[irq]) == ((unsigned long)-1)))
                return NULL;

        sregs = SBus_chain->iommu->sysio_regs;
        offset += ((unsigned long) sregs);
        return ((unsigned int *)offset) + 1;
}
/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer.
 */
static unsigned int *imap_to_iclr(unsigned int *imap)
{
        unsigned long diff;

        diff = offset(iclr_unused0) - offset(imap_slot0);
        return (unsigned int *) (((unsigned long)imap) + diff);
}

#undef offset
/* For non-SBUS IRQ's we do nothing, else we must enable them in the
 * appropriate SYSIO interrupt map registers.
 */
void enable_irq(unsigned int irq)
{
        unsigned long tid;
        unsigned int *imap;

        /* If this is for the tick interrupt, just ignore, note
         * that this is the one and only locally generated interrupt
         * source, all others come from external sources (essentially
         * any UPA device which is an interruptor).  (actually, on
         * second thought Ultra can generate local interrupts for
         * async memory errors and we may setup handlers for those
         * at some point as well)
         *
         * XXX See commentary below in request_irq() this assumption
         * XXX is broken and needs to be fixed.
         */
        if(irq == 14)
                return;

        /* Check for bogons. */
        imap = irq_to_imap(irq);
        if(imap == NULL)
                goto do_the_stb_watoosi;

        /* We send it to our UPA MID, for SMP this will be different. */
        __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (tid) : "i" (ASI_UPA_CONFIG));
        tid = ((tid & UPA_CONFIG_MID) << 9);

        /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
         * of this SYSIO's preconfigured IGN in the SYSIO Control
         * Register, the hardware just mirrors that value here.
         * However for Graphics and UPA Slave devices the full
         * SYSIO_IMAP_INR field can be set by the programmer here.
         * (XXX we will have to handle those for FFB etc. XXX)
         */
        *imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
        return;

do_the_stb_watoosi:
        printk("Cannot enable irq(%d), doing the \"STB Watoosi\" instead.", irq);
        panic("Trying to enable bogon IRQ");
}
void disable_irq(unsigned int irq)
{
        unsigned int *imap;

        /* XXX Grrr, I know this is broken... */
        if(irq == 14)
                return;

        /* Check for bogons. */
        imap = irq_to_imap(irq);
        if(imap == NULL)
                goto do_the_stb_watoosi;

        /* NOTE: We do not want to futz with the IRQ clear registers
         * and move the state to IDLE, the SCSI code does call
         * disable_irq() to assure atomicity in the queue cmd
         * SCSI adapter driver code.  Thus we'd lose interrupts.
         */
        *imap &= ~(SYSIO_IMAP_VALID);
        return;

do_the_stb_watoosi:
        printk("Cannot disable irq(%d), doing the \"STB Watoosi\" instead.", irq);
        panic("Trying to disable bogon IRQ");
}
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char *name, void *dev_cookie)
{
        struct irqaction *action, *tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq, *imap, *iclr;

        /* XXX This really is not the way to do it, the "right way"
         * XXX is to have drivers set SA_SBUS or something like that
         * XXX in irqflags and we base our decision here on whether
         * XXX that flag bit is set or not.
         */
        if(irq == 14)
                cpu_irq = irq;
        else
                cpu_irq = ino_to_pil[irq];

        if(!handler)
                return -EINVAL;

        imap = irq_to_imap(irq);

        action = *(cpu_irq + irq_action);
        if(action) {
                if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
                        for (tmp = action; tmp->next; tmp = tmp->next)
                                ;
                else
                        return -EBUSY;

                if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
                        printk("Attempt to mix fast and slow interrupts on IRQ%d "
                               "denied\n", irq);
                        return -EBUSY;
                }
                action = NULL;          /* Or else! */
        }

        save_and_cli(flags);

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if(irqflags & SA_STATIC_ALLOC)
                if(static_irq_count < MAX_STATIC_ALLOC)
                        action = &static_irqaction[static_irq_count++];
                else
                        printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
                               "using kmalloc\n", irq, name);

        if(action == NULL)
                action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
                                                     GFP_KERNEL);

        if(!action) {
                restore_flags(flags);
                return -ENOMEM;
        }
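        /* For SYSIO-routed sources, record which PIL to raise when this
         * vector arrives and remember where its Interrupt Clear register
         * lives so the handler path can re-arm it.
         */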
        if(imap) {
                int ivindex = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO));

                ivector_to_mask[ivindex] = (1<<cpu_irq);
                iclr = imap_to_iclr(imap);
                action->mask = (unsigned long) iclr;
                irqflags |= SA_SYSIO_MASKED;
        } else {
                action->mask = 0;
        }

        action->handler = handler;
        action->flags = irqflags;
        action->name = name;
        action->next = NULL;
        action->dev_id = dev_cookie;

        if(tmp)
                tmp->next = action;
        else
                *(cpu_irq + irq_action) = action;

        enable_irq(irq);
        restore_flags(flags);
        return 0;
}
void free_irq(unsigned int irq, void *dev_cookie)
{
        struct irqaction *action;
        struct irqaction *tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq;

        if(irq == 14)
                cpu_irq = irq;
        else
                cpu_irq = ino_to_pil[irq];
        action = *(cpu_irq + irq_action);
        if(!action->handler) {
                printk("Freeing free IRQ %d\n", irq);
                return;
        }
        if(dev_cookie) {
                for( ; action; action = action->next) {
                        if(action->dev_id == dev_cookie)
                                break;
                        tmp = action;
                }
                if(!action) {
                        printk("Trying to free free shared IRQ %d\n", irq);
                        return;
                }
        } else if(action->flags & SA_SHIRQ) {
                printk("Trying to free shared IRQ %d with NULL device cookie\n", irq);
                return;
        }

        if(action->flags & SA_STATIC_ALLOC) {
                printk("Attempt to free statically allocated IRQ %d (%s)\n",
                       irq, action->name);
                return;
        }

        save_and_cli(flags);
        if(action && tmp)
                tmp->next = action->next;
        else
                *(cpu_irq + irq_action) = action->next;

        if(action->flags & SA_SYSIO_MASKED) {
                unsigned int *imap = irq_to_imap(irq);
                if(imap != NULL)
                        ivector_to_mask[*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO)] = 0;
                else
                        printk("free_irq: WHeee, SYSIO_MASKED yet no imap reg.\n");
        }

        kfree(action);

        if(!*(cpu_irq + irq_action))
                disable_irq(irq);

        restore_flags(flags);
}
/* Per-processor IRQ locking depth, both SMP and non-SMP code use this. */
unsigned int local_irq_count[NR_CPUS];

#ifndef __SMP__
int __sparc64_bh_counter = 0;

#define irq_enter(cpu, irq)     (local_irq_count[cpu]++)
#define irq_exit(cpu, irq)      (local_irq_count[cpu]--)

#else
#error SMP not supported on sparc64 just yet
#endif /* __SMP__ */
void report_spurious_ivec(struct pt_regs *regs)
{
        printk("IVEC: Spurious interrupt vector received at (%016lx)\n",
               regs->tpc);
        return;
}
void unexpected_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
        int i;
        struct irqaction *action;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        action = *(cpu_irq + irq_action);

        prom_printf("Unexpected IRQ[%d]: ", irq);
        prom_printf("PC[%016lx] NPC[%016lx] FP[%016lx]\n",
                    regs->tpc, regs->tnpc, regs->u_regs[14]);

        if(action) {
                prom_printf("Expecting: ");
                for(i = 0; i < 16; i++) {
                        if(action->handler)
                                prom_printf("[%s:%d:0x%016lx] ", action->name,
                                            i, (unsigned long) action->handler);
                }
        }
        prom_printf("AIEEE\n");
        prom_printf("bogus interrupt received\n");
        prom_cmdline ();
}
void handler_irq(int irq, struct pt_regs *regs)
{
        struct irqaction *action;
        int cpu = smp_processor_id();

        /* XXX */
        if(irq != 14)
                clear_softint(1 << irq);

        irq_enter(cpu, irq);
        action = *(irq + irq_action);
        kstat.interrupts[irq]++;
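        /* Walk every handler on this (possibly shared) line; SYSIO-masked
         * sources are re-armed afterwards by writing IDLE into the Interrupt
         * Clear register that request_irq() stashed in action->mask.
         */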
        do {
                if(!action || !action->handler)
                        unexpected_irq(irq, 0, regs);
                action->handler(irq, action->dev_id, regs);
                if(action->flags & SA_SYSIO_MASKED)
                        *((unsigned int *)action->mask) = SYSIO_ICLR_IDLE;
        } while((action = action->next) != NULL);
        irq_exit(cpu, irq);
}
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
        struct irqaction *action = *(irq + irq_action);
        int cpu = smp_processor_id();

        irq_enter(cpu, irq);
        floppy_interrupt(irq, dev_cookie, regs);
        if(action->flags & SA_SYSIO_MASKED)
                *((unsigned int *)action->mask) = SYSIO_ICLR_IDLE;
        irq_exit(cpu, irq);
}
#endif
/* XXX This needs to be written for floppy driver, and soon will be necessary
 * XXX for serial driver as well.
 */
int request_fast_irq(unsigned int irq,
                     void (*handler)(int, void *, struct pt_regs *),
                     unsigned long irqflags, const char *name)
{
        return -1;
}
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
        return 0;
}

int probe_irq_off(unsigned long mask)
{
        return 0;
}
/* XXX This is a hack, make it per-cpu so that SMP port will work correctly
 * XXX with mixed MHZ Ultras in the machine. -DaveM
 */
static unsigned long cpu_cfreq;
static unsigned long tick_offset;
/* XXX This doesn't belong here, just do this cruft in the timer.c handler code. */
static void timer_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        extern void timer_interrupt(int, void *, struct pt_regs *);
        unsigned long compare;

        if (!(get_softint () & 1)) {
                /* Just to be sure... */
                clear_softint(1 << 14);
                printk("Spurious level14 at %016lx\n", regs->tpc);
                return;
        }

        timer_interrupt(irq, dev_id, regs);

        /* Acknowledge INT_TIMER */
        clear_softint(1 << 0);

        /* Set up for next timer tick. */
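        /* (tick_offset is one jiffy worth of %tick counts, cpu_cfreq / HZ,
         *  computed in init_timers() below.)
         */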
        __asm__ __volatile__("rd        %%tick_cmpr, %0\n\t"
                             "add       %0, %1, %0\n\t"
                             "wr        %0, 0x0, %%tick_cmpr"
                             : "=r" (compare)
                             : "r" (tick_offset));
}
/* This is called from time_init() to get the jiffies timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *))
{
        int node, err;

        /* XXX FIX this for SMP -JJ */
        node = linux_cpus [0].prom_node;
        cpu_cfreq = prom_getint(node, "clock-frequency");
        tick_offset = cpu_cfreq / HZ;
        err = request_irq(14, timer_handler, (SA_INTERRUPT|SA_STATIC_ALLOC),
                          "timer", NULL);
        if(err) {
                prom_printf("Serious problem, cannot register timer interrupt\n");
                prom_halt();
        } else {
                unsigned long flags;

                save_and_cli(flags);

                __asm__ __volatile__("wr        %0, 0x0, %%tick_cmpr\n\t"
                                     "wrpr      %%g0, 0x0, %%tick"
                                     : /* No outputs */
                                     : "r" (tick_offset));
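                /* Discard any softints that are already pending so the first
                 * tick starts from a clean slate (init_IRQ() below does the
                 * same).
                 */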
                clear_softint (get_softint ());

                restore_flags(flags);
        }
        sti();
}
/* We use this nowhere else, so only define its layout here. */
struct sun5_timer {
        volatile u32 count0, _unused0;
        volatile u32 limit0, _unused1;
        volatile u32 count1, _unused2;
        volatile u32 limit1, _unused3;
} *prom_timers;
static void map_prom_timers(void)
{
        unsigned int addr[3];
        int tnode, err;

        /* PROM timer node hangs out in the top level of device siblings... */
        tnode = prom_finddevice("/counter-timer");

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if(tnode == 0) {
                prom_timers = (struct sun5_timer *) 0;
                prom_printf("AIEEE, no timers\n");
                return;
        }

        /* If PROM is really using this, it must be mapped by him. */
        err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
        if(err == -1) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) addr[0];
}
static void kill_prom_timer(void)
{
        if(!prom_timers)
                return;

        /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;
}
#if 0 /* Unused at this time. -DaveM */
static void enable_prom_timer(void)
{
        if(!prom_timers)
                return;

        /* Set it to fire off every 10ms. */
        prom_timers->limit1 = 0xa000270f;
        prom_timers->count1 = 0;
}
#endif
__initfunc(void init_IRQ(void))
{
        int i;

        map_prom_timers();
        kill_prom_timer();
        for(i = 0; i < NUM_IVECS; i++)
                ivector_to_mask[i] = 0;

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "wrpr      %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");
}