/* arch/sparc64/kernel/irq.c (from davej-history.git) */
/* $Id: irq.c,v 1.94 2000/09/21 06:27:10 anton Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/starfire.h>

/* Internal flags, should not be visible elsewhere at all. */
#define SA_IMAP_MASKED	0x100
#define SA_DMA_SYNC	0x200

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send an interrupt packet to the UltraSparc with the low 5 bits
 * (7 on Starfire) of the first data register holding the IRQ identifier
 * being delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (64)));

#ifndef CONFIG_SMP
unsigned int __up_workvec[16] __attribute__ ((aligned (64)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(void)(__cpu), (__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif
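
/* irq_work(cpu, pil) yields the address of that cpu's worklist head for the
 * given PIL.  The interrupt vector code in entry.S pushes incoming vectors
 * onto these lists and handler_irq() below drains them; on UP the per-cpu
 * array degenerates into the single __up_workvec[] above.
 */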

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with SA_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif
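
/* Presumably the PCI controller probe code fills in pci_dma_wsync and
 * dma_sync_reg_table[]; what is visible here is the consumer side:
 * handler_irq() below reads the table entry for a bucket and then
 * pci_dma_wsync so outstanding DMA completes before the handler runs.
 */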

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
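
/* Note that irq_action[] is indexed by PIL level, not by INO; every bucketed
 * interrupt registered at a given PIL hangs off the same list.
 * get_irq_list() below walks these lists to produce the /proc/interrupts
 * style listing.
 */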

int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef CONFIG_SMP
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, "%c %s",
			       (action->flags & SA_INTERRUPT) ? '+' : ' ',
			       action->name);
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf + len, ",%s %s",
				       (action->flags & SA_INTERRUPT) ? " +" : "",
				       action->name);
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}
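
/* enable_irq() retargets the interrupt at the calling cpu by writing the
 * cpu's UPA MID (or its Starfire translation) into the IMAP TID field and
 * setting IMAP_VALID; disable_irq() below simply clears IMAP_VALID and
 * leaves the ICLR state alone.
 */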

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	if(this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
	} else {
		tid = (starfire_translate(imap, current->processor) << 26);
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if(ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
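
/* A typical caller (a sketch only, the names and values here are
 * hypothetical) is the bus probe code, which cooks an IRQ cookie here and
 * then hands it to request_irq():
 *
 *	unsigned int cookie = build_irq(pil, inofixup, iclr, imap);
 *	err = request_irq(cookie, my_handler, SA_SHIRQ, "mydev", my_dev);
 *
 * atomic_bucket_insert() below pushes a bucket onto this cpu's worklist for
 * its PIL with PSTATE_IE cleared, so the insertion cannot race with the
 * interrupt vector handler filling the same list on this cpu.
 */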

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
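
/* request_irq() takes the IRQ cookie returned by build_irq().  Bucketed
 * (SA_IMAP_MASKED) interrupts may be shared on PCI by turning the bucket's
 * irq_info into a small vector of up to four irqaction pointers (IBF_MULTI).
 * A vector that arrived while no handler was registered is replayed via
 * atomic_bucket_insert() and a softint once registration completes.
 */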

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if (!handler)
		return -EINVAL;

	if (!bucket->pil)
		irqflags &= ~SA_IMAP_MASKED;
	else {
		irqflags |= SA_IMAP_MASKED;
		if (bucket->flags & IBF_PCI) {
			/*
			 * PCI IRQs should never use SA_INTERRUPT.
			 */
			irqflags &= ~(SA_INTERRUPT);

			/*
			 * Check whether we _should_ use DMA Write Sync
			 * (for devices behind bridges behind APB).
			 */
			if (bucket->flags & IBF_DMA_SYNC)
				irqflags |= SA_DMA_SYNC;
		}
	}

	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			restore_flags(flags);
			return -EBUSY;
		}
		if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d "
			       "denied\n", bucket->pil);
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	if ((irqflags & SA_IMAP_MASKED) == 0) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}
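
/* free_irq() reverses the sharing done above: on an IBF_MULTI bucket it
 * removes just this action, converts back to a single-handler bucket when
 * only one entry remains, and only clears IMAP_VALID (via disable_irq())
 * once no other active bucket still references the same IMAP register.
 */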

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	if(!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		return;
	}
	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}

	save_and_cli(flags);
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	if(action->flags & SA_IMAP_MASKED) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket &&
			   bp->imap == imap &&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

out:
	kfree(action);
	restore_flags(flags);
}
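
/* What follows (under CONFIG_SMP) is the global-IRQ machinery behind the
 * old SMP cli()/sti() semantics: a big-reader lock, BR_GLOBALIRQ_LOCK,
 * whose write side is taken by the one cpu that wants all interrupt
 * activity quiesced; that cpu is recorded in global_irq_holder.
 */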

#ifdef CONFIG_SMP

/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;

static void show(char * str)
{
	int cpu = smp_processor_id();
	int i;

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq: %d [ ", irqs_running());
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
	printk("]\nbh: %d [ ",
	       (spin_is_locked(&global_bh_lock) ? 1 : 0));
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", local_bh_count(i));
	printk("]\n");
}

#define MAXCOUNT 100000000

#if 0
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)
#else
#define SYNC_OTHER_ULTRAS(x)	membar("#Sync");
#endif

void synchronize_irq(void)
{
	if (irqs_running()) {
		cli();
		sti();
	}
}

static inline void get_irqlock(int cpu)
{
	int count;

	if ((unsigned char)cpu == global_irq_holder)
		return;

	count = MAXCOUNT;
again:
	br_write_lock(BR_GLOBALIRQ_LOCK);
	for (;;) {
		spinlock_t *lock;

		if (!irqs_running() &&
		    (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
			break;

		br_write_unlock(BR_GLOBALIRQ_LOCK);
		lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
		while (irqs_running() ||
		       spin_is_locked(lock) ||
		       (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
			if (!--count) {
				show("get_irqlock");
				count = (~0 >> 1);
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
		}
		goto again;
	}

	global_irq_holder = cpu;
}

void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();
		__cli();
		if (! local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (! local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;
		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}

#endif /* CONFIG_SMP */

void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived; when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
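
/* handler_irq() is the PIL-level dispatcher: it atomically snapshots and
 * clears this cpu's worklist for the given PIL (via xchg32), walks the
 * bucket chain invoking every registered irqaction, and finally writes
 * ICLR_IDLE so the source may interrupt again.  On SMP it may also retarget
 * the IMAP at an idle "buddy" cpu before doing so.
 */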

/* Tune this... */
#define FORWARD_VOLUME		12

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	int should_forward = (this_is_starfire == 0 &&
			      irq < 10 &&
			      current->pid != 0);
	unsigned int buddy = 0;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	if (should_forward != 0) {
		buddy = cpu_number_map(cpu) + 1;
		if (buddy >= NR_CPUS ||
		    (buddy = cpu_logical_map(buddy)) == -1)
			buddy = cpu_logical_map(0);

		/* Voo-doo programming. */
		if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
			should_forward = 0;
		buddy <<= 26;
	}
#endif

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	if ((irq == 14) && (get_softint() & (1UL << 0)))
		irq = 0;
#endif
	clear_softint(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;

		nbp = __bucket(bp->irq_chain);
		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL)
						ap->handler(__irq(bp), ap->dev_id, regs);
				}
			}

			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				/* Ok, here is what is going on:
				 * 1) Retargeting IRQs on Starfire is very
				 *    expensive so just forget about it on them.
				 * 2) Moving around very high priority interrupts
				 *    is a losing game.
				 * 3) If the current cpu is idle, interrupts are
				 *    useful work, so keep them here.  But do not
				 *    pass to our neighbour if he is not very idle.
				 */
				if (should_forward != 0) {
					/* Push it to our buddy. */
					should_forward = 0;
					upa_writel(buddy | IMAP_VALID, bp->imap);
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = (struct ino_bucket *)action->mask;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	irq_exit(cpu, irq);
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
	(0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
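
/* install_fast_irq() patches the TL0 trap table in place: each trap table
 * entry is 32 bytes (8 instructions), and the interrupt-level traps start at
 * vector 0x41, hence the base offset of 0x820 and the (cpu_irq - 1) << 5
 * step below.  The first two instructions of the entry become a "ba" to the
 * fast handler followed by a nop.
 */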

static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	/* Only IMAP style interrupts can be registered as fast. */
	if(bucket->pil == 0)
		return -EINVAL;

	if(!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	save_and_cli(flags);
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);
	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags | SA_IMAP_MASKED;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	restore_flags(flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}
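
/* init_timers() gets the PIL0 timer tick going: it reads "clock-frequency"
 * from the boot cpu's PROM node, registers cfunc through the PIL0 dummy
 * bucket, and then programs %tick and %tick_cmpr (with the Spitfire and
 * Blackbird errata workarounds noted below) so TICK_INT fires every
 * timer_tick_offset cycles.
 */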

/* This gets the master TICK_INT timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
		 unsigned long *clock)
{
	unsigned long pstate;
	extern unsigned long timer_tick_offset;
	int node, err;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	node = linux_cpus[0].prom_node;
	*clock = prom_getint(node, "clock-frequency");
	timer_tick_offset = *clock / HZ;
#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);

	if(err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__("
	sethi	%%hi(0x80000000), %%g1
	ba,pt	%%xcc, 1f
	 sllx	%%g1, 32, %%g1
	.align	64
1:	rd	%%tick, %%g2
	add	%%g2, 6, %%g2
	andn	%%g2, %%g1, %%g2
	wrpr	%%g2, 0, %%tick
	rdpr	%%tick, %%g0"
	: /* no outputs */
	: /* no inputs */
	: "g1", "g2");

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("
	rd	%%tick, %%g1
	ba,pt	%%xcc, 1f
	 add	%%g1, %0, %%g1
	.align	64
1:	wr	%%g1, 0x0, %%tick_cmpr
	rd	%%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (timer_tick_offset)
	: "g1");

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sti();
}
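
/* retarget_one_irq() rewrites a bucket's IMAP TID to aim at the next cpu in
 * __cpu_logical_map[], wrapping at the end; distribute_irqs() uses it to
 * spread all SA_IMAP_MASKED interrupts round-robin across the online cpus
 * whenever a new handler is registered.  PIL 12 is left alone (see below).
 */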

#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = __bucket(p->mask);
	unsigned long imap = bucket->imap;
	unsigned int tid;

	/* Never change this, it causes problems on Ex000 systems. */
	if (bucket->pil == 12)
		return goal_cpu;

	if(this_is_starfire == 0) {
		tid = __cpu_logical_map[goal_cpu] << 26;
	} else {
		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
	}
	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);

	goal_cpu++;
	if(goal_cpu >= NR_CPUS ||
	   __cpu_logical_map[goal_cpu] == -1)
		goal_cpu = 0;
	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	save_and_cli(flags);
	cpu = 0;
	for(level = 0; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		while(p) {
			if(p->flags & SA_IMAP_MASKED)
				cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	restore_flags(flags);
}
#endif

struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if(tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if(err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__("
	mov	0x40, %%g2
	ldxa	[%%g0] %0, %%g1
	ldxa	[%%g2] %1, %%g1
	stxa	%%g0, [%%g0] %0
	membar	#Sync"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R)
	: "g1", "g2");
}

void enable_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}

void __init init_IRQ(void)
{
	static int called = 0;

	if (called == 0) {
		called = 1;
		map_prom_timers();
		kill_prom_timer();
		memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef CONFIG_SMP
		memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

void init_irq_proc(void)
{
	/* For now, nothing... */
}