/* $Id: irq.c,v 1.92 2000/08/26 02:42:28 anton Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
/* Internal flags, should not be visible elsewhere at all. */
#define SA_IMAP_MASKED		0x100
#define SA_DMA_SYNC		0x200
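/* Both bits are purely kernel-internal bookkeeping: SA_IMAP_MASKED marks an
 * irqaction that is backed by a real IMAP/ICLR register pair (request_irq()
 * sets it for every bucket except the PIL0 timer dummy), and SA_DMA_SYNC
 * marks PCI actions that need the DMA write-sync read performed in
 * handler_irq() before their handler runs.  Drivers never pass these in
 * themselves.
 */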
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (64)));
#ifndef CONFIG_SMP
unsigned int __up_workvec[16] __attribute__ ((aligned (64)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(void)(__cpu), (__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif
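/* A note on the "irq" cookies used throughout this file: what drivers get
 * back from build_irq() and later pass to request_irq()/enable_irq() is not
 * a small index but an encoded pointer to an ino_bucket; __irq() and
 * __bucket() (see asm/irq.h) convert between the two representations.  The
 * irq_work() lists above hold chains of such cookies, one list per
 * (cpu, pil) pair, which handler_irq() snapshots and walks at dispatch time.
 */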
#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with SA_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;
/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};
int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef CONFIG_SMP
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, "%c %s",
			       (action->flags & SA_INTERRUPT) ? '+' : ' ',
			       action->name);
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf + len, ",%s %s",
				       (action->flags & SA_INTERRUPT) ? " +" : "",
				       action->name);
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	if(this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
	} else {
		extern unsigned int starfire_translate(unsigned long imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, current->processor) << 26);
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
}
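/* In other words, enable_irq() points the interrupt at this cpu: the UPA
 * module ID (or its Starfire translation) is written into the IMAP_TID
 * field together with IMAP_VALID, while the IGN/INO bits are left to the
 * hardware as described above.  Rough shape of the register as used here:
 *
 *	IMAP = IMAP_VALID | (target MID in IMAP_TID) | IGN/INO (read-only)
 */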
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};
unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if(ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
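/* Typical use, for illustration only (the bus probe code is where this
 * really happens; "my_handler" and "my_dev" are made-up names):
 *
 *	unsigned int cookie = build_irq(pil, inofixup, iclr_phys, imap_phys);
 *	if (request_irq(cookie, my_handler, SA_SHIRQ, "my_dev", my_dev))
 *		...error...
 *
 * The cookie, not the INO, is what gets stored in dev->irq style fields
 * and later handed back to free_irq()/enable_irq()/disable_irq().
 */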
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
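/* Note: the wrpr above clears PSTATE_IE around the list update, so the
 * insert cannot be interrupted by the interrupt vector trap handler that
 * feeds these same per-cpu lists from entry.S; no lock is needed because
 * each list is only ever touched from its own cpu.
 */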
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if(!handler)
		return -EINVAL;

	if (!bucket->pil)
		irqflags &= ~SA_IMAP_MASKED;
	else {
		irqflags |= SA_IMAP_MASKED;
		if (bucket->flags & IBF_PCI) {
			/*
			 * PCI IRQs should never use SA_INTERRUPT.
			 */
			irqflags &= ~(SA_INTERRUPT);

			/*
			 * Check whether we _should_ use DMA Write Sync
			 * (for devices behind bridges behind APB).
			 */
			if (bucket->flags & IBF_DMA_SYNC)
				irqflags |= SA_DMA_SYNC;
		}
	}

	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			restore_flags(flags);
			return -EBUSY;
		}
		if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d "
			       "denied\n", bucket->pil);
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	if ((irqflags & SA_IMAP_MASKED) == 0) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}
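/* Sharing rules enforced above, in short: only PCI buckets (IBF_PCI) may be
 * shared at the bucket level; the first sharer converts irq_info from a
 * single irqaction pointer into a four-entry vector and sets IBF_MULTI, so
 * at most four actions can hang off one ino_bucket.  Mixing SA_INTERRUPT
 * ("fast") and ordinary handlers on one PIL is rejected outright.
 */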
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	if(!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		return;
	}
	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}

	save_and_cli(flags);
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	if(action->flags & SA_IMAP_MASKED) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket &&
			   bp->imap == imap &&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

out:
	kfree(action);
	restore_flags(flags);
}
#ifdef CONFIG_SMP

/* Who has global_irq_lock. */
unsigned char global_irq_holder = NO_PROC_ID;

static void show(char * str)
{
	int cpu = smp_processor_id();
	int i;

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [ ", irqs_running());
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
	printk("]\nbh:   %d [ ",
	       (spin_is_locked(&global_bh_lock) ? 1 : 0));
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", local_bh_count(i));
	printk("]\n");
}

#define MAXCOUNT 100000000

#if 0
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)
#else
#define SYNC_OTHER_ULTRAS(x)	membar("#Sync");
#endif
void synchronize_irq(void)
{
	if (irqs_running()) {
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	int count;

	if ((unsigned char)cpu == global_irq_holder)
		return;

	count = MAXCOUNT;
again:
	br_write_lock(BR_GLOBALIRQ_LOCK);
	for (;;) {
		spinlock_t *lock;

		if (!irqs_running() &&
		    (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
			break;

		br_write_unlock(BR_GLOBALIRQ_LOCK);
		lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
		while (irqs_running() ||
		       spin_is_locked(lock) ||
		       (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
			if (!--count) {
				show("get_irqlock");
				count = (~0 >> 1);
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
		}
		goto again;
	}

	global_irq_holder = cpu;
}
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();
		__cli();
		if (! local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (! local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
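/* The return value of __global_save_flags() encodes, for
 * __global_restore_flags(): 0 - this cpu held the global irq lock (restore
 * via __global_cli()), 1 - interrupts were enabled and no global lock was
 * held (restore via __global_sti()), 2 - locally disabled, 3 - locally
 * enabled.
 */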
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;
		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}

#endif /* CONFIG_SMP */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
/* Tune this... */
#define FORWARD_VOLUME		12
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	extern int this_is_starfire;
	int should_forward = (this_is_starfire == 0 &&
			      irq < 10 &&
			      current->pid != 0);
	unsigned int buddy = 0;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	if (should_forward != 0) {
		buddy = cpu_number_map(cpu) + 1;
		if (buddy >= NR_CPUS ||
		    (buddy = cpu_logical_map(buddy)) == -1)
			buddy = cpu_logical_map(0);

		/* Voo-doo programming. */
		if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
			should_forward = 0;
		buddy <<= 26;
	}
#endif

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	if ((irq == 14) && (get_softint() & (1UL << 0)))
		irq = 0;
#endif
	clear_softint(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;

		nbp = __bucket(bp->irq_chain);
		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL)
						ap->handler(__irq(bp), ap->dev_id, regs);
				}
			}

			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				/* Ok, here is what is going on:
				 * 1) Retargeting IRQs on Starfire is very
				 *    expensive so just forget about it on them.
				 * 2) Moving around very high priority interrupts
				 *    is a losing game.
				 * 3) If the current cpu is idle, interrupts are
				 *    useful work, so keep them here.  But do not
				 *    pass to our neighbour if he is not very idle.
				 */
				if (should_forward != 0) {
					/* Push it to our buddy. */
					should_forward = 0;
					upa_writel(buddy | IMAP_VALID, bp->imap);
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}
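/* Note that PIL 0 never has a real bucket: on UP the TICK_INT check above
 * redirects the level-14 timer softint to the pil0_dummy_bucket, whose
 * pil/imap/iclr are all zero, so the IMAP forwarding and ICLR_IDLE write
 * are skipped for it.  Everything else arrives via the per-cpu worklists
 * filled in by the vector handler in entry.S.
 */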
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = (struct ino_bucket *)action->mask;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	irq_exit(cpu, irq);
}
#endif
/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
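/* What install_fast_irq() is doing: 0x10800000 is a SPARC "ba" (branch
 * always) opcode, so SPARC_BRANCH() assembles a 22-bit pc-relative branch.
 * The TL0 trap table entry for interrupt level N lives at trap type
 * 0x40 + N, i.e. byte offset (0x40 + N) * 0x20 = 0x820 + ((N - 1) << 5),
 * and the first two instructions of that 32-byte slot are patched to
 * branch straight to the handler, with a nop in the delay slot.
 */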
int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	/* Only IMAP style interrupts can be registered as fast. */
	if(bucket->pil == 0)
		return -EINVAL;

	if(!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	save_and_cli(flags);
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);
	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags | SA_IMAP_MASKED;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	restore_flags(flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}
/* This gets the master TICK_INT timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
		 unsigned long *clock)
{
	unsigned long pstate;
	extern unsigned long timer_tick_offset;
	int node, err;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	node = linux_cpus[0].prom_node;
	*clock = prom_getint(node, "clock-frequency");
	timer_tick_offset = *clock / HZ;
#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);

	if(err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__("
	sethi	%%hi(0x80000000), %%g1
	ba,pt	%%xcc, 1f
	 sllx	%%g1, 32, %%g1
	.align	64
1:	rd	%%tick, %%g2
	add	%%g2, 6, %%g2
	andn	%%g2, %%g1, %%g2
	wrpr	%%g2, 0, %%tick
	rdpr	%%tick, %%g0"
	: /* no outputs */
	: /* no inputs */
	: "g1", "g2");

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("
	rd	%%tick, %%g1
	ba,pt	%%xcc, 1f
	 add	%%g1, %0, %%g1
	.align	64
1:	wr	%%g1, 0x0, %%tick_cmpr
	rd	%%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (timer_tick_offset)
	: "g1");

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sti();
}
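/* The "irq" registered above is the PIL0 dummy cookie from build_irq(0, ...),
 * so the timer handler is driven purely by %tick/%tick_cmpr compare traps
 * (TICK_INT) and never goes through an IMAP/ICLR pair; handler_irq()
 * special-cases it via pil0_dummy_bucket.
 */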
#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(p->mask);
	unsigned long imap = bucket->imap;
	unsigned int tid;

	/* Never change this, it causes problems on Ex000 systems. */
	if (bucket->pil == 12)
		return goal_cpu;

	if(this_is_starfire == 0) {
		tid = __cpu_logical_map[goal_cpu] << 26;
	} else {
		extern unsigned int starfire_translate(unsigned long imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
	}
	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);

	goal_cpu++;
	if(goal_cpu >= NR_CPUS ||
	   __cpu_logical_map[goal_cpu] == -1)
		goal_cpu = 0;
	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	save_and_cli(flags);
	cpu = 0;
	for(level = 0; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		while(p) {
			if(p->flags & SA_IMAP_MASKED)
				cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	restore_flags(flags);
}
#endif
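/* So every successful request_irq()/request_fast_irq() re-deals all
 * IMAP-backed interrupts round-robin across the online cpus (skipping
 * PIL 12 as noted above); there is no per-irq affinity interface here,
 * the spread is simply recomputed wholesale each time.
 */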
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if(tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if(err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__("
	mov	0x40, %%g2
	ldxa	[%%g0] %0, %%g1
	ldxa	[%%g2] %1, %%g1
	stxa	%%g0, [%%g0] %0
	membar	#Sync
"	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R)
	: "g1", "g2");
}
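/* The inline assembly above drains any interrupt packet the PROM timer may
 * already have posted: it reads the interrupt-receive status (ASI_INTR_RECEIVE)
 * and an incoming UDB interrupt data register (ASI_UDB_INTR_R, offset 0x40),
 * then clears the receive register and issues membar #Sync, so no stale
 * vector is left pending when Linux takes over.
 */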
void enable_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
void __init init_IRQ(void)
{
	static int called = 0;

	if (called == 0) {
		called = 1;
		map_prom_timers();
		kill_prom_timer();
		memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef CONFIG_SMP
		memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
void init_irq_proc(void)
{
	/* For now, nothing... */
}