1 /* $Id: irq.c,v 1.92 2000/08/26 02:42:28 anton Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
9 #include <linux/config.h>
10 #include <linux/ptrace.h>
11 #include <linux/errno.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/signal.h>
15 #include <linux/interrupt.h>
16 #include <linux/malloc.h>
17 #include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
18 #include <linux/init.h>
19 #include <linux/delay.h>
21 #include <asm/ptrace.h>
22 #include <asm/processor.h>
23 #include <asm/atomic.h>
24 #include <asm/system.h>
27 #include <asm/iommu.h>
29 #include <asm/oplib.h>
30 #include <asm/timer.h>
32 #include <asm/hardirq.h>
33 #include <asm/softirq.h>
35 /* Internal flag, should not be visible elsewhere at all. */
36 #define SA_IMAP_MASKED 0x100
37 #define SA_DMA_SYNC 0x200
/* NOTE(review): this chunk is extraction-damaged; the embedded original
 * line numbers jump (37 -> 40 here), so source lines are missing between
 * the numbered fragments.  Comments below describe only what the visible
 * fragments establish.
 */
40 static void distribute_irqs(void);
43 /* UPA nodes send interrupt packet to UltraSparc with first data reg
44 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
45 * delivered. We must translate this into a non-vector IRQ so we can
46 * set the softint on this cpu.
48 * To make processing these packets efficient and race free we use
49 * an array of irq buckets below. The interrupt vector handler in
50 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
51 * The IVEC handler does not need to act atomically, the PIL dispatch
52 * code uses CAS to get an atomic snapshot of the list and clear it
/* One bucket per interrupt number (INO); 64-byte aligned, presumably to
 * cache-line-align the table -- TODO confirm NUM_IVECS/bucket layout in
 * the corresponding header.
 */
56 struct ino_bucket ivector_table
[NUM_IVECS
] __attribute__ ((aligned (64)));
59 unsigned int __up_workvec
[16] __attribute__ ((aligned (64)));
/* NOTE(review): two conflicting definitions of irq_work() appear below;
 * in the undamaged original they were almost certainly separated by a
 * UP-vs-SMP preprocessor conditional (the guard lines are among the
 * missing fragments) -- verify against the pristine source.
 */
60 #define irq_work(__cpu, __pil) &(__up_workvec[(void)(__cpu), (__pil)])
62 #define irq_work(__cpu, __pil) &(cpu_data[(__cpu)].irq_worklists[(__pil)])
66 /* This is a table of physical addresses used to deal with SA_DMA_SYNC.
67 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
68 * for devices behind busses other than APB on Sabre systems.
70 * Currently these physical addresses are just config space accesses
71 * to the command register for that device.
73 unsigned long pci_dma_wsync
;
74 unsigned long dma_sync_reg_table
[256];
75 unsigned char dma_sync_reg_table_entry
= 0;
78 /* This is based upon code in the 32-bit Sparc kernel written mostly by
79 * David Redman (djhr@tadpole.co.uk).
/* Small static pool of irqaction structs for SA_STATIC_ALLOC requests
 * (allocations that must not use kmalloc); never freed.
 */
81 #define MAX_STATIC_ALLOC 4
82 static struct irqaction static_irqaction
[MAX_STATIC_ALLOC
];
83 static int static_irq_count
= 0;
85 /* This is exported so that fast IRQ handlers can get at it... -DaveM */
/* Per-PIL list heads of registered handlers; NR_IRQS+1 entries, all
 * initially NULL (the visible initializer shows 16 NULL slots).
 */
86 struct irqaction
*irq_action
[NR_IRQS
+1] = {
87 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
88 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
91 int get_irq_list(char *buf
)
94 struct irqaction
*action
;
99 for(i
= 0; i
< (NR_IRQS
+ 1); i
++) {
100 if(!(action
= *(i
+ irq_action
)))
102 len
+= sprintf(buf
+ len
, "%3d: ", i
);
104 len
+= sprintf(buf
+ len
, "%10u ", kstat_irqs(i
));
106 for (j
= 0; j
< smp_num_cpus
; j
++)
107 len
+= sprintf(buf
+ len
, "%10u ",
108 kstat
.irqs
[cpu_logical_map(j
)][i
]);
110 len
+= sprintf(buf
+ len
, "%c %s",
111 (action
->flags
& SA_INTERRUPT
) ? '+' : ' ',
113 for(action
= action
->next
; action
; action
= action
->next
) {
114 len
+= sprintf(buf
+len
, ",%s %s",
115 (action
->flags
& SA_INTERRUPT
) ? " +" : "",
118 len
+= sprintf(buf
+ len
, "\n");
123 /* Now these are always passed a true fully specified sun4u INO. */
/* enable_irq: program the interrupt's IMAP register so the hardware
 * delivers it to this CPU.  On non-Starfire the target id (tid) is read
 * from the UPA config register (our UPA MID); on Starfire it goes
 * through starfire_translate().  Finally IMAP_VALID|tid is written to
 * the bucket's imap.  NOTE(review): fragments are missing between the
 * numbered lines below (e.g. 127 -> 135); do not assume the visible
 * text is the complete body.
 */
124 void enable_irq(unsigned int irq
)
126 extern int this_is_starfire
;
127 struct ino_bucket
*bucket
= __bucket(irq
);
135 if(this_is_starfire
== 0) {
136 /* We set it to our UPA MID. */
137 __asm__
__volatile__("ldxa [%%g0] %1, %0"
139 : "i" (ASI_UPA_CONFIG
));
140 tid
= ((tid
& UPA_CONFIG_MID
) << 9);
142 extern unsigned int starfire_translate(unsigned long imap
,
145 tid
= (starfire_translate(imap
, current
->processor
) << 26);
148 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
149 * of this SYSIO's preconfigured IGN in the SYSIO Control
150 * Register, the hardware just mirrors that value here.
151 * However for Graphics and UPA Slave devices the full
152 * IMAP_INR field can be set by the programmer here.
154 * Things like FFB can now be handled via the new IRQ mechanism.
156 upa_writel(IMAP_VALID
| (tid
& IMAP_TID
), imap
);
159 /* This now gets passed true ino's as well. */
/* disable_irq: clear the enable state in the IMAP register (read-
 * modify-write of imap via upa_readl/upa_writel).  Deliberately does
 * NOT touch the ICLR register -- see the in-body NOTE about the SCSI
 * driver relying on that.
 */
160 void disable_irq(unsigned int irq
)
162 struct ino_bucket
*bucket
= __bucket(irq
);
169 /* NOTE: We do not want to futz with the IRQ clear registers
170 * and move the state to IDLE, the SCSI code does call
171 * disable_irq() to assure atomicity in the queue cmd
172 * SCSI adapter driver code. Thus we'd lose interrupts.
174 tmp
= upa_readl(imap
);
176 upa_writel(tmp
, imap
);
180 /* The timer is the one "weird" interrupt which is generated by
181 * the CPU %tick register and not by some normal vectored interrupt
182 * source. To handle this special case, we use this dummy INO bucket.
184 static struct ino_bucket pil0_dummy_bucket
= {
195 unsigned int build_irq(int pil
, int inofixup
, unsigned long iclr
, unsigned long imap
)
197 struct ino_bucket
*bucket
;
201 if(iclr
!= 0UL || imap
!= 0UL) {
202 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
206 return __irq(&pil0_dummy_bucket
);
209 /* RULE: Both must be specified in all other cases. */
210 if (iclr
== 0UL || imap
== 0UL) {
211 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
212 pil
, inofixup
, iclr
, imap
);
216 ino
= (upa_readl(imap
) & (IMAP_IGN
| IMAP_INO
)) + inofixup
;
217 if(ino
> NUM_IVECS
) {
218 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
219 ino
, pil
, inofixup
, iclr
, imap
);
223 /* Ok, looks good, set it up. Don't touch the irq_chain or
226 bucket
= &ivector_table
[ino
];
227 if ((bucket
->flags
& IBF_ACTIVE
) ||
228 (bucket
->irq_info
!= NULL
)) {
229 /* This is a gross fatal error if it happens here. */
230 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
231 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
232 ino
, pil
, inofixup
, iclr
, imap
);
233 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
234 bucket
->pil
, bucket
->iclr
, bucket
->imap
);
235 prom_printf("IRQ: Cannot continue, halting...\n");
243 bucket
->irq_info
= NULL
;
245 return __irq(bucket
);
/* atomic_bucket_insert: push @bucket onto this CPU's per-PIL pending
 * work list.  Interrupts are disabled around the list manipulation by
 * clearing PSTATE_IE in %pstate (saved first, restored at the end), so
 * the insert cannot race with the IVEC handler on this CPU.
 * NOTE(review): fragments are missing between the numbered lines (e.g.
 * the declaration of 'ent' at original lines 249-252 is not visible).
 */
248 static void atomic_bucket_insert(struct ino_bucket
*bucket
)
250 unsigned long pstate
;
253 __asm__
__volatile__("rdpr %%pstate, %0" : "=r" (pstate
));
254 __asm__
__volatile__("wrpr %0, %1, %%pstate"
255 : : "r" (pstate
), "i" (PSTATE_IE
));
256 ent
= irq_work(smp_processor_id(), bucket
->pil
);
/* Classic singly-linked push: bucket->irq_chain = *head; *head = bucket. */
257 bucket
->irq_chain
= *ent
;
258 *ent
= __irq(bucket
);
259 __asm__
__volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate
));
262 int request_irq(unsigned int irq
, void (*handler
)(int, void *, struct pt_regs
*),
263 unsigned long irqflags
, const char *name
, void *dev_id
)
265 struct irqaction
*action
, *tmp
= NULL
;
266 struct ino_bucket
*bucket
= __bucket(irq
);
270 if ((bucket
!= &pil0_dummy_bucket
) &&
271 (bucket
< &ivector_table
[0] ||
272 bucket
>= &ivector_table
[NUM_IVECS
])) {
273 unsigned int *caller
;
275 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
276 printk(KERN_CRIT
"request_irq: Old style IRQ registry attempt "
277 "from %p, irq %08x.\n", caller
, irq
);
284 irqflags
&= ~SA_IMAP_MASKED
;
286 irqflags
|= SA_IMAP_MASKED
;
287 if (bucket
->flags
& IBF_PCI
) {
289 * PCI IRQs should never use SA_INTERRUPT.
291 irqflags
&= ~(SA_INTERRUPT
);
294 * Check whether we _should_ use DMA Write Sync
295 * (for devices behind bridges behind APB).
297 if (bucket
->flags
& IBF_DMA_SYNC
)
298 irqflags
|= SA_DMA_SYNC
;
304 action
= *(bucket
->pil
+ irq_action
);
306 if((action
->flags
& SA_SHIRQ
) && (irqflags
& SA_SHIRQ
))
307 for (tmp
= action
; tmp
->next
; tmp
= tmp
->next
)
310 restore_flags(flags
);
313 if((action
->flags
& SA_INTERRUPT
) ^ (irqflags
& SA_INTERRUPT
)) {
314 printk("Attempt to mix fast and slow interrupts on IRQ%d "
315 "denied\n", bucket
->pil
);
316 restore_flags(flags
);
319 action
= NULL
; /* Or else! */
322 /* If this is flagged as statically allocated then we use our
323 * private struct which is never freed.
325 if(irqflags
& SA_STATIC_ALLOC
) {
326 if(static_irq_count
< MAX_STATIC_ALLOC
)
327 action
= &static_irqaction
[static_irq_count
++];
329 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
330 "using kmalloc\n", irq
, name
);
333 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
337 restore_flags(flags
);
341 if ((irqflags
& SA_IMAP_MASKED
) == 0) {
342 bucket
->irq_info
= action
;
343 bucket
->flags
|= IBF_ACTIVE
;
345 if((bucket
->flags
& IBF_ACTIVE
) != 0) {
346 void *orig
= bucket
->irq_info
;
347 void **vector
= NULL
;
349 if((bucket
->flags
& IBF_PCI
) == 0) {
350 printk("IRQ: Trying to share non-PCI bucket.\n");
353 if((bucket
->flags
& IBF_MULTI
) == 0) {
354 vector
= kmalloc(sizeof(void *) * 4, GFP_KERNEL
);
356 goto free_and_enomem
;
358 /* We might have slept. */
359 if ((bucket
->flags
& IBF_MULTI
) != 0) {
363 vector
= (void **)bucket
->irq_info
;
364 for(ent
= 0; ent
< 4; ent
++) {
365 if (vector
[ent
] == NULL
) {
366 vector
[ent
] = action
;
377 bucket
->irq_info
= vector
;
378 bucket
->flags
|= IBF_MULTI
;
383 vector
= (void **)orig
;
384 for(ent
= 0; ent
< 4; ent
++) {
385 if(vector
[ent
] == NULL
) {
386 vector
[ent
] = action
;
394 bucket
->irq_info
= action
;
395 bucket
->flags
|= IBF_ACTIVE
;
397 pending
= bucket
->pending
;
402 action
->mask
= (unsigned long) bucket
;
403 action
->handler
= handler
;
404 action
->flags
= irqflags
;
407 action
->dev_id
= dev_id
;
412 *(bucket
->pil
+ irq_action
) = action
;
416 /* We ate the IVEC already, this makes sure it does not get lost. */
418 atomic_bucket_insert(bucket
);
419 set_softint(1 << bucket
->pil
);
421 restore_flags(flags
);
430 restore_flags(flags
);
435 restore_flags(flags
);
439 void free_irq(unsigned int irq
, void *dev_id
)
441 struct irqaction
*action
;
442 struct irqaction
*tmp
= NULL
;
444 struct ino_bucket
*bucket
= __bucket(irq
), *bp
;
446 if ((bucket
!= &pil0_dummy_bucket
) &&
447 (bucket
< &ivector_table
[0] ||
448 bucket
>= &ivector_table
[NUM_IVECS
])) {
449 unsigned int *caller
;
451 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
452 printk(KERN_CRIT
"free_irq: Old style IRQ removal attempt "
453 "from %p, irq %08x.\n", caller
, irq
);
457 action
= *(bucket
->pil
+ irq_action
);
458 if(!action
->handler
) {
459 printk("Freeing free IRQ %d\n", bucket
->pil
);
463 for( ; action
; action
= action
->next
) {
464 if(action
->dev_id
== dev_id
)
469 printk("Trying to free free shared IRQ %d\n", bucket
->pil
);
472 } else if(action
->flags
& SA_SHIRQ
) {
473 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket
->pil
);
477 if(action
->flags
& SA_STATIC_ALLOC
) {
478 printk("Attempt to free statically allocated IRQ %d (%s)\n",
479 bucket
->pil
, action
->name
);
485 tmp
->next
= action
->next
;
487 *(bucket
->pil
+ irq_action
) = action
->next
;
489 if(action
->flags
& SA_IMAP_MASKED
) {
490 unsigned long imap
= bucket
->imap
;
491 void **vector
, *orig
;
494 orig
= bucket
->irq_info
;
495 vector
= (void **)orig
;
497 if ((bucket
->flags
& IBF_MULTI
) != 0) {
500 for(ent
= 0; ent
< 4; ent
++) {
501 if(vector
[ent
] == action
)
503 else if(vector
[ent
] != NULL
) {
504 orphan
= vector
[ent
];
509 /* Only free when no other shared irq
514 /* Convert back to non-shared bucket. */
515 bucket
->irq_info
= orphan
;
516 bucket
->flags
&= ~(IBF_MULTI
);
522 bucket
->irq_info
= NULL
;
525 /* This unique interrupt source is now inactive. */
526 bucket
->flags
&= ~IBF_ACTIVE
;
528 /* See if any other buckets share this bucket's IMAP
529 * and are still active.
531 for(ent
= 0; ent
< NUM_IVECS
; ent
++) {
532 bp
= &ivector_table
[ent
];
535 (bp
->flags
& IBF_ACTIVE
) != 0)
539 /* Only disable when no other sub-irq levels of
540 * the same IMAP are active.
542 if (ent
== NUM_IVECS
)
548 restore_flags(flags
);
553 /* Who has global_irq_lock. */
554 unsigned char global_irq_holder
= NO_PROC_ID
;
556 static void show(char * str
)
558 int cpu
= smp_processor_id();
561 printk("\n%s, CPU %d:\n", str
, cpu
);
562 printk("irq: %d [ ", irqs_running());
563 for (i
= 0; i
< smp_num_cpus
; i
++)
564 printk("%u ", __brlock_array
[i
][BR_GLOBALIRQ_LOCK
]);
565 printk("]\nbh: %d [ ",
566 (spin_is_locked(&global_bh_lock
) ? 1 : 0));
567 for (i
= 0; i
< smp_num_cpus
; i
++)
568 printk("%u ", local_bh_count(i
));
572 #define MAXCOUNT 100000000
575 #define SYNC_OTHER_ULTRAS(x) udelay(x+1)
577 #define SYNC_OTHER_ULTRAS(x) membar("#Sync");
580 void synchronize_irq(void)
582 if (irqs_running()) {
588 static inline void get_irqlock(int cpu
)
592 if ((unsigned char)cpu
== global_irq_holder
)
597 br_write_lock(BR_GLOBALIRQ_LOCK
);
601 if (!irqs_running() &&
602 (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock
)))
605 br_write_unlock(BR_GLOBALIRQ_LOCK
);
606 lock
= &__br_write_locks
[BR_GLOBALIRQ_LOCK
].lock
;
607 while (irqs_running() ||
608 spin_is_locked(lock
) ||
609 (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock
))) {
615 SYNC_OTHER_ULTRAS(cpu
);
621 global_irq_holder
= cpu
;
624 void __global_cli(void)
630 int cpu
= smp_processor_id();
632 if (! local_irq_count(cpu
))
637 void __global_sti(void)
639 int cpu
= smp_processor_id();
641 if (! local_irq_count(cpu
))
642 release_irqlock(cpu
);
646 unsigned long __global_save_flags(void)
648 unsigned long flags
, local_enabled
, retval
;
651 local_enabled
= ((flags
== 0) ? 1 : 0);
652 retval
= 2 + local_enabled
;
653 if (! local_irq_count(smp_processor_id())) {
656 if (global_irq_holder
== (unsigned char) smp_processor_id())
662 void __global_restore_flags(unsigned long flags
)
680 __asm__
__volatile__("mov %%i7, %0" : "=r" (pc
));
681 printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
687 #endif /* CONFIG_SMP */
/* catch_disabled_ivec: called when an interrupt vector arrives for a
 * source nobody has registered yet.  It logs the spurious vector (the
 * bucket index is computed as bucket - &ivector_table[0]) and clears
 * this CPU's PIL-0 work slot so the vector is not lost/replayed.
 * NOTE(review): lines are missing between the numbered fragments; the
 * "record pending" logic the comment below describes is not fully
 * visible here.
 */
689 void catch_disabled_ivec(struct pt_regs
*regs
)
691 int cpu
= smp_processor_id();
692 struct ino_bucket
*bucket
= __bucket(*irq_work(cpu
, 0));
694 /* We can actually see this on Ultra/PCI PCI cards, which are bridges
695 * to other devices. Here a single IMAP enabled potentially multiple
696 * unique interrupt sources (which each do have a unique ICLR register).
698 * So what we do is just register that the IVEC arrived, when registered
699 * for real the request_irq() code will check the bit and signal
700 * a local CPU interrupt for it.
703 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
704 bucket
- &ivector_table
[0], regs
->tpc
);
706 *irq_work(cpu
, 0) = 0;
/* Max neighbour-idleness threshold used by handler_irq() when deciding
 * whether to forward an interrupt to the buddy CPU.
 */
711 #define FORWARD_VOLUME 12
713 void handler_irq(int irq
, struct pt_regs
*regs
)
715 struct ino_bucket
*bp
, *nbp
;
716 int cpu
= smp_processor_id();
718 extern int this_is_starfire
;
719 int should_forward
= (this_is_starfire
== 0 &&
722 unsigned int buddy
= 0;
724 /* 'cpu' is the MID (ie. UPAID), calculate the MID
727 if (should_forward
!= 0) {
728 buddy
= cpu_number_map(cpu
) + 1;
729 if (buddy
>= NR_CPUS
||
730 (buddy
= cpu_logical_map(buddy
)) == -1)
731 buddy
= cpu_logical_map(0);
733 /* Voo-doo programming. */
734 if (cpu_data
[buddy
].idle_volume
< FORWARD_VOLUME
)
742 * Check for TICK_INT on level 14 softint.
744 if ((irq
== 14) && (get_softint() & (1UL << 0)))
747 clear_softint(1 << irq
);
750 kstat
.irqs
[cpu
][irq
]++;
755 __bucket(xchg32(irq_work(cpu
, irq
), 0)) :
758 bp
= __bucket(xchg32(irq_work(cpu
, irq
), 0));
760 for ( ; bp
!= NULL
; bp
= nbp
) {
761 unsigned char flags
= bp
->flags
;
763 nbp
= __bucket(bp
->irq_chain
);
764 if ((flags
& IBF_ACTIVE
) != 0) {
766 if ((flags
& IBF_DMA_SYNC
) != 0) {
767 upa_readl(dma_sync_reg_table
[bp
->synctab_ent
]);
768 upa_readq(pci_dma_wsync
);
771 if ((flags
& IBF_MULTI
) == 0) {
772 struct irqaction
*ap
= bp
->irq_info
;
773 ap
->handler(__irq(bp
), ap
->dev_id
, regs
);
775 void **vector
= (void **)bp
->irq_info
;
777 for (ent
= 0; ent
< 4; ent
++) {
778 struct irqaction
*ap
= vector
[ent
];
780 ap
->handler(__irq(bp
), ap
->dev_id
, regs
);
783 /* Only the dummy bucket lacks IMAP/ICLR. */
786 /* Ok, here is what is going on:
787 * 1) Retargeting IRQs on Starfire is very
788 * expensive so just forget about it on them.
789 * 2) Moving around very high priority interrupts
791 * 3) If the current cpu is idle, interrupts are
792 * useful work, so keep them here. But do not
793 * pass to our neighbour if he is not very idle.
795 if (should_forward
!= 0) {
796 /* Push it to our buddy. */
798 upa_writel(buddy
| IMAP_VALID
, bp
->imap
);
801 upa_writel(ICLR_IDLE
, bp
->iclr
);
809 #ifdef CONFIG_BLK_DEV_FD
810 extern void floppy_interrupt(int irq
, void *dev_cookie
, struct pt_regs
*regs
);
812 void sparc_floppy_irq(int irq
, void *dev_cookie
, struct pt_regs
*regs
)
814 struct irqaction
*action
= *(irq
+ irq_action
);
815 struct ino_bucket
*bucket
;
816 int cpu
= smp_processor_id();
819 kstat
.irqs
[cpu
][irq
]++;
821 *(irq_work(cpu
, irq
)) = 0;
822 bucket
= (struct ino_bucket
*)action
->mask
;
824 floppy_interrupt(irq
, dev_cookie
, regs
);
825 upa_writel(ICLR_IDLE
, bucket
->iclr
);
831 /* The following assumes that the branch lies before the place we
832 * are branching to. This is the case for a trap vector...
833 * You have been warned.
835 #define SPARC_BRANCH(dest_addr, inst_addr) \
836 (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
838 #define SPARC_NOP (0x01000000)
840 static void install_fast_irq(unsigned int cpu_irq
,
841 void (*handler
)(int, void *, struct pt_regs
*))
843 extern unsigned long sparc64_ttable_tl0
;
844 unsigned long ttent
= (unsigned long) &sparc64_ttable_tl0
;
848 ttent
+= (cpu_irq
- 1) << 5;
849 insns
= (unsigned int *) ttent
;
850 insns
[0] = SPARC_BRANCH(((unsigned long) handler
),
851 ((unsigned long)&insns
[0]));
852 insns
[1] = SPARC_NOP
;
853 __asm__
__volatile__("membar #StoreStore; flush %0" : : "r" (ttent
));
856 int request_fast_irq(unsigned int irq
,
857 void (*handler
)(int, void *, struct pt_regs
*),
858 unsigned long irqflags
, const char *name
, void *dev_id
)
860 struct irqaction
*action
;
861 struct ino_bucket
*bucket
= __bucket(irq
);
864 /* No pil0 dummy buckets allowed here. */
865 if (bucket
< &ivector_table
[0] ||
866 bucket
>= &ivector_table
[NUM_IVECS
]) {
867 unsigned int *caller
;
869 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
870 printk(KERN_CRIT
"request_fast_irq: Old style IRQ registry attempt "
871 "from %p, irq %08x.\n", caller
, irq
);
875 /* Only IMAP style interrupts can be registered as fast. */
882 if ((bucket
->pil
== 0) || (bucket
->pil
== 14)) {
883 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
887 action
= *(bucket
->pil
+ irq_action
);
889 if(action
->flags
& SA_SHIRQ
)
890 panic("Trying to register fast irq when already shared.\n");
891 if(irqflags
& SA_SHIRQ
)
892 panic("Trying to register fast irq as shared.\n");
893 printk("request_fast_irq: Trying to register yet already owned.\n");
898 if(irqflags
& SA_STATIC_ALLOC
) {
899 if(static_irq_count
< MAX_STATIC_ALLOC
)
900 action
= &static_irqaction
[static_irq_count
++];
902 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
903 "using kmalloc\n", bucket
->pil
, name
);
906 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
909 restore_flags(flags
);
912 install_fast_irq(bucket
->pil
, handler
);
914 bucket
->irq_info
= action
;
915 bucket
->flags
|= IBF_ACTIVE
;
917 action
->mask
= (unsigned long) bucket
;
918 action
->handler
= handler
;
919 action
->flags
= irqflags
| SA_IMAP_MASKED
;
920 action
->dev_id
= NULL
;
924 *(bucket
->pil
+ irq_action
) = action
;
927 restore_flags(flags
);
935 /* We really don't need these at all on the Sparc. We only have
936 * stubs here because they are exported to modules.
/* probe_irq_on/probe_irq_off: no-op autoprobe stubs kept only for the
 * module-export ABI; presumably return 0 in the (missing) bodies --
 * TODO confirm against the pristine source.
 */
938 unsigned long probe_irq_on(void)
943 int probe_irq_off(unsigned long mask
)
948 /* This is gets the master TICK_INT timer going. */
949 void init_timers(void (*cfunc
)(int, void *, struct pt_regs
*),
950 unsigned long *clock
)
952 unsigned long pstate
;
953 extern unsigned long timer_tick_offset
;
956 extern void smp_tick_init(void);
959 node
= linux_cpus
[0].prom_node
;
960 *clock
= prom_getint(node
, "clock-frequency");
961 timer_tick_offset
= *clock
/ HZ
;
966 /* Register IRQ handler. */
967 err
= request_irq(build_irq(0, 0, 0UL, 0UL), cfunc
, (SA_INTERRUPT
| SA_STATIC_ALLOC
),
971 prom_printf("Serious problem, cannot register TICK_INT\n");
975 /* Guarantee that the following sequences execute
978 __asm__
__volatile__("rdpr %%pstate, %0\n\t"
979 "wrpr %0, %1, %%pstate"
983 /* Set things up so user can access tick register for profiling
984 * purposes. Also workaround BB_ERRATA_1 by doing a dummy
985 * read back of %tick after writing it.
987 __asm__
__volatile__("
988 sethi %%hi(0x80000000), %%g1
994 andn %%g2, %%g1, %%g2
1001 /* Workaround for Spitfire Errata (#54 I think??), I discovered
1002 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
1005 * On Blackbird writes to %tick_cmpr can fail, the
1006 * workaround seems to be to execute the wr instruction
1007 * at the start of an I-cache line, and perform a dummy
1008 * read back from %tick_cmpr right after writing to it. -DaveM
1010 __asm__
__volatile__("
1015 1: wr %%g1, 0x0, %%tick_cmpr
1016 rd %%tick_cmpr, %%g0"
1018 : "r" (timer_tick_offset
)
1021 /* Restore PSTATE_IE. */
1022 __asm__
__volatile__("wrpr %0, 0x0, %%pstate"
1030 static int retarget_one_irq(struct irqaction
*p
, int goal_cpu
)
1032 extern int this_is_starfire
;
1033 struct ino_bucket
*bucket
= __bucket(p
->mask
);
1034 unsigned long imap
= bucket
->imap
;
1037 /* Never change this, it causes problems on Ex000 systems. */
1038 if (bucket
->pil
== 12)
1041 if(this_is_starfire
== 0) {
1042 tid
= __cpu_logical_map
[goal_cpu
] << 26;
1044 extern unsigned int starfire_translate(unsigned long imap
,
1045 unsigned int upaid
);
1047 tid
= (starfire_translate(imap
, __cpu_logical_map
[goal_cpu
]) << 26);
1049 upa_writel(IMAP_VALID
| (tid
& IMAP_TID
), imap
);
1052 if(goal_cpu
>= NR_CPUS
||
1053 __cpu_logical_map
[goal_cpu
] == -1)
1058 /* Called from request_irq. */
1059 static void distribute_irqs(void)
1061 unsigned long flags
;
1064 save_and_cli(flags
);
1066 for(level
= 0; level
< NR_IRQS
; level
++) {
1067 struct irqaction
*p
= irq_action
[level
];
1069 if(p
->flags
& SA_IMAP_MASKED
)
1070 cpu
= retarget_one_irq(p
, cpu
);
1074 restore_flags(flags
);
/* PROM ("/counter-timer") timer state.  prom_timers points at the
 * PROM-mapped sun5_timer registers (or NULL if absent); prom_limit0/1
 * save the PROM's limit values across kill/enable.
 */
1079 struct sun5_timer
*prom_timers
;
1080 static u64 prom_limit0
, prom_limit1
;
/* map_prom_timers: locate the "/counter-timer" PROM node and record its
 * already-PROM-mapped register address in prom_timers; leaves
 * prom_timers NULL when the node or its "address" property is missing.
 * NOTE(review): lines are missing between the numbered fragments.
 */
1082 static void map_prom_timers(void)
1084 unsigned int addr
[3];
1087 /* PROM timer node hangs out in the top level of device siblings... */
1088 tnode
= prom_finddevice("/counter-timer");
1090 /* Assume if node is not present, PROM uses different tick mechanism
1091 * which we should not care about.
1093 if(tnode
== 0 || tnode
== -1) {
1094 prom_timers
= (struct sun5_timer
*) 0;
1098 /* If PROM is really using this, it must be mapped by him. */
1099 err
= prom_getproperty(tnode
, "address", (char *)addr
, sizeof(addr
));
1101 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1102 prom_timers
= (struct sun5_timer
*) 0;
1105 prom_timers
= (struct sun5_timer
*) ((unsigned long)addr
[0]);
/* kill_prom_timer: save the PROM timer limits, zero both limit
 * registers to stop it, then consume any already-latched interrupt
 * packet via the ASI_INTR_RECEIVE / ASI_UDB_INTR_R inline asm so no
 * stale level-14 vector is left pending.
 */
1108 static void kill_prom_timer(void)
1113 /* Save them away for later. */
1114 prom_limit0
= prom_timers
->limit0
;
1115 prom_limit1
= prom_timers
->limit1
;
1117 /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
1118 * We turn both off here just to be paranoid.
1120 prom_timers
->limit0
= 0;
1121 prom_timers
->limit1
= 0;
1123 /* Wheee, eat the interrupt packet too... */
1124 __asm__
__volatile__("
1126 ldxa [%%g0] %0, %%g1
1127 ldxa [%%g2] %1, %%g1
1128 stxa %%g0, [%%g0] %0
1130 " : /* no outputs */
1131 : "i" (ASI_INTR_RECEIVE
), "i" (ASI_UDB_INTR_R
)
/* enable_prom_timer: restore the saved limit values and reset both
 * counters, re-arming the PROM timer exactly as it was before
 * kill_prom_timer().
 */
1135 void enable_prom_timer(void)
1140 /* Set it to whatever was there before. */
1141 prom_timers
->limit1
= prom_limit1
;
1142 prom_timers
->count1
= 0;
1143 prom_timers
->limit0
= prom_limit0
;
1144 prom_timers
->count0
= 0;
/* init_IRQ: boot-time interrupt setup.  Zeroes the ivector bucket table
 * (and the UP work vector), clears any softints left pending by the
 * just-disabled PROM timer, then enables interrupt reception by setting
 * the relevant %pstate bit (the "or %%g1, %0" operand is among the
 * missing fragments -- presumably PSTATE_IE; TODO confirm).  The
 * 'called' static presumably guards against double init.
 */
1147 void __init
init_IRQ(void)
1149 static int called
= 0;
1155 memset(&ivector_table
[0], 0, sizeof(ivector_table
));
1157 memset(&__up_workvec
[0], 0, sizeof(__up_workvec
));
1161 /* We need to clear any IRQ's pending in the soft interrupt
1162 * registers, a spurious one could be left around from the
1163 * PROM timer which we just disabled.
1165 clear_softint(get_softint());
1167 /* Now that ivector table is initialized, it is safe
1168 * to receive IRQ vector traps. We will normally take
1169 * one or two right now, in case some device PROM used
1170 * to boot us wants to speak to us. We just ignore them.
1172 __asm__
__volatile__("rdpr %%pstate, %%g1\n\t"
1173 "or %%g1, %0, %%g1\n\t"
1174 "wrpr %%g1, 0x0, %%pstate"
/* init_irq_proc: intentionally empty stub (no /proc/irq support here). */
1180 void init_irq_proc(void)
1182 /* For now, nothing... */