1 /* $Id: irq.c,v 1.94 2000/09/21 06:27:10 anton Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
9 #include <linux/config.h>
10 #include <linux/ptrace.h>
11 #include <linux/errno.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/signal.h>
15 #include <linux/interrupt.h>
16 #include <linux/malloc.h>
17 #include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
18 #include <linux/init.h>
19 #include <linux/delay.h>
21 #include <asm/ptrace.h>
22 #include <asm/processor.h>
23 #include <asm/atomic.h>
24 #include <asm/system.h>
27 #include <asm/iommu.h>
29 #include <asm/oplib.h>
30 #include <asm/timer.h>
32 #include <asm/hardirq.h>
33 #include <asm/softirq.h>
34 #include <asm/starfire.h>
/* NOTE(review): this chunk is a lossy extraction; interior source lines are
 * missing (see gaps in the embedded original line numbers). Comments below
 * only describe what the visible fragments establish.
 */
36 /* Internal flag, should not be visible elsewhere at all. */
/* Private bits OR'd into irqaction->flags; they live above the public SA_* range. */
37 #define SA_IMAP_MASKED 0x100
38 #define SA_DMA_SYNC 0x200
/* Forward declaration; defined later in this file and called from request_irq(). */
41 static void distribute_irqs(void);
44 /* UPA nodes send interrupt packet to UltraSparc with first data reg
45 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
46 * delivered. We must translate this into a non-vector IRQ so we can
47 * set the softint on this cpu.
49 * To make processing these packets efficient and race free we use
50 * an array of irq buckets below. The interrupt vector handler in
51 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
52 * The IVEC handler does not need to act atomically, the PIL dispatch
53 * code uses CAS to get an atomic snapshot of the list and clear it
*/
/* One bucket per interrupt vector number (INO); cache-line aligned. */
57 struct ino_bucket ivector_table
[NUM_IVECS
] __attribute__ ((aligned (64)));
/* UP work list (one per PIL); the two irq_work() definitions below are
 * presumably the UP and SMP variants selected by a #if that was lost in
 * extraction — TODO confirm against the full file.
 */
60 unsigned int __up_workvec
[16] __attribute__ ((aligned (64)));
61 #define irq_work(__cpu, __pil) &(__up_workvec[(void)(__cpu), (__pil)])
63 #define irq_work(__cpu, __pil) &(cpu_data[(__cpu)].irq_worklists[(__pil)])
67 /* This is a table of physical addresses used to deal with SA_DMA_SYNC.
68 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
69 * for devices behind busses other than APB on Sabre systems.
71 * Currently these physical addresses are just config space accesses
72 * to the command register for that device.
*/
74 unsigned long pci_dma_wsync
;
75 unsigned long dma_sync_reg_table
[256];
76 unsigned char dma_sync_reg_table_entry
= 0;
79 /* This is based upon code in the 32-bit Sparc kernel written mostly by
80 * David Redman (djhr@tadpole.co.uk).
*/
/* Small pool of irqactions handed out for SA_STATIC_ALLOC requests; never freed. */
82 #define MAX_STATIC_ALLOC 4
83 static struct irqaction static_irqaction
[MAX_STATIC_ALLOC
];
84 static int static_irq_count
= 0;
86 /* This is exported so that fast IRQ handlers can get at it... -DaveM */
/* Per-PIL list heads of registered handlers; indexed 0..NR_IRQS. */
87 struct irqaction
*irq_action
[NR_IRQS
+1] = {
88 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
89 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
/* Format the registered-IRQ table (one line per PIL with per-CPU counts and
 * handler names) into 'buf'; returns the number of bytes written via 'len'.
 * NOTE(review): fragment — the opening brace, local declarations (i, j, len)
 * and several statements are missing from this extraction.
 */
92 int get_irq_list(char *buf
)
95 struct irqaction
*action
;
/* Walk every PIL slot; skip slots with no registered action. */
100 for(i
= 0; i
< (NR_IRQS
+ 1); i
++) {
101 if(!(action
= *(i
+ irq_action
)))
103 len
+= sprintf(buf
+ len
, "%3d: ", i
);
105 len
+= sprintf(buf
+ len
, "%10u ", kstat_irqs(i
));
/* Per-CPU interrupt counts in logical-CPU order. */
107 for (j
= 0; j
< smp_num_cpus
; j
++)
108 len
+= sprintf(buf
+ len
, "%10u ",
109 kstat
.irqs
[cpu_logical_map(j
)][i
]);
/* '+' marks a fast (SA_INTERRUPT) handler. */
111 len
+= sprintf(buf
+ len
, "%c %s",
112 (action
->flags
& SA_INTERRUPT
) ? '+' : ' ',
/* Remaining shared handlers on the same PIL, comma separated. */
114 for(action
= action
->next
; action
; action
= action
->next
) {
115 len
+= sprintf(buf
+len
, ",%s %s",
116 (action
->flags
& SA_INTERRUPT
) ? " +" : "",
119 len
+= sprintf(buf
+ len
, "\n");
124 /* Now these are always passed a true fully specified sun4u INO. */
/* Enable delivery of 'irq' by writing IMAP_VALID plus the target CPU's TID
 * into the bucket's IMAP register. Non-Starfire reads the UPA MID from
 * ASI_UPA_CONFIG; Starfire translates via starfire_translate().
 * NOTE(review): fragment — locals (tid, imap), dummy-bucket early-out and
 * braces are missing from this extraction.
 */
125 void enable_irq(unsigned int irq
)
127 struct ino_bucket
*bucket
= __bucket(irq
);
135 if(this_is_starfire
== 0) {
136 /* We set it to our UPA MID. */
137 __asm__
__volatile__("ldxa [%%g0] %1, %0"
139 : "i" (ASI_UPA_CONFIG
));
/* Shift MID into the IMAP_TID field position. */
140 tid
= ((tid
& UPA_CONFIG_MID
) << 9);
142 tid
= (starfire_translate(imap
, current
->processor
) << 26);
145 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
146 * of this SYSIO's preconfigured IGN in the SYSIO Control
147 * Register, the hardware just mirrors that value here.
148 * However for Graphics and UPA Slave devices the full
149 * IMAP_INR field can be set by the programmer here.
151 * Things like FFB can now be handled via the new IRQ mechanism.
*/
153 upa_writel(IMAP_VALID
| (tid
& IMAP_TID
), imap
);
156 /* This now gets passed true ino's as well. */
/* Disable delivery of 'irq': read-modify-write the IMAP register (the
 * visible fragment clears it via 'tmp'; presumably IMAP_VALID is masked
 * off in a lost line — TODO confirm). Deliberately does NOT touch ICLR,
 * per the comment below, to avoid losing interrupts for SCSI callers.
 * NOTE(review): fragment — locals and braces missing.
 */
157 void disable_irq(unsigned int irq
)
159 struct ino_bucket
*bucket
= __bucket(irq
);
166 /* NOTE: We do not want to futz with the IRQ clear registers
167 * and move the state to IDLE, the SCSI code does call
168 * disable_irq() to assure atomicity in the queue cmd
169 * SCSI adapter driver code. Thus we'd lose interrupts.
*/
171 tmp
= upa_readl(imap
);
173 upa_writel(tmp
, imap
);
177 /* The timer is the one "weird" interrupt which is generated by
178 * the CPU %tick register and not by some normal vectored interrupt
179 * source. To handle this special case, we use this dummy INO bucket.
*/
181 static struct ino_bucket pil0_dummy_bucket
= {
/* Translate (pil, inofixup, iclr, imap) into an opaque irq cookie:
 * PIL 0 must pass iclr == imap == 0 and gets the dummy bucket; all others
 * must pass both registers, and the INO read from IMAP (plus inofixup)
 * indexes ivector_table. Re-initializing an active bucket is fatal.
 * NOTE(review): fragment — braces, 'ino' declaration, error-path halts and
 * the bucket pil/iclr/imap assignments are missing from this extraction.
 */
192 unsigned int build_irq(int pil
, int inofixup
, unsigned long iclr
, unsigned long imap
)
194 struct ino_bucket
*bucket
;
198 if(iclr
!= 0UL || imap
!= 0UL) {
199 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
203 return __irq(&pil0_dummy_bucket
);
206 /* RULE: Both must be specified in all other cases. */
207 if (iclr
== 0UL || imap
== 0UL) {
208 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
209 pil
, inofixup
, iclr
, imap
);
/* INO = IGN|INO fields of the live IMAP register, plus caller's fixup. */
213 ino
= (upa_readl(imap
) & (IMAP_IGN
| IMAP_INO
)) + inofixup
;
214 if(ino
> NUM_IVECS
) {
215 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
216 ino
, pil
, inofixup
, iclr
, imap
);
220 /* Ok, looks good, set it up. Don't touch the irq_chain or
*/
223 bucket
= &ivector_table
[ino
];
224 if ((bucket
->flags
& IBF_ACTIVE
) ||
225 (bucket
->irq_info
!= NULL
)) {
226 /* This is a gross fatal error if it happens here. */
227 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
228 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
229 ino
, pil
, inofixup
, iclr
, imap
);
230 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
231 bucket
->pil
, bucket
->iclr
, bucket
->imap
);
232 prom_printf("IRQ: Cannot continue, halting...\n");
240 bucket
->irq_info
= NULL
;
242 return __irq(bucket
);
/* Push 'bucket' onto this CPU's per-PIL pending work list. Interrupts are
 * disabled around the link-in (PSTATE.IE cleared via wrpr, restored after)
 * so the list update cannot race with the IVEC handler on this CPU.
 * NOTE(review): fragment — the 'ent' declaration and braces are missing.
 */
245 static void atomic_bucket_insert(struct ino_bucket
*bucket
)
247 unsigned long pstate
;
/* Save PSTATE, then clear the IE bit. */
250 __asm__
__volatile__("rdpr %%pstate, %0" : "=r" (pstate
));
251 __asm__
__volatile__("wrpr %0, %1, %%pstate"
252 : : "r" (pstate
), "i" (PSTATE_IE
));
/* Classic singly-linked push: new head chains to old head. */
253 ent
= irq_work(smp_processor_id(), bucket
->pil
);
254 bucket
->irq_chain
= *ent
;
255 *ent
= __irq(bucket
);
/* Restore the saved PSTATE (re-enables interrupts if they were on). */
256 __asm__
__volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate
));
/* Register 'handler' for the bucket encoded in 'irq'. Validates the cookie,
 * normalizes the private SA_IMAP_MASKED / SA_DMA_SYNC flags, allocates an
 * irqaction (static pool or kmalloc), links it into irq_action[pil], and for
 * shared PCI buckets grows irq_info into a 4-entry handler vector (IBF_MULTI).
 * If an IVEC was already swallowed ('pending'), it is re-queued via
 * atomic_bucket_insert() + set_softint() so it is not lost.
 * NOTE(review): heavily fragmented — save_and_cli/brace structure, error
 * returns (-EINVAL/-EBUSY/-ENOMEM), and several assignments are missing from
 * this extraction; comments describe only the visible fragments.
 */
259 int request_irq(unsigned int irq
, void (*handler
)(int, void *, struct pt_regs
*),
260 unsigned long irqflags
, const char *name
, void *dev_id
)
262 struct irqaction
*action
, *tmp
= NULL
;
263 struct ino_bucket
*bucket
= __bucket(irq
);
/* Reject cookies that are neither the PIL0 dummy nor inside ivector_table. */
267 if ((bucket
!= &pil0_dummy_bucket
) &&
268 (bucket
< &ivector_table
[0] ||
269 bucket
>= &ivector_table
[NUM_IVECS
])) {
270 unsigned int *caller
;
/* %i7 holds the caller's return address — identify the offender. */
272 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
273 printk(KERN_CRIT
"request_irq: Old style IRQ registry attempt "
274 "from %p, irq %08x.\n", caller
, irq
);
/* Dummy bucket has no IMAP; real buckets are marked IMAP-masked. */
281 irqflags
&= ~SA_IMAP_MASKED
;
283 irqflags
|= SA_IMAP_MASKED
;
284 if (bucket
->flags
& IBF_PCI
) {
286 * PCI IRQs should never use SA_INTERRUPT.
288 irqflags
&= ~(SA_INTERRUPT
);
291 * Check wether we _should_ use DMA Write Sync
292 * (for devices behind bridges behind APB).
294 if (bucket
->flags
& IBF_DMA_SYNC
)
295 irqflags
|= SA_DMA_SYNC
;
301 action
= *(bucket
->pil
+ irq_action
);
/* Existing handler on this PIL: only allowed if both sides agree on SA_SHIRQ. */
303 if((action
->flags
& SA_SHIRQ
) && (irqflags
& SA_SHIRQ
))
304 for (tmp
= action
; tmp
->next
; tmp
= tmp
->next
)
307 restore_flags(flags
);
/* Fast (SA_INTERRUPT) and slow handlers cannot share a PIL. */
310 if((action
->flags
& SA_INTERRUPT
) ^ (irqflags
& SA_INTERRUPT
)) {
311 printk("Attempt to mix fast and slow interrupts on IRQ%d "
312 "denied\n", bucket
->pil
);
313 restore_flags(flags
);
316 action
= NULL
; /* Or else! */
319 /* If this is flagged as statically allocated then we use our
320 * private struct which is never freed.
*/
322 if(irqflags
& SA_STATIC_ALLOC
) {
323 if(static_irq_count
< MAX_STATIC_ALLOC
)
324 action
= &static_irqaction
[static_irq_count
++];
326 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
327 "using kmalloc\n", irq
, name
);
330 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
334 restore_flags(flags
);
/* Dummy (non-IMAP) bucket: single handler, mark active and done. */
338 if ((irqflags
& SA_IMAP_MASKED
) == 0) {
339 bucket
->irq_info
= action
;
340 bucket
->flags
|= IBF_ACTIVE
;
/* Bucket already active: set up / extend the shared-handler vector. */
342 if((bucket
->flags
& IBF_ACTIVE
) != 0) {
343 void *orig
= bucket
->irq_info
;
344 void **vector
= NULL
;
346 if((bucket
->flags
& IBF_PCI
) == 0) {
347 printk("IRQ: Trying to share non-PCI bucket.\n");
350 if((bucket
->flags
& IBF_MULTI
) == 0) {
/* First sharer: allocate the fixed 4-slot handler vector. */
351 vector
= kmalloc(sizeof(void *) * 4, GFP_KERNEL
);
353 goto free_and_enomem
;
355 /* We might have slept. */
356 if ((bucket
->flags
& IBF_MULTI
) != 0) {
/* Someone converted to IBF_MULTI while we slept in kmalloc. */
360 vector
= (void **)bucket
->irq_info
;
361 for(ent
= 0; ent
< 4; ent
++) {
362 if (vector
[ent
] == NULL
) {
363 vector
[ent
] = action
;
374 bucket
->irq_info
= vector
;
375 bucket
->flags
|= IBF_MULTI
;
/* Already IBF_MULTI: find a free slot in the existing vector. */
380 vector
= (void **)orig
;
381 for(ent
= 0; ent
< 4; ent
++) {
382 if(vector
[ent
] == NULL
) {
383 vector
[ent
] = action
;
/* First handler on an inactive IMAP bucket. */
391 bucket
->irq_info
= action
;
392 bucket
->flags
|= IBF_ACTIVE
;
394 pending
= bucket
->pending
;
399 action
->mask
= (unsigned long) bucket
;
400 action
->handler
= handler
;
401 action
->flags
= irqflags
;
404 action
->dev_id
= dev_id
;
409 *(bucket
->pil
+ irq_action
) = action
;
413 /* We ate the IVEC already, this makes sure it does not get lost. */
415 atomic_bucket_insert(bucket
);
416 set_softint(1 << bucket
->pil
);
418 restore_flags(flags
);
427 restore_flags(flags
);
432 restore_flags(flags
);
/* Unregister the handler matching 'dev_id' for the bucket in 'irq': unlink
 * from irq_action[pil], undo IBF_MULTI sharing (collapsing back to a single
 * handler when one sharer remains), and for the last handler on an IMAP
 * bucket clear IBF_ACTIVE and — if no other bucket with the same IMAP is
 * still active — disable the line.
 * NOTE(review): heavily fragmented — braces, returns, kfree calls and the
 * surviving-handler bookkeeping are missing from this extraction.
 */
436 void free_irq(unsigned int irq
, void *dev_id
)
438 struct irqaction
*action
;
439 struct irqaction
*tmp
= NULL
;
441 struct ino_bucket
*bucket
= __bucket(irq
), *bp
;
/* Same cookie validation as request_irq(). */
443 if ((bucket
!= &pil0_dummy_bucket
) &&
444 (bucket
< &ivector_table
[0] ||
445 bucket
>= &ivector_table
[NUM_IVECS
])) {
446 unsigned int *caller
;
448 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
449 printk(KERN_CRIT
"free_irq: Old style IRQ removal attempt "
450 "from %p, irq %08x.\n", caller
, irq
);
454 action
= *(bucket
->pil
+ irq_action
);
455 if(!action
->handler
) {
456 printk("Freeing free IRQ %d\n", bucket
->pil
);
/* Shared line: locate the action whose dev_id matches. */
460 for( ; action
; action
= action
->next
) {
461 if(action
->dev_id
== dev_id
)
466 printk("Trying to free free shared IRQ %d\n", bucket
->pil
);
469 } else if(action
->flags
& SA_SHIRQ
) {
470 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket
->pil
);
474 if(action
->flags
& SA_STATIC_ALLOC
) {
475 printk("Attempt to free statically allocated IRQ %d (%s)\n",
476 bucket
->pil
, action
->name
);
/* Unlink: mid-list via predecessor 'tmp', else replace the list head. */
482 tmp
->next
= action
->next
;
484 *(bucket
->pil
+ irq_action
) = action
->next
;
486 if(action
->flags
& SA_IMAP_MASKED
) {
487 unsigned long imap
= bucket
->imap
;
488 void **vector
, *orig
;
491 orig
= bucket
->irq_info
;
492 vector
= (void **)orig
;
494 if ((bucket
->flags
& IBF_MULTI
) != 0) {
/* Remove our slot; remember any other surviving handler ('orphan'). */
497 for(ent
= 0; ent
< 4; ent
++) {
498 if(vector
[ent
] == action
)
500 else if(vector
[ent
] != NULL
) {
501 orphan
= vector
[ent
];
506 /* Only free when no other shared irq
*/
511 /* Convert back to non-shared bucket. */
512 bucket
->irq_info
= orphan
;
513 bucket
->flags
&= ~(IBF_MULTI
);
519 bucket
->irq_info
= NULL
;
522 /* This unique interrupt source is now inactive. */
523 bucket
->flags
&= ~IBF_ACTIVE
;
525 /* See if any other buckets share this bucket's IMAP
526 * and are still active.
*/
528 for(ent
= 0; ent
< NUM_IVECS
; ent
++) {
529 bp
= &ivector_table
[ent
];
532 (bp
->flags
& IBF_ACTIVE
) != 0)
536 /* Only disable when no other sub-irq levels of
537 * the same IMAP are active.
*/
539 if (ent
== NUM_IVECS
)
545 restore_flags(flags
);
550 /* Who has the global irq brlock */
551 unsigned char global_irq_holder
= NO_PROC_ID
;
/* Debug dump for global-irq deadlock diagnosis: prints per-CPU brlock and
 * bottom-half state. NOTE(review): fragment — 'i' declaration, braces and
 * trailing printk lines are missing from this extraction.
 */
553 static void show(char * str
)
555 int cpu
= smp_processor_id();
558 printk("\n%s, CPU %d:\n", str
, cpu
);
559 printk("irq: %d [ ", irqs_running());
560 for (i
= 0; i
< smp_num_cpus
; i
++)
561 printk("%u ", __brlock_array
[i
][BR_GLOBALIRQ_LOCK
]);
562 printk("]\nbh: %d [ ",
563 (spin_is_locked(&global_bh_lock
) ? 1 : 0));
564 for (i
= 0; i
< smp_num_cpus
; i
++)
565 printk("%u ", local_bh_count(i
));
/* Spin bound before show()-style diagnostics; the two SYNC_OTHER_ULTRAS
 * variants are presumably SMP vs UP alternatives from a lost #if — TODO
 * confirm against the full file.
 */
569 #define MAXCOUNT 100000000
572 #define SYNC_OTHER_ULTRAS(x) udelay(x+1)
574 #define SYNC_OTHER_ULTRAS(x) membar("#Sync");
/* Wait until no interrupt handlers are running on any CPU.
 * NOTE(review): fragment — the body of the wait is missing from this
 * extraction.
 */
577 void synchronize_irq(void)
579 if (irqs_running()) {
/* Acquire the global IRQ write brlock for 'cpu', spinning until no other
 * CPU is in an interrupt handler and the global bottom-half lock is quiet;
 * records this CPU as global_irq_holder on success. Re-entrant for the
 * current holder. NOTE(review): fragment — 'lock' declaration, retry loop
 * structure and braces are missing from this extraction.
 */
585 static inline void get_irqlock(int cpu
)
/* Already held by us: nothing to do. */
589 if ((unsigned char)cpu
== global_irq_holder
)
594 br_write_lock(BR_GLOBALIRQ_LOCK
);
598 if (!irqs_running() &&
599 (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock
)))
/* Contended: drop the lock and spin-wait before retrying. */
602 br_write_unlock(BR_GLOBALIRQ_LOCK
);
603 lock
= &__br_write_locks
[BR_GLOBALIRQ_LOCK
].lock
;
604 while (irqs_running() ||
605 spin_is_locked(lock
) ||
606 (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock
))) {
612 SYNC_OTHER_ULTRAS(cpu
);
618 global_irq_holder
= cpu
;
/* Old-style global cli: outside of a local interrupt handler, take the
 * global IRQ lock. NOTE(review): fragment — flag save and cli lines missing.
 */
621 void __global_cli(void)
627 int cpu
= smp_processor_id();
629 if (! local_irq_count(cpu
))
/* Old-style global sti: release the global IRQ lock (if held) and re-enable
 * local interrupts. NOTE(review): fragment — the sti line is missing.
 */
634 void __global_sti(void)
636 int cpu
= smp_processor_id();
638 if (! local_irq_count(cpu
))
639 release_irqlock(cpu
);
/* Encode the global interrupt state: visible fragments compute
 * local_enabled from the saved flags and start from retval = 2+enabled,
 * then refine based on whether we hold the global lock. The exact return
 * encoding (0/1 = global, 2/3 = local only) is the historical convention —
 * TODO confirm against the full file.
 */
643 unsigned long __global_save_flags(void)
645 unsigned long flags
, local_enabled
, retval
;
648 local_enabled
= ((flags
== 0) ? 1 : 0);
649 retval
= 2 + local_enabled
;
650 if (! local_irq_count(smp_processor_id())) {
653 if (global_irq_holder
== (unsigned char) smp_processor_id())
/* Inverse of __global_save_flags(); unknown encodings log the caller's PC
 * (read from %i7). NOTE(review): fragment — the switch over 'flags' is
 * missing from this extraction.
 */
659 void __global_restore_flags(unsigned long flags
)
677 __asm__
__volatile__("mov %%i7, %0" : "=r" (pc
));
678 printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
684 #endif /* CONFIG_SMP */
/* Called when an interrupt vector arrives for a disabled/unregistered
 * source: log it as spurious (with the INO index and trap PC) and clear the
 * pending work slot so it is not reprocessed.
 * NOTE(review): fragment — braces and any bucket->pending bookkeeping are
 * missing from this extraction.
 */
686 void catch_disabled_ivec(struct pt_regs
*regs
)
688 int cpu
= smp_processor_id();
689 struct ino_bucket
*bucket
= __bucket(*irq_work(cpu
, 0));
691 /* We can actually see this on Ultra/PCI PCI cards, which are bridges
692 * to other devices. Here a single IMAP enabled potentially multiple
693 * unique interrupt sources (which each do have a unique ICLR register.
695 * So what we do is just register that the IVEC arrived, when registered
696 * for real the request_irq() code will check the bit and signal
697 * a local CPU interrupt for it.
*/
700 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
701 bucket
- &ivector_table
[0], regs
->tpc
);
703 *irq_work(cpu
, 0) = 0;
/* Buddy CPU must be at least this idle before we forward an IRQ to it. */
708 #define FORWARD_VOLUME 12
/* Main PIL-level interrupt dispatcher: atomically snapshot (xchg32) and walk
 * this CPU's per-PIL bucket list, run each active bucket's handler(s)
 * (single irqaction or the 4-slot IBF_MULTI vector), perform the SA_DMA_SYNC
 * read-back dance for Sabre PCI, then either forward the source to an idle
 * buddy CPU (rewrite IMAP TID) or write ICLR_IDLE to re-arm it.
 * NOTE(review): heavily fragmented — braces, the irq==0 special case, and
 * parts of the forwarding logic are missing from this extraction.
 */
710 void handler_irq(int irq
, struct pt_regs
*regs
)
712 struct ino_bucket
*bp
, *nbp
;
713 int cpu
= smp_processor_id();
/* Forwarding only on non-Starfire; rest of the condition lost in extraction. */
715 int should_forward
= (this_is_starfire
== 0 &&
718 unsigned int buddy
= 0;
720 /* 'cpu' is the MID (ie. UPAID), calculate the MID
*/
723 if (should_forward
!= 0) {
/* Next logical CPU, wrapping to logical 0. */
724 buddy
= cpu_number_map(cpu
) + 1;
725 if (buddy
>= NR_CPUS
||
726 (buddy
= cpu_logical_map(buddy
)) == -1)
727 buddy
= cpu_logical_map(0);
729 /* Voo-doo programming. */
/* Don't forward to a buddy that is not idle enough. */
730 if (cpu_data
[buddy
].idle_volume
< FORWARD_VOLUME
)
738 * Check for TICK_INT on level 14 softint.
740 if ((irq
== 14) && (get_softint() & (1UL << 0)))
743 clear_softint(1 << irq
);
746 kstat
.irqs
[cpu
][irq
]++;
/* Atomic snapshot+clear of the pending list for this (cpu, pil). */
751 __bucket(xchg32(irq_work(cpu
, irq
), 0)) :
754 bp
= __bucket(xchg32(irq_work(cpu
, irq
), 0));
756 for ( ; bp
!= NULL
; bp
= nbp
) {
757 unsigned char flags
= bp
->flags
;
/* Read the next link before handlers can requeue this bucket. */
759 nbp
= __bucket(bp
->irq_chain
);
760 if ((flags
& IBF_ACTIVE
) != 0) {
762 if ((flags
& IBF_DMA_SYNC
) != 0) {
/* Flush posted DMA writes before running the handler. */
763 upa_readl(dma_sync_reg_table
[bp
->synctab_ent
]);
764 upa_readq(pci_dma_wsync
);
767 if ((flags
& IBF_MULTI
) == 0) {
768 struct irqaction
*ap
= bp
->irq_info
;
769 ap
->handler(__irq(bp
), ap
->dev_id
, regs
);
/* Shared bucket: call every populated slot of the 4-entry vector. */
771 void **vector
= (void **)bp
->irq_info
;
773 for (ent
= 0; ent
< 4; ent
++) {
774 struct irqaction
*ap
= vector
[ent
];
776 ap
->handler(__irq(bp
), ap
->dev_id
, regs
);
779 /* Only the dummy bucket lacks IMAP/ICLR. */
782 /* Ok, here is what is going on:
783 * 1) Retargeting IRQs on Starfire is very
784 * expensive so just forget about it on them.
785 * 2) Moving around very high priority interrupts
787 * 3) If the current cpu is idle, interrupts are
788 * useful work, so keep them here. But do not
789 * pass to our neighbour if he is not very idle.
*/
791 if (should_forward
!= 0) {
792 /* Push it to our buddy. */
794 upa_writel(buddy
| IMAP_VALID
, bp
->imap
);
/* Re-arm the source for the next interrupt. */
797 upa_writel(ICLR_IDLE
, bp
->iclr
);
805 #ifdef CONFIG_BLK_DEV_FD
806 extern void floppy_interrupt(int irq
, void *dev_cookie
, struct pt_regs
*regs
);
/* Dedicated floppy IRQ trampoline: bump the stat counter, clear the pending
 * work slot, recover the bucket pointer stashed in action->mask by
 * request_irq(), run the real floppy handler, then write ICLR_IDLE to
 * re-arm the source. NOTE(review): fragment — braces and the irq_enter/
 * irq_exit style bookkeeping are missing from this extraction.
 */
808 void sparc_floppy_irq(int irq
, void *dev_cookie
, struct pt_regs
*regs
)
810 struct irqaction
*action
= *(irq
+ irq_action
);
811 struct ino_bucket
*bucket
;
812 int cpu
= smp_processor_id();
815 kstat
.irqs
[cpu
][irq
]++;
817 *(irq_work(cpu
, irq
)) = 0;
818 bucket
= (struct ino_bucket
*)action
->mask
;
820 floppy_interrupt(irq
, dev_cookie
, regs
);
821 upa_writel(ICLR_IDLE
, bucket
->iclr
);
827 /* The following assumes that the branch lies before the place we
828 * are branching to. This is the case for a trap vector...
829 * You have been warned.
*/
/* Encode a SPARC 'ba' (branch always) instruction from inst_addr to
 * dest_addr; 22-bit word displacement. */
831 #define SPARC_BRANCH(dest_addr, inst_addr) \
832 (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
834 #define SPARC_NOP (0x01000000)
/* Patch the TL0 trap table entry for interrupt level 'cpu_irq' to branch
 * directly into 'handler' (branch + nop), then flush the I-cache line so
 * the patched instructions are seen. Each trap table entry is 32 bytes
 * (the '<< 5'). NOTE(review): fragment — 'insns' declaration and braces
 * are missing from this extraction.
 */
836 static void install_fast_irq(unsigned int cpu_irq
,
837 void (*handler
)(int, void *, struct pt_regs
*))
839 extern unsigned long sparc64_ttable_tl0
;
840 unsigned long ttent
= (unsigned long) &sparc64_ttable_tl0
;
844 ttent
+= (cpu_irq
- 1) << 5;
845 insns
= (unsigned int *) ttent
;
846 insns
[0] = SPARC_BRANCH(((unsigned long) handler
),
847 ((unsigned long)&insns
[0]));
848 insns
[1] = SPARC_NOP
;
/* Order the stores, then flush the patched trap-table line. */
849 __asm__
__volatile__("membar #StoreStore; flush %0" : : "r" (ttent
));
/* Register a "fast" handler: like request_irq() but the handler is patched
 * straight into the trap table via install_fast_irq(), bypassing the normal
 * dispatch path. Sharing is forbidden (panics), as are PIL 0 and PIL 14
 * (timer). dev_id is forced to NULL.
 * NOTE(review): fragmented — save_and_cli/braces, error returns and the
 * final return statement are missing from this extraction.
 */
852 int request_fast_irq(unsigned int irq
,
853 void (*handler
)(int, void *, struct pt_regs
*),
854 unsigned long irqflags
, const char *name
, void *dev_id
)
856 struct irqaction
*action
;
857 struct ino_bucket
*bucket
= __bucket(irq
);
860 /* No pil0 dummy buckets allowed here. */
861 if (bucket
< &ivector_table
[0] ||
862 bucket
>= &ivector_table
[NUM_IVECS
]) {
863 unsigned int *caller
;
865 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
866 printk(KERN_CRIT
"request_fast_irq: Old style IRQ registry attempt "
867 "from %p, irq %08x.\n", caller
, irq
);
871 /* Only IMAP style interrupts can be registered as fast. */
878 if ((bucket
->pil
== 0) || (bucket
->pil
== 14)) {
879 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
883 action
= *(bucket
->pil
+ irq_action
);
/* Any existing handler on this PIL is fatal for a fast registration. */
885 if(action
->flags
& SA_SHIRQ
)
886 panic("Trying to register fast irq when already shared.\n");
887 if(irqflags
& SA_SHIRQ
)
888 panic("Trying to register fast irq as shared.\n");
889 printk("request_fast_irq: Trying to register yet already owned.\n");
/* Same static-pool-or-kmalloc allocation scheme as request_irq(). */
894 if(irqflags
& SA_STATIC_ALLOC
) {
895 if(static_irq_count
< MAX_STATIC_ALLOC
)
896 action
= &static_irqaction
[static_irq_count
++];
898 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
899 "using kmalloc\n", bucket
->pil
, name
);
902 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
905 restore_flags(flags
);
/* Patch the trap table so this PIL vectors straight into 'handler'. */
908 install_fast_irq(bucket
->pil
, handler
);
910 bucket
->irq_info
= action
;
911 bucket
->flags
|= IBF_ACTIVE
;
913 action
->mask
= (unsigned long) bucket
;
914 action
->handler
= handler
;
915 action
->flags
= irqflags
| SA_IMAP_MASKED
;
916 action
->dev_id
= NULL
;
920 *(bucket
->pil
+ irq_action
) = action
;
923 restore_flags(flags
);
931 /* We really don't need these at all on the Sparc. We only have
932 * stubs here because they are exported to modules.
*/
/* ISA-autoprobe stubs; bodies (trivial returns) lost in extraction. */
934 unsigned long probe_irq_on(void)
939 int probe_irq_off(unsigned long mask
)
944 /* This is gets the master TICK_INT timer going. */
/* Boot-time timer bring-up: read clock-frequency from the boot CPU's PROM
 * node, derive timer_tick_offset (ticks per jiffy), register 'cfunc' on the
 * PIL0 dummy irq, then with interrupts disabled program %tick and
 * %tick_cmpr, working around the Blackbird/Spitfire errata described below.
 * NOTE(review): heavily fragmented — 'node'/'err' declarations, braces, and
 * most of the inline-asm bodies are missing from this extraction.
 */
945 void init_timers(void (*cfunc
)(int, void *, struct pt_regs
*),
946 unsigned long *clock
)
948 unsigned long pstate
;
949 extern unsigned long timer_tick_offset
;
952 extern void smp_tick_init(void);
955 node
= linux_cpus
[0].prom_node
;
956 *clock
= prom_getint(node
, "clock-frequency");
957 timer_tick_offset
= *clock
/ HZ
;
962 /* Register IRQ handler. */
963 err
= request_irq(build_irq(0, 0, 0UL, 0UL), cfunc
, (SA_INTERRUPT
| SA_STATIC_ALLOC
),
967 prom_printf("Serious problem, cannot register TICK_INT\n");
971 /* Guarentee that the following sequences execute
*/
/* Disable interrupts (clear PSTATE.IE) for the %tick programming below. */
974 __asm__
__volatile__("rdpr %%pstate, %0\n\t"
975 "wrpr %0, %1, %%pstate"
979 /* Set things up so user can access tick register for profiling
980 * purposes. Also workaround BB_ERRATA_1 by doing a dummy
981 * read back of %tick after writing it.
*/
983 __asm__
__volatile__("
984 sethi %%hi(0x80000000), %%g1
990 andn %%g2, %%g1, %%g2
997 /* Workaround for Spitfire Errata (#54 I think??), I discovered
998 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
1001 * On Blackbird writes to %tick_cmpr can fail, the
1002 * workaround seems to be to execute the wr instruction
1003 * at the start of an I-cache line, and perform a dummy
1004 * read back from %tick_cmpr right after writing to it. -DaveM
*/
1006 __asm__
__volatile__("
1011 1: wr %%g1, 0x0, %%tick_cmpr
1012 rd %%tick_cmpr, %%g0"
1014 : "r" (timer_tick_offset
)
1017 /* Restore PSTATE_IE. */
1018 __asm__
__volatile__("wrpr %0, 0x0, %%pstate"
/* Point the IMAP of the bucket behind irqaction 'p' at logical CPU
 * 'goal_cpu' (Starfire needs starfire_translate), then advance and return
 * the next goal CPU, wrapping past NR_CPUS/unmapped slots. PIL 12 is never
 * moved (Ex000 problems, per the comment).
 * NOTE(review): fragment — 'tid' declaration, braces and the wrap/return
 * statements are missing from this extraction.
 */
1026 static int retarget_one_irq(struct irqaction
*p
, int goal_cpu
)
1028 struct ino_bucket
*bucket
= __bucket(p
->mask
);
1029 unsigned long imap
= bucket
->imap
;
1032 /* Never change this, it causes problems on Ex000 systems. */
1033 if (bucket
->pil
== 12)
1036 if(this_is_starfire
== 0) {
/* TID field is bits 31:26 of the IMAP register. */
1037 tid
= __cpu_logical_map
[goal_cpu
] << 26;
1039 tid
= (starfire_translate(imap
, __cpu_logical_map
[goal_cpu
]) << 26);
1041 upa_writel(IMAP_VALID
| (tid
& IMAP_TID
), imap
);
/* Advance goal_cpu, skipping past the end of the logical map. */
1044 if(goal_cpu
>= NR_CPUS
||
1045 __cpu_logical_map
[goal_cpu
] == -1)
1050 /* Called from request_irq. */
/* Spread all registered IMAP-style IRQs round-robin across CPUs by calling
 * retarget_one_irq() on each SA_IMAP_MASKED action, with interrupts
 * disabled for the duration. NOTE(review): fragment — 'level'/'cpu'
 * declarations, braces and the inner action-list walk are missing from
 * this extraction.
 */
1051 static void distribute_irqs(void)
1053 unsigned long flags
;
1056 save_and_cli(flags
);
1058 for(level
= 0; level
< NR_IRQS
; level
++) {
1059 struct irqaction
*p
= irq_action
[level
];
1061 if(p
->flags
& SA_IMAP_MASKED
)
1062 cpu
= retarget_one_irq(p
, cpu
);
1066 restore_flags(flags
);
/* PROM counter-timer registers; NULL when the node is absent or unmapped.
 * prom_limit0/1 save the PROM's limit values across kill/enable. */
1071 struct sun5_timer
*prom_timers
;
1072 static u64 prom_limit0
, prom_limit1
;
/* Locate the PROM "/counter-timer" node and record its already-mapped
 * register address in prom_timers; leave prom_timers NULL when the node is
 * missing or has no usable "address" property.
 * NOTE(review): fragment — 'tnode'/'err' declarations, braces and returns
 * are missing from this extraction.
 */
1074 static void map_prom_timers(void)
1076 unsigned int addr
[3];
1079 /* PROM timer node hangs out in the top level of device siblings... */
1080 tnode
= prom_finddevice("/counter-timer");
1082 /* Assume if node is not present, PROM uses different tick mechanism
1083 * which we should not care about.
*/
1085 if(tnode
== 0 || tnode
== -1) {
1086 prom_timers
= (struct sun5_timer
*) 0;
1090 /* If PROM is really using this, it must be mapped by him. */
1091 err
= prom_getproperty(tnode
, "address", (char *)addr
, sizeof(addr
));
1093 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1094 prom_timers
= (struct sun5_timer
*) 0;
1097 prom_timers
= (struct sun5_timer
*) ((unsigned long)addr
[0]);
/* Stop the PROM's counter-timer: save its limit registers (so
 * enable_prom_timer() can restore them), zero both limits, then consume any
 * interrupt packet the timer already sent (ASI reads/clear below).
 * NOTE(review): fragment — the prom_timers NULL guard and braces are
 * missing from this extraction.
 */
1100 static void kill_prom_timer(void)
1105 /* Save them away for later. */
1106 prom_limit0
= prom_timers
->limit0
;
1107 prom_limit1
= prom_timers
->limit1
;
1109 /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
1110 * We turn both off here just to be paranoid.
*/
1112 prom_timers
->limit0
= 0;
1113 prom_timers
->limit1
= 0;
1115 /* Wheee, eat the interrupt packet too... */
1116 __asm__
__volatile__("
1118 ldxa [%%g0] %0, %%g1
1119 ldxa [%%g2] %1, %%g1
1120 stxa %%g0, [%%g0] %0
1122 " : /* no outputs */
1123 : "i" (ASI_INTR_RECEIVE
), "i" (ASI_UDB_INTR_R
)
/* Restore the PROM counter-timer to the state saved by kill_prom_timer():
 * limits back to the saved values, counters reset to zero.
 * NOTE(review): fragment — the prom_timers NULL guard and braces are
 * missing from this extraction.
 */
1127 void enable_prom_timer(void)
1132 /* Set it to whatever was there before. */
1133 prom_timers
->limit1
= prom_limit1
;
1134 prom_timers
->count1
= 0;
1135 prom_timers
->limit0
= prom_limit0
;
1136 prom_timers
->count0
= 0;
/* One-shot boot initialization of the IRQ layer: zero ivector_table and the
 * UP work vector, clear stale softints left by the (just-disabled) PROM
 * timer, then enable interrupt reception by setting PSTATE.IE (the '%0'
 * input — presumably PSTATE_IE — was lost in extraction; TODO confirm).
 * NOTE(review): fragment — the 'called' guard logic, map/kill_prom_timer
 * calls and braces are missing from this extraction.
 */
1139 void __init
init_IRQ(void)
1141 static int called
= 0;
1147 memset(&ivector_table
[0], 0, sizeof(ivector_table
));
1149 memset(&__up_workvec
[0], 0, sizeof(__up_workvec
));
1153 /* We need to clear any IRQ's pending in the soft interrupt
1154 * registers, a spurious one could be left around from the
1155 * PROM timer which we just disabled.
*/
1157 clear_softint(get_softint());
1159 /* Now that ivector table is initialized, it is safe
1160 * to receive IRQ vector traps. We will normally take
1161 * one or two right now, in case some device PROM used
1162 * to boot us wants to speak to us. We just ignore them.
*/
1164 __asm__
__volatile__("rdpr %%pstate, %%g1\n\t"
1165 "or %%g1, %0, %%g1\n\t"
1166 "wrpr %%g1, 0x0, %%pstate"
/* /proc/irq setup hook; intentionally empty on sparc64 at this point. */
1172 void init_irq_proc(void)
1174 /* For now, nothing... */