/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
7 #include <linux/config.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/types.h>
13 #include <linux/interrupt.h>
14 #include <linux/ioport.h>
15 #include <linux/timex.h>
16 #include <linux/malloc.h>
17 #include <linux/random.h>
18 #include <linux/smp_lock.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/delay.h>
21 #include <linux/irq.h>
23 #include <asm/bitops.h>
24 #include <asm/bootinfo.h>
26 #include <asm/mipsregs.h>
27 #include <asm/system.h>
29 #include <asm/ptrace.h>
30 #include <asm/processor.h>
31 #include <asm/pci/bridge.h>
32 #include <asm/sn/sn0/hub.h>
33 #include <asm/sn/sn0/ip27.h>
34 #include <asm/sn/arch.h>
35 #include <asm/sn/intr.h>
36 #include <asm/sn/intr_public.h>
40 #define DBG(x...) printk(x)
47 * Linux has a controller-independent x86 interrupt architecture.
48 * every controller has a 'controller-template', that is used
49 * by the main code to do the right thing. Each driver-visible
50 * interrupt source is transparently wired to the apropriate
51 * controller. Thus drivers need not be aware of the
52 * interrupt-controller.
54 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
55 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
56 * (IO-APICs assumed to be messaging to Pentium local-APICs)
58 * the code is designed to be easily extended with new/different
59 * interrupt controllers, without having to do assembly magic.
62 extern asmlinkage
void ip27_irq(void);
63 extern int irq_to_bus
[], irq_to_slot
[], bus_to_cpu
[];
64 int intr_connect_level(int cpu
, int bit
);
65 int intr_disconnect_level(int cpu
, int bit
);
67 unsigned long spurious_count
= 0;
70 * we need to map irq's up to at least bit 7 of the INT_MASK0_A register
71 * since bits 0-6 are pre-allocated for other purposes.
73 #define IRQ_TO_SWLEVEL(cpu, i) i + 7
74 #define SWLEVEL_TO_IRQ(cpu, s) s - 7
76 * use these macros to get the encoded nasid and widget id
79 #define IRQ_TO_BUS(i) irq_to_bus[(i)]
80 #define IRQ_TO_CPU(i) bus_to_cpu[IRQ_TO_BUS(i)]
81 #define NASID_FROM_PCI_IRQ(i) bus_to_nid[IRQ_TO_BUS(i)]
82 #define WID_FROM_PCI_IRQ(i) bus_to_wid[IRQ_TO_BUS(i)]
83 #define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
/* Not implemented on IP27: any caller reaching this is a bug. */
void disable_irq(unsigned int irq_nr)
{
	panic("disable_irq() called ...");
}
/* Not implemented on IP27: any caller reaching this is a bug. */
void enable_irq(unsigned int irq_nr)
{
	panic("enable_irq() called ...");
}
95 /* This is stupid for an Origin which can have thousands of IRQs ... */
96 static struct irqaction
*irq_action
[NR_IRQS
];
98 int get_irq_list(char *buf
)
101 struct irqaction
* action
;
103 for (i
= 0 ; i
< NR_IRQS
; i
++) {
104 action
= irq_action
[i
];
107 len
+= sprintf(buf
+len
, "%2d: %8d %c %s", i
, kstat
.irqs
[0][i
],
108 (action
->flags
& SA_INTERRUPT
) ? '+' : ' ',
110 for (action
=action
->next
; action
; action
= action
->next
) {
111 len
+= sprintf(buf
+len
, ",%s %s",
112 (action
->flags
& SA_INTERRUPT
)
116 len
+= sprintf(buf
+len
, "\n");
122 * do_IRQ handles all normal device IRQ's (the special SMP cross-CPU interrupts
123 * have their own specific handlers).
125 static void do_IRQ(cpuid_t thiscpu
, int irq
, struct pt_regs
* regs
)
127 struct irqaction
*action
;
130 irq_enter(thiscpu
, irq
);
131 kstat
.irqs
[thiscpu
][irq
]++;
133 action
= *(irq
+ irq_action
);
135 if (!(action
->flags
& SA_INTERRUPT
))
139 do_random
|= action
->flags
;
140 action
->handler(irq
, action
->dev_id
, regs
);
141 action
= action
->next
;
143 if (do_random
& SA_SAMPLE_RANDOM
)
144 add_interrupt_randomness(irq
);
147 irq_exit(thiscpu
, irq
);
149 /* unmasking and bottom half handling is done magically for us. */
/*
 * Return the bit position (0..63) of the most significant set bit of x.
 * For x == 0 the result is 0, same as for x == 1; callers only pass
 * non-zero values.  Binary-search narrowing, O(log bits).
 * (The mangled original read 'b' uninitialized when x < 2^32; 'b' is
 * now explicitly initialized.)
 */
static int ms1bit(unsigned long x)
{
	int b = 0;

	if (x >> 32) { b  = 32; x >>= 32; }
	if (x >> 16) { b += 16; x >>= 16; }
	if (x >>  8) { b +=  8; x >>=  8; }
	if (x >>  4) { b +=  4; x >>=  4; }
	if (x >>  2) { b +=  2; x >>=  2; }

	return b + (int) (x >> 1);
}
170 * This code is unnecessarily complex, because we do SA_INTERRUPT
171 * intr enabling. Basically, once we grab the set of intrs we need
172 * to service, we must mask _all_ these interrupts; firstly, to make
173 * sure the same intr does not intr again, causing recursion that
174 * can lead to stack overflow. Secondly, we can not just mask the
175 * one intr we are do_IRQing, because the non-masked intrs in the
176 * first set might intr again, causing multiple servicings of the
177 * same intr. This effect is mostly seen for intercpu intrs.
180 void ip27_do_irq(struct pt_regs
*regs
)
183 hubreg_t pend0
, mask0
;
184 cpuid_t thiscpu
= smp_processor_id();
185 int pi_int_mask0
= ((cputoslice(thiscpu
) == 0) ?
186 PI_INT_MASK0_A
: PI_INT_MASK0_B
);
188 /* copied from Irix intpend0() */
189 while (((pend0
= LOCAL_HUB_L(PI_INT_PEND0
)) &
190 (mask0
= LOCAL_HUB_L(pi_int_mask0
))) != 0) {
191 pend0
&= mask0
; /* Pick intrs we should look at */
193 /* Prevent any of the picked intrs from recursing */
194 LOCAL_HUB_S(pi_int_mask0
, mask0
& ~(pend0
));
196 swlevel
= ms1bit(pend0
);
197 LOCAL_HUB_CLR_INTR(swlevel
);
198 /* "map" swlevel to irq */
199 irq
= SWLEVEL_TO_IRQ(thiscpu
, swlevel
);
200 do_IRQ(thiscpu
, irq
, regs
);
201 /* clear bit in pend0 */
202 pend0
^= 1ULL << swlevel
;
204 /* Now allow the set of serviced intrs again */
205 LOCAL_HUB_S(pi_int_mask0
, mask0
);
206 LOCAL_HUB_L(PI_INT_PEND0
);
212 /* Startup one of the (PCI ...) IRQs routes over a bridge. */
213 static unsigned int bridge_startup(unsigned int irq
)
219 nasid_t master
= NASID_FROM_PCI_IRQ(irq
);
221 bridge
= (bridge_t
*) NODE_SWIN_BASE(master
, WID_FROM_PCI_IRQ(irq
));
222 pin
= SLOT_FROM_PCI_IRQ(irq
);
223 cpu
= IRQ_TO_CPU(irq
);
225 DBG("bridge_startup(): irq= 0x%x pin=%d\n", irq
, pin
);
227 * "map" irq to a swlevel greater than 6 since the first 6 bits
228 * of INT_PEND0 are taken
230 swlevel
= IRQ_TO_SWLEVEL(cpu
, irq
);
231 intr_connect_level(cpu
, swlevel
);
233 bridge
->b_int_addr
[pin
].addr
= (0x20000 | swlevel
| (master
<< 8));
234 bridge
->b_int_enable
|= (1 << pin
);
235 /* more stuff in int_enable reg */
236 bridge
->b_int_enable
|= 0x7ffffe00;
239 * XXX This only works if b_int_device is initialized to 0!
240 * We program the bridge to have a 1:1 mapping between devices
241 * (slots) and intr pins.
243 device
= bridge
->b_int_device
;
244 device
|= (pin
<< (pin
*3));
245 bridge
->b_int_device
= device
;
247 bridge
->b_widget
.w_tflush
; /* Flush */
249 return 0; /* Never anything pending. */
252 /* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
253 static unsigned int bridge_shutdown(unsigned int irq
)
258 bridge
= (bridge_t
*) NODE_SWIN_BASE(NASID_FROM_PCI_IRQ(irq
),
259 WID_FROM_PCI_IRQ(irq
));
260 DBG("bridge_shutdown: irq 0x%x\n", irq
);
261 pin
= SLOT_FROM_PCI_IRQ(irq
);
264 * map irq to a swlevel greater than 6 since the first 6 bits
265 * of INT_PEND0 are taken
267 swlevel
= IRQ_TO_SWLEVEL(cpu
, irq
);
268 intr_disconnect_level(smp_processor_id(), swlevel
);
270 bridge
->b_int_enable
&= ~(1 << pin
);
271 bridge
->b_widget
.w_tflush
; /* Flush */
273 return 0; /* Never anything pending. */
278 bridge_t
*bridge
= (bridge_t
*) 0x9200000008000000;
280 printk("bridge->b_int_status = 0x%x\n", bridge
->b_int_status
);
281 printk("bridge->b_int_enable = 0x%x\n", bridge
->b_int_enable
);
282 printk("PI_INT_PEND0 = 0x%lx\n", LOCAL_HUB_L(PI_INT_PEND0
));
283 printk("PI_INT_MASK0_A = 0x%lx\n", LOCAL_HUB_L(PI_INT_MASK0_A
));
286 int setup_irq(unsigned int irq
, struct irqaction
*new)
289 struct irqaction
*old
, **p
;
292 DBG("setup_irq: 0x%x\n", irq
);
293 if (irq
>= NR_IRQS
) {
294 printk("IRQ array overflow %d\n", irq
);
297 if (new->flags
& SA_SAMPLE_RANDOM
)
298 rand_initialize_irq(irq
);
301 p
= irq_action
+ irq
;
302 if ((old
= *p
) != NULL
) {
303 /* Can't share interrupts unless both agree to */
304 if (!(old
->flags
& new->flags
& SA_SHIRQ
)) {
305 restore_flags(flags
);
309 /* Add new interrupt at end of irq queue */
319 if ((!shared
) && (irq
>= BASE_PCI_IRQ
)) {
322 restore_flags(flags
);
327 int request_irq(unsigned int irq
,
328 void (*handler
)(int, void *, struct pt_regs
*),
329 unsigned long irqflags
, const char * devname
, void *dev_id
)
332 struct irqaction
*action
;
334 DBG("request_irq(): irq= 0x%x\n", irq
);
338 action
= (struct irqaction
*)kmalloc(sizeof(*action
), GFP_KERNEL
);
342 action
->handler
= handler
;
343 action
->flags
= irqflags
;
345 action
->name
= devname
;
347 action
->dev_id
= dev_id
;
349 DBG("request_irq(): %s devid= 0x%x\n", devname
, dev_id
);
350 retval
= setup_irq(irq
, action
);
351 DBG("request_irq(): retval= %d\n", retval
);
357 void free_irq(unsigned int irq
, void *dev_id
)
359 struct irqaction
* action
, **p
;
362 if (irq
>= NR_IRQS
) {
363 printk("Trying to free IRQ%d\n", irq
);
366 for (p
= irq
+ irq_action
; (action
= *p
) != NULL
; p
= &action
->next
) {
367 if (action
->dev_id
!= dev_id
)
370 /* Found it - now free it */
373 if (irq
>= BASE_PCI_IRQ
)
374 bridge_shutdown(irq
);
375 restore_flags(flags
);
379 printk("Trying to free free IRQ%d\n",irq
);
/* Useless ISA nonsense. */
unsigned long probe_irq_on (void)
{
	panic("probe_irq_on called!\n");
	return 0;		/* unreachable; keeps the compiler happy */
}
/* IRQ autoprobing is an ISA concept; nothing to do on IP27. */
int probe_irq_off (unsigned long irqs)
{
	return 0;
}
394 void __init
init_IRQ(void)
396 set_except_vector(0, ip27_irq
);
402 * This following are the global intr on off routines, copied almost
403 * entirely from i386 code.
406 int global_irq_holder
= NO_PROC_ID
;
407 spinlock_t global_irq_lock
= SPIN_LOCK_UNLOCKED
;
409 extern void show_stack(unsigned long* esp
);
411 static void show(char * str
)
414 int cpu
= smp_processor_id();
416 printk("\n%s, CPU %d:\n", str
, cpu
);
417 printk("irq: %d [",irqs_running());
418 for(i
=0;i
< smp_num_cpus
;i
++)
419 printk(" %d",local_irq_count(i
));
420 printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock
) ? 1 : 0);
421 for(i
=0;i
< smp_num_cpus
;i
++)
422 printk(" %d",local_bh_count(i
));
424 printk(" ]\nStack dumps:");
425 for(i
= 0; i
< smp_num_cpus
; i
++) {
429 printk("\nCPU %d:",i
);
430 printk("Code not developed yet\n");
433 printk("\nCPU %d:",cpu
);
434 printk("Code not developed yet\n");
435 /* show_stack(NULL); */
439 #define MAXCOUNT 100000000
440 #define SYNC_OTHER_CORES(x) udelay(x+1)
442 static inline void wait_on_irq(int cpu
)
444 int count
= MAXCOUNT
;
449 * Wait until all interrupts are gone. Wait
450 * for bottom half handlers unless we're
451 * already executing in one..
454 if (local_bh_count(cpu
) || !spin_is_locked(&global_bh_lock
))
457 /* Duh, we have to loop. Release the lock to avoid deadlocks */
458 spin_unlock(&global_irq_lock
);
466 SYNC_OTHER_CORES(cpu
);
470 if (spin_is_locked(&global_irq_lock
))
472 if (!local_bh_count(cpu
) && spin_is_locked(&global_bh_lock
))
474 if (spin_trylock(&global_irq_lock
))
/*
 * Wait for all cpus to leave interrupt context by briefly taking and
 * releasing the global irq lock via cli()/sti().
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}
489 static inline void get_irqlock(int cpu
)
491 if (!spin_trylock(&global_irq_lock
)) {
492 /* do we already hold the lock? */
493 if ((unsigned char) cpu
== global_irq_holder
)
495 /* Uhhuh.. Somebody else got it. Wait.. */
496 spin_lock(&global_irq_lock
);
499 * We also to make sure that nobody else is running
500 * in an interrupt context.
507 global_irq_holder
= cpu
;
510 void __global_cli(void)
515 if (flags
& ST0_IE
) {
516 int cpu
= smp_processor_id();
518 if (!local_irq_count(cpu
))
/*
 * Global sti: drop the global irq lock (when not in an interrupt
 * handler) and re-enable local interrupts.
 */
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
533 * SMP flags value to restore to:
539 unsigned long __global_save_flags(void)
544 int cpu
= smp_processor_id();
547 local_enabled
= (flags
& ST0_IE
);
548 /* default to local */
549 retval
= 2 + local_enabled
;
551 /* check for global flags if we're not in an interrupt */
552 if (!local_irq_count(cpu
)) {
555 if (global_irq_holder
== cpu
)
561 void __global_restore_flags(unsigned long flags
)
577 printk("global_restore_flags: %08lx\n", flags
);
581 #endif /* CONFIG_SMP */
584 * Get values that vary depending on which CPU and bit we're operating on.
586 static hub_intmasks_t
*intr_get_ptrs(cpuid_t cpu
, int bit
, int *new_bit
,
587 hubreg_t
**intpend_masks
, int *ip
)
589 hub_intmasks_t
*hub_intmasks
;
591 hub_intmasks
= &cpu_data
[cpu
].p_intmasks
;
592 if (bit
< N_INTPEND_BITS
) {
593 *intpend_masks
= hub_intmasks
->intpend0_masks
;
597 *intpend_masks
= hub_intmasks
->intpend1_masks
;
599 *new_bit
= bit
- N_INTPEND_BITS
;
604 int intr_connect_level(int cpu
, int bit
)
607 int slice
= cputoslice(cpu
);
608 volatile hubreg_t
*mask_reg
;
609 hubreg_t
*intpend_masks
;
610 nasid_t nasid
= COMPACT_TO_NASID_NODEID(cputocnode(cpu
));
612 (void)intr_get_ptrs(cpu
, bit
, &bit
, &intpend_masks
, &ip
);
614 /* Make sure it's not already pending when we connect it. */
615 REMOTE_HUB_CLR_INTR(nasid
, bit
+ ip
* N_INTPEND_BITS
);
617 intpend_masks
[0] |= (1ULL << (u64
)bit
);
620 mask_reg
= REMOTE_HUB_ADDR(nasid
, PI_INT_MASK0_A
+
621 PI_INT_MASK_OFFSET
* slice
);
623 mask_reg
= REMOTE_HUB_ADDR(nasid
, PI_INT_MASK1_A
+
624 PI_INT_MASK_OFFSET
* slice
);
626 HUB_S(mask_reg
, intpend_masks
[0]);
630 int intr_disconnect_level(int cpu
, int bit
)
633 int slice
= cputoslice(cpu
);
634 volatile hubreg_t
*mask_reg
;
635 hubreg_t
*intpend_masks
;
636 nasid_t nasid
= COMPACT_TO_NASID_NODEID(cputocnode(cpu
));
638 (void)intr_get_ptrs(cpu
, bit
, &bit
, &intpend_masks
, &ip
);
639 intpend_masks
[0] &= ~(1ULL << (u64
)bit
);
641 mask_reg
= REMOTE_HUB_ADDR(nasid
, PI_INT_MASK0_A
+
642 PI_INT_MASK_OFFSET
* slice
);
644 mask_reg
= REMOTE_HUB_ADDR(nasid
, PI_INT_MASK1_A
+
645 PI_INT_MASK_OFFSET
* slice
);
647 HUB_S(mask_reg
, intpend_masks
[0]);
/* Intercpu reschedule interrupt handler. */
void handle_resched_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Nothing, the return from intr will work for us */
}
extern void smp_call_function_interrupt(void);

/*
 * Wire up the per-cpu reschedule and call-function interrupts.
 * NOTE(review): several lines of this function were lost in the mangled
 * extraction (including the guard around the second, slice+1, set of
 * request_irq calls and the "callfunc" handler name); the structure
 * below is reconstructed — verify against the original tree.
 */
void install_cpuintr(int cpu)
{
#ifdef CONFIG_SMP
#if (CPUS_PER_NODE == 2)
	int irq;

	/*
	 * This is a hack till we have a pernode irqlist. Currently,
	 * just have the master cpu set up the handlers for the per
	 * cpu irqs.
	 */
	irq = CPU_RESCHED_A_IRQ + cputoslice(cpu);
	intr_connect_level(cpu, IRQ_TO_SWLEVEL(cpu, irq));
	if (request_irq(irq, handle_resched_intr, 0, "resched", 0))
		panic("intercpu intr unconnectible\n");

	irq = CPU_CALL_A_IRQ + cputoslice(cpu);
	intr_connect_level(cpu, IRQ_TO_SWLEVEL(cpu, irq));
	if (request_irq(irq, smp_call_function_interrupt, 0,
			"callfunc", 0))
		panic("intercpu intr unconnectible\n");

	irq = CPU_RESCHED_A_IRQ + cputoslice(cpu) + 1;
	if (request_irq(irq, handle_resched_intr, 0, "resched", 0))
		panic("intercpu intr unconnectible\n");

	irq = CPU_CALL_A_IRQ + cputoslice(cpu) + 1;
	if (request_irq(irq, smp_call_function_interrupt, 0,
			"callfunc", 0))
		panic("intercpu intr unconnectible\n");
#else /* CPUS_PER_NODE */
#error Must redefine this for more than 2 CPUS.
#endif /* CPUS_PER_NODE */
#endif /* CONFIG_SMP */
}
701 void install_tlbintr(int cpu
)
703 int intr_bit
= N_INTPEND_BITS
+ TLB_INTR_A
+ cputoslice(cpu
);
705 intr_connect_level(cpu
, intr_bit
);