- Linus: more PageDirty / swapcache handling
[davej-history.git] / arch / mips64 / sgi-ip27 / ip27-irq.c
bloba9a7b7153eede2bdf5b87f048f8b88b94c45f542
1 /*
2 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
4 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
5 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
6 */
7 #include <linux/config.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/types.h>
13 #include <linux/interrupt.h>
14 #include <linux/ioport.h>
15 #include <linux/timex.h>
16 #include <linux/malloc.h>
17 #include <linux/random.h>
18 #include <linux/smp_lock.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/delay.h>
21 #include <linux/irq.h>
23 #include <asm/bitops.h>
24 #include <asm/bootinfo.h>
25 #include <asm/io.h>
26 #include <asm/mipsregs.h>
27 #include <asm/system.h>
29 #include <asm/ptrace.h>
30 #include <asm/processor.h>
31 #include <asm/pci/bridge.h>
32 #include <asm/sn/sn0/hub.h>
33 #include <asm/sn/sn0/ip27.h>
34 #include <asm/sn/arch.h>
35 #include <asm/sn/intr.h>
36 #include <asm/sn/intr_public.h>
38 #undef DEBUG_IRQ
39 #ifdef DEBUG_IRQ
40 #define DBG(x...) printk(x)
41 #else
42 #define DBG(x...)
43 #endif
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing.  Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller, so drivers need not be aware of the
 * interrupt-controller.
 *
 * This scheme originates in the x86 code (where the controllers are
 * the 8259 PIC, IO-APICs, PIIX4's internal 8259 and the Visual
 * Workstation Cobalt APIC); on IP27 the interrupt sources are routed
 * through the SN0 Hub and PCI Bridge ASICs instead.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
62 extern asmlinkage void ip27_irq(void);
63 extern int irq_to_bus[], irq_to_slot[], bus_to_cpu[];
64 int intr_connect_level(int cpu, int bit);
65 int intr_disconnect_level(int cpu, int bit);
67 unsigned long spurious_count = 0;
70 * we need to map irq's up to at least bit 7 of the INT_MASK0_A register
71 * since bits 0-6 are pre-allocated for other purposes.
73 #define IRQ_TO_SWLEVEL(cpu, i) i + 7
74 #define SWLEVEL_TO_IRQ(cpu, s) s - 7
76 * use these macros to get the encoded nasid and widget id
77 * from the irq value
79 #define IRQ_TO_BUS(i) irq_to_bus[(i)]
80 #define IRQ_TO_CPU(i) bus_to_cpu[IRQ_TO_BUS(i)]
81 #define NASID_FROM_PCI_IRQ(i) bus_to_nid[IRQ_TO_BUS(i)]
82 #define WID_FROM_PCI_IRQ(i) bus_to_wid[IRQ_TO_BUS(i)]
83 #define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
/* IRQ masking is not implemented for IP27; any caller is a bug. */
void disable_irq(unsigned int irq_nr)
{
	panic("disable_irq() called ...");
}
/* IRQ unmasking is not implemented for IP27; any caller is a bug. */
void enable_irq(unsigned int irq_nr)
{
	panic("enable_irq() called ...");
}
/* This is stupid for an Origin which can have thousands of IRQs ... */
/* Per-irq chain of registered handlers; indexed by irq number. */
static struct irqaction *irq_action[NR_IRQS];
98 int get_irq_list(char *buf)
100 int i, len = 0;
101 struct irqaction * action;
103 for (i = 0 ; i < NR_IRQS ; i++) {
104 action = irq_action[i];
105 if (!action)
106 continue;
107 len += sprintf(buf+len, "%2d: %8d %c %s", i, kstat.irqs[0][i],
108 (action->flags & SA_INTERRUPT) ? '+' : ' ',
109 action->name);
110 for (action=action->next; action; action = action->next) {
111 len += sprintf(buf+len, ",%s %s",
112 (action->flags & SA_INTERRUPT)
113 ? " +" : "",
114 action->name);
116 len += sprintf(buf+len, "\n");
118 return len;
122 * do_IRQ handles all normal device IRQ's (the special SMP cross-CPU interrupts
123 * have their own specific handlers).
125 static void do_IRQ(cpuid_t thiscpu, int irq, struct pt_regs * regs)
127 struct irqaction *action;
128 int do_random;
130 irq_enter(thiscpu, irq);
131 kstat.irqs[thiscpu][irq]++;
133 action = *(irq + irq_action);
134 if (action) {
135 if (!(action->flags & SA_INTERRUPT))
136 __sti();
137 do_random = 0;
138 do {
139 do_random |= action->flags;
140 action->handler(irq, action->dev_id, regs);
141 action = action->next;
142 } while (action);
143 if (do_random & SA_SAMPLE_RANDOM)
144 add_interrupt_randomness(irq);
145 __cli();
147 irq_exit(thiscpu, irq);
149 /* unmasking and bottom half handling is done magically for us. */
/*
 * Index (0..63) of the most-significant set bit of x.
 * Returns 0 for x == 0, same as for x == 1.
 */
static int ms1bit(unsigned long x)
{
	int pos = 0;
	int span;

	/* Binary search down from the upper 32-bit half. */
	for (span = 32; span != 0; span >>= 1) {
		if (x >> span) {
			pos += span;
			x >>= span;
		}
	}
	return pos;
}
/*
 * This code is unnecessarily complex, because we do SA_INTERRUPT
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */
void ip27_do_irq(struct pt_regs *regs)
{
	int irq, swlevel;
	hubreg_t pend0, mask0;
	cpuid_t thiscpu = smp_processor_id();
	/* Each hub slice (A/B) has its own INT_MASK0 register. */
	int pi_int_mask0 = ((cputoslice(thiscpu) == 0) ?
			    PI_INT_MASK0_A : PI_INT_MASK0_B);

	/* copied from Irix intpend0() */
	while (((pend0 = LOCAL_HUB_L(PI_INT_PEND0)) &
		(mask0 = LOCAL_HUB_L(pi_int_mask0))) != 0) {
		pend0 &= mask0;		/* Pick intrs we should look at */
		if (pend0) {
			/* Prevent any of the picked intrs from recursing */
			LOCAL_HUB_S(pi_int_mask0, mask0 & ~(pend0));
			/* Service the picked set, highest swlevel first. */
			do {
				swlevel = ms1bit(pend0);
				LOCAL_HUB_CLR_INTR(swlevel);
				/* "map" swlevel to irq */
				irq = SWLEVEL_TO_IRQ(thiscpu, swlevel);
				do_IRQ(thiscpu, irq, regs);
				/* clear bit in pend0 */
				pend0 ^= 1ULL << swlevel;
			} while (pend0);
			/* Now allow the set of serviced intrs again */
			LOCAL_HUB_S(pi_int_mask0, mask0);
			/* NOTE(review): read-back, presumably to flush the
			   mask store to the hub before returning — confirm. */
			LOCAL_HUB_L(PI_INT_PEND0);
		}
	}
}
/* Startup one of the (PCI ...) IRQs routes over a bridge. */
static unsigned int bridge_startup(unsigned int irq)
{
	bridgereg_t device;
	bridge_t *bridge;
	int pin, swlevel;
	cpuid_t cpu;
	nasid_t master = NASID_FROM_PCI_IRQ(irq);

	bridge = (bridge_t *) NODE_SWIN_BASE(master, WID_FROM_PCI_IRQ(irq));
	pin = SLOT_FROM_PCI_IRQ(irq);
	cpu = IRQ_TO_CPU(irq);

	DBG("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = IRQ_TO_SWLEVEL(cpu, irq);
	intr_connect_level(cpu, swlevel);

	/* Route the pin's interrupt packet: swlevel selects the INT_PEND0
	   bit, master the destination nasid. */
	bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (master << 8));
	bridge->b_int_enable |= (1 << pin);
	/* more stuff in int_enable reg */
	bridge->b_int_enable |= 0x7ffffe00;

	/*
	 * XXX This only works if b_int_device is initialized to 0!
	 * We program the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
	device = bridge->b_int_device;
	device |= (pin << (pin*3));	/* 3 bits of device select per pin */
	bridge->b_int_device = device;

	bridge->b_widget.w_tflush;	/* Flush (read forces prior writes out) */

	return 0;	/* Never anything pending.  */
}
252 /* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
253 static unsigned int bridge_shutdown(unsigned int irq)
255 bridge_t *bridge;
256 int pin, swlevel;
258 bridge = (bridge_t *) NODE_SWIN_BASE(NASID_FROM_PCI_IRQ(irq),
259 WID_FROM_PCI_IRQ(irq));
260 DBG("bridge_shutdown: irq 0x%x\n", irq);
261 pin = SLOT_FROM_PCI_IRQ(irq);
264 * map irq to a swlevel greater than 6 since the first 6 bits
265 * of INT_PEND0 are taken
267 swlevel = IRQ_TO_SWLEVEL(cpu, irq);
268 intr_disconnect_level(smp_processor_id(), swlevel);
270 bridge->b_int_enable &= ~(1 << pin);
271 bridge->b_widget.w_tflush; /* Flush */
273 return 0; /* Never anything pending. */
/* Ad-hoc debugging dump of bridge and hub interrupt registers. */
void irq_debug(void)
{
	/* NOTE(review): hardcoded address — presumably the uncached node-0
	   bridge widget base; confirm before relying on this helper. */
	bridge_t *bridge = (bridge_t *) 0x9200000008000000;

	printk("bridge->b_int_status = 0x%x\n", bridge->b_int_status);
	printk("bridge->b_int_enable = 0x%x\n", bridge->b_int_enable);
	printk("PI_INT_PEND0 = 0x%lx\n", LOCAL_HUB_L(PI_INT_PEND0));
	printk("PI_INT_MASK0_A = 0x%lx\n", LOCAL_HUB_L(PI_INT_MASK0_A));
}
286 int setup_irq(unsigned int irq, struct irqaction *new)
288 int shared = 0;
289 struct irqaction *old, **p;
290 unsigned long flags;
292 DBG("setup_irq: 0x%x\n", irq);
293 if (irq >= NR_IRQS) {
294 printk("IRQ array overflow %d\n", irq);
295 while(1);
297 if (new->flags & SA_SAMPLE_RANDOM)
298 rand_initialize_irq(irq);
300 save_and_cli(flags);
301 p = irq_action + irq;
302 if ((old = *p) != NULL) {
303 /* Can't share interrupts unless both agree to */
304 if (!(old->flags & new->flags & SA_SHIRQ)) {
305 restore_flags(flags);
306 return -EBUSY;
309 /* Add new interrupt at end of irq queue */
310 do {
311 p = &old->next;
312 old = *p;
313 } while (old);
314 shared = 1;
317 *p = new;
319 if ((!shared) && (irq >= BASE_PCI_IRQ)) {
320 bridge_startup(irq);
322 restore_flags(flags);
324 return 0;
327 int request_irq(unsigned int irq,
328 void (*handler)(int, void *, struct pt_regs *),
329 unsigned long irqflags, const char * devname, void *dev_id)
331 int retval;
332 struct irqaction *action;
334 DBG("request_irq(): irq= 0x%x\n", irq);
335 if (!handler)
336 return -EINVAL;
338 action = (struct irqaction *)kmalloc(sizeof(*action), GFP_KERNEL);
339 if (!action)
340 return -ENOMEM;
342 action->handler = handler;
343 action->flags = irqflags;
344 action->mask = 0;
345 action->name = devname;
346 action->next = NULL;
347 action->dev_id = dev_id;
349 DBG("request_irq(): %s devid= 0x%x\n", devname, dev_id);
350 retval = setup_irq(irq, action);
351 DBG("request_irq(): retval= %d\n", retval);
352 if (retval)
353 kfree(action);
354 return retval;
357 void free_irq(unsigned int irq, void *dev_id)
359 struct irqaction * action, **p;
360 unsigned long flags;
362 if (irq >= NR_IRQS) {
363 printk("Trying to free IRQ%d\n", irq);
364 return;
366 for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
367 if (action->dev_id != dev_id)
368 continue;
370 /* Found it - now free it */
371 save_and_cli(flags);
372 *p = action->next;
373 if (irq >= BASE_PCI_IRQ)
374 bridge_shutdown(irq);
375 restore_flags(flags);
376 kfree(action);
377 return;
379 printk("Trying to free free IRQ%d\n",irq);
/* Useless ISA nonsense. */
/* ISA-style IRQ autoprobing makes no sense on IP27; any caller is a bug. */
unsigned long probe_irq_on (void)
{
	panic("probe_irq_on called!\n");
	return 0;
}
/* ISA-style autoprobe termination: nothing ever to report on IP27. */
int probe_irq_off (unsigned long irqs)
{
	(void) irqs;	/* ignored on this platform */
	return 0;
}
/* Install ip27_irq as the handler for exception vector 0 (interrupts). */
void __init init_IRQ(void)
{
	set_except_vector(0, ip27_irq);
}
399 #ifdef CONFIG_SMP
/*
 * The following are the global intr on off routines, copied almost
 * entirely from i386 code.
 */

int global_irq_holder = NO_PROC_ID;	/* CPU currently holding global_irq_lock */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

extern void show_stack(unsigned long* esp);
411 static void show(char * str)
413 int i;
414 int cpu = smp_processor_id();
416 printk("\n%s, CPU %d:\n", str, cpu);
417 printk("irq: %d [",irqs_running());
418 for(i=0;i < smp_num_cpus;i++)
419 printk(" %d",local_irq_count(i));
420 printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
421 for(i=0;i < smp_num_cpus;i++)
422 printk(" %d",local_bh_count(i));
424 printk(" ]\nStack dumps:");
425 for(i = 0; i < smp_num_cpus; i++) {
426 unsigned long esp;
427 if (i == cpu)
428 continue;
429 printk("\nCPU %d:",i);
430 printk("Code not developed yet\n");
431 /* show_stack(0); */
433 printk("\nCPU %d:",cpu);
434 printk("Code not developed yet\n");
435 /* show_stack(NULL); */
436 printk("\n");
#define MAXCOUNT 100000000

/* Give other cores a chance to run; argument parenthesized so callers
   may safely pass expressions. */
#define SYNC_OTHER_CORES(x) udelay((x)+1)
/*
 * Spin (with global_irq_lock held on entry) until no CPU is in
 * interrupt context and bottom halves are quiescent.  Drops and
 * re-takes the lock while waiting to avoid deadlock with other CPUs
 * trying the same thing.
 */
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		spin_unlock(&global_irq_lock);

		for (;;) {
			/* Periodically dump state so a stuck wait is visible. */
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			/* Briefly allow interrupts so pending work can drain. */
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			/* Re-acquire, then re-check the exit condition above. */
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
/*
 * Wait for any in-flight interrupt handlers on other CPUs to finish.
 */
void synchronize_irq(void)
{
	if (!irqs_running())
		return;

	/* Stupid approach */
	cli();
	sti();
}
/*
 * Acquire the global irq lock for `cpu` and wait until no other CPU
 * is running in interrupt context.  Returns immediately if this CPU
 * already holds the lock (recursive __global_cli()).
 */
static inline void get_irqlock(int cpu)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		spin_lock(&global_irq_lock);
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}
/*
 * Globally disable interrupts: local cli plus the global irq lock,
 * but only when interrupts were actually enabled and we are not
 * already inside an interrupt handler.
 */
void __global_cli(void)
{
	/* NOTE(review): status flags held in an unsigned int here while
	   __global_save_flags() uses unsigned long — confirm width. */
	unsigned int flags;

	__save_flags(flags);
	if (flags & ST0_IE) {
		int cpu = smp_processor_id();
		__cli();
		/* Don't take the global lock from irq context. */
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
/*
 * Globally re-enable interrupts: drop the global irq lock (when not
 * in irq context) and sti locally.
 */
void __global_sti(void)
{
	int this_cpu = smp_processor_id();

	if (!local_irq_count(this_cpu))
		release_irqlock(this_cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	/* NOTE(review): the 2 + local_enabled encoding below relies on
	   ST0_IE being bit 0 so the mask result is exactly 0 or 1 — confirm. */
	local_enabled = (flags & ST0_IE);
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}
/*
 * Restore the interrupt state encoded by __global_save_flags():
 * 0 = global cli, 1 = global sti, 2 = local cli, 3 = local sti.
 * Anything else is a caller bug and is logged.
 */
void __global_restore_flags(unsigned long flags)
{
	if (flags == 0)
		__global_cli();
	else if (flags == 1)
		__global_sti();
	else if (flags == 2)
		__cli();
	else if (flags == 3)
		__sti();
	else
		printk("global_restore_flags: %08lx\n", flags);
}
581 #endif /* CONFIG_SMP */
584 * Get values that vary depending on which CPU and bit we're operating on.
586 static hub_intmasks_t *intr_get_ptrs(cpuid_t cpu, int bit, int *new_bit,
587 hubreg_t **intpend_masks, int *ip)
589 hub_intmasks_t *hub_intmasks;
591 hub_intmasks = &cpu_data[cpu].p_intmasks;
592 if (bit < N_INTPEND_BITS) {
593 *intpend_masks = hub_intmasks->intpend0_masks;
594 *ip = 0;
595 *new_bit = bit;
596 } else {
597 *intpend_masks = hub_intmasks->intpend1_masks;
598 *ip = 1;
599 *new_bit = bit - N_INTPEND_BITS;
601 return hub_intmasks;
604 int intr_connect_level(int cpu, int bit)
606 int ip;
607 int slice = cputoslice(cpu);
608 volatile hubreg_t *mask_reg;
609 hubreg_t *intpend_masks;
610 nasid_t nasid = COMPACT_TO_NASID_NODEID(cputocnode(cpu));
612 (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &ip);
614 /* Make sure it's not already pending when we connect it. */
615 REMOTE_HUB_CLR_INTR(nasid, bit + ip * N_INTPEND_BITS);
617 intpend_masks[0] |= (1ULL << (u64)bit);
619 if (ip == 0) {
620 mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK0_A +
621 PI_INT_MASK_OFFSET * slice);
622 } else {
623 mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK1_A +
624 PI_INT_MASK_OFFSET * slice);
626 HUB_S(mask_reg, intpend_masks[0]);
627 return(0);
630 int intr_disconnect_level(int cpu, int bit)
632 int ip;
633 int slice = cputoslice(cpu);
634 volatile hubreg_t *mask_reg;
635 hubreg_t *intpend_masks;
636 nasid_t nasid = COMPACT_TO_NASID_NODEID(cputocnode(cpu));
638 (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &ip);
639 intpend_masks[0] &= ~(1ULL << (u64)bit);
640 if (ip == 0) {
641 mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK0_A +
642 PI_INT_MASK_OFFSET * slice);
643 } else {
644 mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK1_A +
645 PI_INT_MASK_OFFSET * slice);
647 HUB_S(mask_reg, intpend_masks[0]);
648 return(0);
/* Reschedule-IPI handler: intentionally empty. */
void handle_resched_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Nothing, the return from intr will work for us */
}
657 extern void smp_call_function_interrupt(void);
/*
 * Connect the per-CPU reschedule and call-function IPI levels for
 * `cpu`, and (once, on the first caller) register the Linux handlers
 * for both slices.  No-op on non-SMP builds.
 */
void install_cpuintr(int cpu)
{
#ifdef CONFIG_SMP
#if (CPUS_PER_NODE == 2)
	static int done = 0;
	int irq;

	/*
	 * This is a hack till we have a pernode irqlist. Currently,
	 * just have the master cpu set up the handlers for the per
	 * cpu irqs.
	 */

	irq = CPU_RESCHED_A_IRQ + cputoslice(cpu);
	intr_connect_level(cpu, IRQ_TO_SWLEVEL(cpu, irq));
	if (done == 0)
		if (request_irq(irq, handle_resched_intr, 0, "resched", 0))
			panic("intercpu intr unconnectible\n");
	irq = CPU_CALL_A_IRQ + cputoslice(cpu);
	intr_connect_level(cpu, IRQ_TO_SWLEVEL(cpu, irq));
	/* NOTE(review): smp_call_function_interrupt is declared as
	   void (*)(void), not as an irq handler — presumably relies on
	   an implicit/benign prototype mismatch here; confirm. */
	if (done == 0)
		if (request_irq(irq, smp_call_function_interrupt, 0,
				"callfunc", 0))
			panic("intercpu intr unconnectible\n");
	/* HACK STARTS */
	/* First caller also registers handlers for the other slice (+1). */
	if (done)
		return;
	irq = CPU_RESCHED_A_IRQ + cputoslice(cpu) + 1;
	if (request_irq(irq, handle_resched_intr, 0, "resched", 0))
		panic("intercpu intr unconnectible\n");
	irq = CPU_CALL_A_IRQ + cputoslice(cpu) + 1;
	if (request_irq(irq, smp_call_function_interrupt, 0,
			"callfunc", 0))
		panic("intercpu intr unconnectible\n");
	done = 1;
	/* HACK ENDS */
#else /* CPUS_PER_NODE */
#error Must redefine this for more than 2 CPUS.
#endif /* CPUS_PER_NODE */
#endif /* CONFIG_SMP */
}
701 void install_tlbintr(int cpu)
703 int intr_bit = N_INTPEND_BITS + TLB_INTR_A + cputoslice(cpu);
705 intr_connect_level(cpu, intr_bit);