/* arch/mips/kernel/irq-gic.c */
#undef DEBUG

#include <linux/bitmap.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
static unsigned long _gic_base;
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
static struct gic_intr_map *_intrmap;

static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
#define gic_wedgeb2bok 0        /*
                                 * Can GIC handle b2b writes to wedge register?
                                 */
#if gic_wedgeb2bok == 0
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
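/*
 * When gic_wedgeb2bok is 0 the GIC cannot accept back-to-back writes to the
 * WEDGE register, so writers serialise on gic_wedgeb2b_lock and issue a dummy
 * read of GIC_SH_CONFIG after the write to space consecutive accesses apart.
 */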
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
        unsigned long flags;
#endif
        pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
                 read_c0_status());
        if (!gic_wedgeb2bok)
                spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
        GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
        if (!gic_wedgeb2bok) {
                (void) GIC_REG(SHARED, GIC_SH_CONFIG);
                spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
        }
}
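/*
 * Illustrative sketch (not part of the original file): platform SMP code is
 * expected to translate the destination CPU into the GIC interrupt number
 * reserved for that CPU's IPI and pass it to gic_send_ipi(). The xlate
 * helpers named below are assumptions for illustration only.
 *
 *      static void example_send_ipi_single(int cpu, unsigned int action)
 *      {
 *              if (action == SMP_CALL_FUNCTION)
 *                      gic_send_ipi(plat_ipi_call_int_xlate(cpu));
 *              else
 *                      gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
 *      }
 */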
/* This is Malta specific and needs to be exported */
static void vpe_local_setup(unsigned int numvpes)
{
        int i;
        unsigned long timer_interrupt = 5, perf_interrupt = 5;
        unsigned int vpe_ctl;

        /*
         * Setup the default performance counter timer interrupts
         * for all VPEs
         */
        for (i = 0; i < numvpes; i++) {
                GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

                /* Are Interrupts locally routable? */
                GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
                if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
                        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
                                 GIC_MAP_TO_PIN_MSK | timer_interrupt);

                if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
                        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
                                 GIC_MAP_TO_PIN_MSK | perf_interrupt);
        }
}
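/*
 * Note on vpe_local_setup(): each VPE is selected through the VPE_LOCAL
 * "other" address register, and its local timer and performance counter
 * interrupts are routed to CPU pin 5 only if the VPE's control register
 * reports them as locally routable.
 */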
unsigned int gic_get_int(void)
{
        unsigned int i;
        unsigned long *pending, *intrmask, *pcpu_mask;
        unsigned long *pending_abs, *intrmask_abs;

        /* Get per-cpu bitmaps */
        pending = pending_regs[smp_processor_id()].pending;
        intrmask = intrmask_regs[smp_processor_id()].intrmask;
        pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

        pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
                                                         GIC_SH_PEND_31_0_OFS);
        intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
                                                          GIC_SH_MASK_31_0_OFS);

        for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
                GICREAD(*pending_abs, pending[i]);
                GICREAD(*intrmask_abs, intrmask[i]);
                pending_abs++;
                intrmask_abs++;
        }

        bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
        bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);

        i = find_first_bit(pending, GIC_NUM_INTRS);

        pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);

        return i;
}
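/*
 * Illustrative sketch (not part of the original file): a board's interrupt
 * dispatch code is expected to call gic_get_int() to pick the first pending,
 * unmasked interrupt routed to this CPU and feed it to the generic IRQ layer
 * offset by the IRQ base passed to gic_init(). GIC_INT_BASE below is a
 * placeholder for that base.
 *
 *      irq = gic_get_int();
 *      if (irq < GIC_NUM_INTRS)
 *              do_IRQ(GIC_INT_BASE + irq);
 *      else
 *              spurious_interrupt();
 */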
static unsigned int gic_irq_startup(unsigned int irq)
{
        pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
        irq -= _irqbase;
        /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
        GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
                 1 << (irq % 32));
        return 0;
}
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
        unsigned long flags;
#endif
        pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
        irq -= _irqbase;
        GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
                 1 << (irq % 32));

        if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
                if (!gic_wedgeb2bok)
                        spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
                GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
                if (!gic_wedgeb2bok) {
                        (void) GIC_REG(SHARED, GIC_SH_CONFIG);
                        spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
                }
        }
}
static void gic_mask_irq(unsigned int irq)
{
        pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
        irq -= _irqbase;
        /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
        GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
                 1 << (irq % 32));
}

static void gic_unmask_irq(unsigned int irq)
{
        pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
        irq -= _irqbase;
        /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
        GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
                 1 << (irq % 32));
}
#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(gic_lock);

static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        cpumask_t       tmp = CPU_MASK_NONE;
        unsigned long   flags;
        int             i;

        pr_debug(KERN_DEBUG "%s called\n", __func__);
        irq -= _irqbase;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -1;

        /* Assumption : cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);
        for (;;) {
                /* Re-route this IRQ */
                GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

                /*
                 * FIXME: assumption that _intrmap is ordered and has no holes
                 */

                /* Update the intr_map */
                _intrmap[irq].cpunum = first_cpu(tmp);

                /* Update the pcpu_masks */
                for (i = 0; i < NR_CPUS; i++)
                        clear_bit(irq, pcpu_masks[i].pcpu_mask);
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

        }
        cpumask_copy(irq_desc[irq].affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}
#endif
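/*
 * irq_chip callbacks for GIC-routed interrupts: startup and unmask set the
 * per-interrupt bit in the shared SMASK registers, mask clears it through
 * RMASK, and gic_irq_ack() additionally acknowledges edge-triggered sources
 * via the WEDGE register.
 */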
static struct irq_chip gic_irq_controller = {
        .name           =       "MIPS GIC",
        .startup        =       gic_irq_startup,
        .ack            =       gic_irq_ack,
        .mask           =       gic_mask_irq,
        .mask_ack       =       gic_mask_irq,
        .unmask         =       gic_unmask_irq,
        .eoi            =       gic_unmask_irq,
#ifdef CONFIG_SMP
        .set_affinity   =       gic_set_affinity,
#endif
};
static void __init setup_intr(unsigned int intr, unsigned int cpu,
        unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
        /* Setup Intr to Pin mapping */
        if (pin & GIC_MAP_TO_NMI_MSK) {
                GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
                /* FIXME: hack to route NMI to all cpu's */
                for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
                        GICWRITE(GIC_REG_ADDR(SHARED,
                                          GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
                                 0xffffffff);
                }
        } else {
                GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
                         GIC_MAP_TO_PIN_MSK | pin);
                /* Setup Intr to CPU mapping */
                GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
        }

        /* Setup Intr Polarity */
        GIC_SET_POLARITY(intr, polarity);

        /* Setup Intr Trigger Type */
        GIC_SET_TRIGGER(intr, trigtype);

        /* Init Intr Masks */
        GIC_SET_INTR_MASK(intr, 0);
}
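/*
 * gic_basic_init() resets polarity, trigger type and mask for every shared
 * interrupt, programs the board-supplied _intrmap entries through
 * setup_intr(), records IPI sources in the per-CPU software masks consumed
 * by gic_get_int(), configures per-VPE timer/performance counter routing,
 * and finally attaches gic_irq_controller to IRQs _irqbase through
 * (_irqbase + numintrs - 1).
 */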
static void __init gic_basic_init(void)
{
        unsigned int i, cpu;

        /* Setup defaults */
        for (i = 0; i < GIC_NUM_INTRS; i++) {
                GIC_SET_POLARITY(i, GIC_POL_POS);
                GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
                GIC_SET_INTR_MASK(i, 0);
        }

        /* Setup specifics */
        for (i = 0; i < _mapsize; i++) {
                cpu = _intrmap[i].cpunum;
                if (cpu == X)
                        continue;

                setup_intr(_intrmap[i].intrnum,
                           _intrmap[i].cpunum,
                           _intrmap[i].pin,
                           _intrmap[i].polarity,
                           _intrmap[i].trigtype);
                /* Initialise per-cpu Interrupt software masks */
                if (_intrmap[i].ipiflag)
                        set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
        }

        vpe_local_setup(numvpes);

        for (i = _irqbase; i < (_irqbase + numintrs); i++)
                set_irq_chip(i, &gic_irq_controller);
}
void __init gic_init(unsigned long gic_base_addr,
                     unsigned long gic_addrspace_size,
                     struct gic_intr_map *intr_map, unsigned int intr_map_size,
                     unsigned int irqbase)
{
        unsigned int gicconfig;

        _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
                                                    gic_addrspace_size);
        _irqbase = irqbase;
        _intrmap = intr_map;
        _mapsize = intr_map_size;

        GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
        numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
                   GIC_SH_CONFIG_NUMINTRS_SHF;
        numintrs = ((numintrs + 1) * 8);

        numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
                  GIC_SH_CONFIG_NUMVPES_SHF;

        pr_debug("%s called\n", __func__);

        gic_basic_init();
}
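/*
 * Illustrative sketch (not part of the original file): board setup code is
 * expected to describe its interrupt routing in a gic_intr_map table and pass
 * it to gic_init() together with the GIC's physical base address, register
 * space size and the first Linux IRQ number to use. GIC_BASE_ADDR,
 * GIC_ADDRSPACE_SZ and GIC_INT_BASE are placeholders for platform values, and
 * the example map entry is made up for illustration.
 *
 *      static struct gic_intr_map board_gic_intr_map[] = {
 *              { .cpunum = 0, .intrnum = 3, .pin = 0,
 *                .polarity = GIC_POL_POS, .trigtype = GIC_TRIG_LEVEL,
 *                .ipiflag = 0 },
 *      };
 *
 *      gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, board_gic_intr_map,
 *               ARRAY_SIZE(board_gic_intr_map), GIC_INT_BASE);
 */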