arch/mips/cavium-octeon/octeon-irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
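
/*
 * These locks serialize the read-modify-write updates of the per-core
 * CIU0/CIU1 enable registers on chips that lack the EN*_W1{S,C}
 * registers.
 */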
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
	{
		int index = cvmx_get_core_num() * 2;
		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	}
	default:
		break;
	}

	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
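
/*
 * Pick the core that should receive the next occurrence of this irq.
 * With a multi-CPU affinity mask the irq is round-robined to the next
 * online CPU in the mask, with a single-CPU mask that CPU is used, and
 * with an empty mask the current core is used.
 */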
static int next_coreid_for_irq(struct irq_desc *desc)
{
#ifdef CONFIG_SMP
	int coreid;
	int weight = cpumask_weight(desc->affinity);

	if (weight > 1) {
		int cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, desc->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
		coreid = octeon_coreid_for_cpu(cpu);
	} else if (weight == 1) {
		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
	} else {
		coreid = cvmx_get_core_num();
	}
	return coreid;
#else
	return cvmx_get_core_num();
#endif
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	index = cvmx_get_core_num() * 2;
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	default:
		break;
	}

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if (likely((desc->status & IRQ_DISABLED) == 0))
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en0 |= 1ull << bit;
		} else {
			en0 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	}
	return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox_v2,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
};
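
/*
 * The CIU1 handlers below mirror the CIU0 ones, but operate on the
 * EN1/SUM1 registers covering the second group of 64 interrupt
 * sources (OCTEON_IRQ_WDOG0 and up), which are signalled on IP3.
 */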
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int coreid = bit;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
	int index;
	int coreid = irq - OCTEON_IRQ_WDOG0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = coreid * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en1 |= 1ull << bit;
		} else {
			en1 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
	return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
};
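
/*
 * Ack handlers for the running chip; arch_init_irq() points these at
 * the _v2 variants when the chip has the EN*_W1{S,C} registers.
 */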
static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);
void __init arch_init_irq(void)
{
	unsigned int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_mbox;
	struct irq_chip *chip1;
	struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
	} else {
		octeon_ciu0_ack = octeon_irq_ciu0_ack;
		octeon_ciu1_ack = octeon_irq_ciu1_ack;
		chip0 = &octeon_irq_chip_ciu0;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
		chip1 = &octeon_irq_chip_ciu1;
		chip1_wd = &octeon_irq_chip_ciu1_wd;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

	set_c0_status(0x300 << 2);
}
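
/*
 * Main interrupt dispatch: service CIU0 (IP2) and CIU1 (IP3) sources by
 * taking the highest pending enabled bit, otherwise fall back to the
 * remaining core interrupt lines, and loop until nothing is pending.
 */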
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
	unsigned int irq;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
				octeon_ciu0_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
				octeon_ciu1_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
#ifdef CONFIG_HOTPLUG_CPU
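
/*
 * Called on a CPU that is going offline: mask the per-core interrupts
 * and migrate any irq with affinity to this CPU onto a CPU that stays
 * online.
 */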
void fixup_irqs(void)
{
	int irq;
	struct irq_desc *desc;
	cpumask_t new_affinity;
	unsigned long flags;
	int do_set_affinity;
	int cpu;

	cpu = smp_processor_id();

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
		desc = irq_to_desc(irq);
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			/* The eoi function will disable them on this CPU. */
			desc->chip->eoi(irq);
			break;
		case OCTEON_IRQ_WDOG0:
		case OCTEON_IRQ_WDOG1:
		case OCTEON_IRQ_WDOG2:
		case OCTEON_IRQ_WDOG3:
		case OCTEON_IRQ_WDOG4:
		case OCTEON_IRQ_WDOG5:
		case OCTEON_IRQ_WDOG6:
		case OCTEON_IRQ_WDOG7:
		case OCTEON_IRQ_WDOG8:
		case OCTEON_IRQ_WDOG9:
		case OCTEON_IRQ_WDOG10:
		case OCTEON_IRQ_WDOG11:
		case OCTEON_IRQ_WDOG12:
		case OCTEON_IRQ_WDOG13:
		case OCTEON_IRQ_WDOG14:
		case OCTEON_IRQ_WDOG15:
			/*
			 * These have special per CPU semantics and
			 * are handled in the watchdog driver.
			 */
			break;
		default:
			raw_spin_lock_irqsave(&desc->lock, flags);
			/*
			 * If this irq has an action, it is in use and
			 * must be migrated if it has affinity to this
			 * cpu.
			 */
			if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
				if (cpumask_weight(desc->affinity) > 1) {
					/*
					 * It has multi CPU affinity,
					 * just remove this CPU from
					 * the affinity set.
					 */
					cpumask_copy(&new_affinity, desc->affinity);
					cpumask_clear_cpu(cpu, &new_affinity);
				} else {
					/*
					 * Otherwise, put it on lowest
					 * numbered online CPU.
					 */
					cpumask_clear(&new_affinity);
					cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
				}
				do_set_affinity = 1;
			} else {
				do_set_affinity = 0;
			}
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			if (do_set_affinity)
				irq_set_affinity(irq, &new_affinity);

			break;
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */