MIPS: Octeon: Move MSI code out of octeon-irq.c.
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
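/*
 * These locks serialize the read-modify-write updates of the per-core
 * CVMX_CIU_INTX_EN0/EN1 registers done by the non-W1S/W1C code paths below.
 */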
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}
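/*
 * The "Core" irq_chip below manipulates the CP0 Status/Cause IM bits that
 * back OCTEON_IRQ_SW0 .. OCTEON_IRQ_TIMER.
 */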
static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}
static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};
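/*
 * CIU0 (CVMX_CIU_INTX_SUM0/EN0) interrupts arrive on the CP0 IP2 line and
 * cover OCTEON_IRQ_WORKQ0 .. OCTEON_IRQ_BOOTDMA.
 */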
static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;
        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
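/*
 * The *_v2 variants below use the EN*_W1S (write-one-to-set) and EN*_W1C
 * (write-one-to-clear) registers, so single bits can be changed without a
 * read-modify-write sequence or the spinlocks above.
 */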
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}
static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}
static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

        return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
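/*
 * CIU1 (CVMX_CIU_INT_SUM1/CVMX_CIU_INTX_EN1) interrupts arrive on the CP0
 * IP3 line and cover OCTEON_IRQ_WDOG0 .. OCTEON_IRQ_RESERVED151.
 */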
static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;
        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

        return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
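/*
 * Select the irq_chip implementations based on the chip model (W1S/W1C
 * capable or not) and register handlers for the core, CIU0 and CIU1 ranges.
 */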
void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

        set_c0_status(0x300 << 2);
}
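/*
 * Main interrupt dispatch loop: IP2 selects the CIU SUM0 bank, IP3 the
 * SUM1 bank, and any remaining CP0 cause bits are handled as plain MIPS
 * CPU interrupts.  Loop until no enabled cause bits remain pending.
 */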
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}
#ifdef CONFIG_HOTPLUG_CPU
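/* Return nonzero if the CIU enable bit for @irq is set on @cpu. */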
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < 64) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}
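/*
 * Used on the CPU hotplug (offline) path: mask the per-core interrupts
 * locally and re-target any CIU interrupts routed to this core at the
 * remaining online CPUs.
 */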
void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif

        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}
#endif /* CONFIG_HOTPLUG_CPU */