/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

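/*
 * The two rwlocks above serialize updates to the per-core CIU enable
 * registers on chips that lack the EN*_W1{S,C} aliases, and
 * octeon_irq_msi_lock guards the single shared MSI enable register
 * used on the PCIe parts.
 */
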
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

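/*
 * The "core" irq_chip below covers the eight CP0 interrupt lines
 * (two software, six hardware).  Each line is masked and unmasked
 * through the IM0-IM7 bits of the CP0 Status register, which is what
 * the 0x100 << bit expressions manipulate.
 */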
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

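/*
 * The IM bits live in each core's own CP0 Status register, so a
 * global disable has to run octeon_irq_core_disable_local() on every
 * CPU rather than just the current one.
 */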
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

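/*
 * Note on register indexing: each core has a pair of CIU enable
 * registers, one feeding CP0 line IP2 and one feeding IP3.  EN0
 * accesses use index (coreid * 2) and EN1 accesses use
 * (coreid * 2 + 1), matching the IP2/IP3 split that
 * plat_irq_dispatch() relies on at the bottom of this file.
 */
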
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

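/*
 * Affinity is implemented by setting the irq's enable bit only on
 * cores in the destination mask and clearing it on all other online
 * cores.
 */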
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

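/*
 * The EN*_W1S ("write one to set") and EN*_W1C ("write one to
 * clear") register aliases let a core flip individual enable bits
 * without a read-modify-write cycle, which is what makes the _v2
 * handlers above safe without any lock.
 */
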
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

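/*
 * PCI/PCIe MSI support.  On PCI-only parts a write to the MSI
 * receive register is enough to ack; on PCIe parts all 64 MSI
 * enables share one register, so updates are guarded by
 * octeon_irq_msi_lock.
 */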
#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing to do */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

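	/*
	 * Pick the irq_chip implementations: only the pass-2 parts
	 * checked below have the EN*_W1{S,C} register aliases needed
	 * for the lockless _v2 chips.
	 */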
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
	set_c0_status(0x300 << 2);
}

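/*
 * Main interrupt dispatch: CIU0 sources arrive on CP0 line IP2 and
 * CIU1 sources on IP3; their sum registers are masked with the
 * enable registers to find a source to hand to do_IRQ().  Any other
 * pending core interrupt line is dispatched directly.
 */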
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

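/*
 * CPU hotplug support: fixup_irqs() runs on a CPU that is going
 * offline and migrates any CIU interrupts still routed to it over to
 * the remaining online CPUs.
 */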
#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0)
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	else
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	return isset;
}

void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */