/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
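
/*
 * These locks serialize the read-modify-write updates of the shared
 * per-core CIU_INTX_EN0/EN1 enable registers performed by the
 * handlers that lack the W1S/W1C register forms.
 */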
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};
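
/*
 * CIU0 (IRQs 24 - 87, CIU_INT_SUM0) is reported on the core's IP2
 * line; the handlers below mask and unmask its sources per core
 * through the CIU_INTX_EN0 registers.
 */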

static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
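
/*
 * On chips that have them, the EN*_W1S and EN*_W1C forms of the
 * enable registers set or clear individual enable bits with a single
 * write-1-to-set/write-1-to-clear store, so the _v2 handlers below
 * can update the CIU without the locked read-modify-write sequence
 * used above.
 */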

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}
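
/*
 * The _v1/_v2 wrappers above pair the sum0 acknowledge with the
 * regular CIU ack of the matching chip variant, so both the timer's
 * source bit and the summary interrupt are quieted.
 */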

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
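
/*
 * CIU1 (IRQs 88 - 151, CIU_INT_SUM1) mirrors the CIU0 handling above,
 * but is reported on the core's IP3 line and masked through the
 * CIU_INTX_EN1 registers.
 */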

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);

static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe.  Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing to do. */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually.  Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved.  MSI
                 * interrupts are always enabled and the ACK is assumed
                 * to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");
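
        /*
         * PASS2 versions of CN58XX, CN56XX and CN52XX have the
         * EN*_W1{S,C} registers and can therefore use the lockless
         * v2 irq_chip variants.
         */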
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif
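
        /*
         * Unmask the two CIU summary lines: 0x300 << 2 is
         * STATUSF_IP2 | STATUSF_IP3.
         */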
        set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;
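
        /*
         * Loop until no enabled interrupt is pending.  IP2 (CIU sum0)
         * and IP3 (CIU sum1) are decoded to an individual CIU source
         * with fls64(); anything else is dispatched as a plain MIPS
         * CPU interrupt.
         */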
        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                         (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                         (1ull << bit)) >> bit;
        }
        return isset;
}

void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif

        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}
#endif /* CONFIG_HOTPLUG_CPU */