2 * arch/ppc64/kernel/xics.c
4 * Copyright 2000 IBM Corporation.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 #include <linux/config.h>
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/smp.h>
17 #include <linux/interrupt.h>
18 #include <linux/signal.h>
19 #include <linux/init.h>
20 #include <linux/gfp.h>
21 #include <linux/radix-tree.h>
22 #include <linux/cpu.h>
25 #include <asm/pgtable.h>
30 #include <asm/hvcall.h>
31 #include <asm/machdep.h>
35 static unsigned int xics_startup(unsigned int irq
);
36 static void xics_enable_irq(unsigned int irq
);
37 static void xics_disable_irq(unsigned int irq
);
38 static void xics_mask_and_ack_irq(unsigned int irq
);
39 static void xics_end_irq(unsigned int irq
);
40 static void xics_set_affinity(unsigned int irq_nr
, cpumask_t cpumask
);
42 struct hw_interrupt_type xics_pic
= {
44 .startup
= xics_startup
,
45 .enable
= xics_enable_irq
,
46 .disable
= xics_disable_irq
,
47 .ack
= xics_mask_and_ack_irq
,
49 .set_affinity
= xics_set_affinity
52 struct hw_interrupt_type xics_8259_pic
= {
53 .typename
= " XICS/8259",
54 .ack
= xics_mask_and_ack_irq
,
57 /* This is used to map real irq numbers to virtual */
58 static struct radix_tree_root irq_map
= RADIX_TREE_INIT(GFP_ATOMIC
);
61 #define XICS_IRQ_SPURIOUS 0
63 /* Want a priority other than 0. Various HW issues require this. */
64 #define DEFAULT_PRIORITY 5
67 * Mark IPIs as higher priority so we can take them inside interrupts that
68 * arent marked SA_INTERRUPT
70 #define IPI_PRIORITY 4
88 static struct xics_ipl
*xics_per_cpu
[NR_CPUS
];
90 static int xics_irq_8259_cascade
= 0;
91 static int xics_irq_8259_cascade_real
= 0;
92 static unsigned int default_server
= 0xFF;
93 /* also referenced in smp.c... */
94 unsigned int default_distrib_server
= 0;
97 * XICS only has a single IPI, so encode the messages per CPU
99 struct xics_ipi_struct xics_ipi_message
[NR_CPUS
] __cacheline_aligned
;
101 /* RTAS service tokens */
108 int (*xirr_info_get
)(int cpu
);
109 void (*xirr_info_set
)(int cpu
, int val
);
110 void (*cppr_info
)(int cpu
, u8 val
);
111 void (*qirr_info
)(int cpu
, u8 val
);
117 static int pSeries_xirr_info_get(int n_cpu
)
119 return xics_per_cpu
[n_cpu
]->xirr
.word
;
122 static void pSeries_xirr_info_set(int n_cpu
, int value
)
124 xics_per_cpu
[n_cpu
]->xirr
.word
= value
;
127 static void pSeries_cppr_info(int n_cpu
, u8 value
)
129 xics_per_cpu
[n_cpu
]->xirr
.bytes
[0] = value
;
132 static void pSeries_qirr_info(int n_cpu
, u8 value
)
134 xics_per_cpu
[n_cpu
]->qirr
.bytes
[0] = value
;
137 static xics_ops pSeries_ops
= {
138 pSeries_xirr_info_get
,
139 pSeries_xirr_info_set
,
144 static xics_ops
*ops
= &pSeries_ops
;
149 static inline long plpar_eoi(unsigned long xirr
)
151 return plpar_hcall_norets(H_EOI
, xirr
);
154 static inline long plpar_cppr(unsigned long cppr
)
156 return plpar_hcall_norets(H_CPPR
, cppr
);
159 static inline long plpar_ipi(unsigned long servernum
, unsigned long mfrr
)
161 return plpar_hcall_norets(H_IPI
, servernum
, mfrr
);
164 static inline long plpar_xirr(unsigned long *xirr_ret
)
167 return plpar_hcall(H_XIRR
, 0, 0, 0, 0, xirr_ret
, &dummy
, &dummy
);
170 static int pSeriesLP_xirr_info_get(int n_cpu
)
172 unsigned long lpar_rc
;
173 unsigned long return_value
;
175 lpar_rc
= plpar_xirr(&return_value
);
176 if (lpar_rc
!= H_Success
)
177 panic(" bad return code xirr - rc = %lx \n", lpar_rc
);
178 return (int)return_value
;
181 static void pSeriesLP_xirr_info_set(int n_cpu
, int value
)
183 unsigned long lpar_rc
;
184 unsigned long val64
= value
& 0xffffffff;
186 lpar_rc
= plpar_eoi(val64
);
187 if (lpar_rc
!= H_Success
)
188 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc
,
192 void pSeriesLP_cppr_info(int n_cpu
, u8 value
)
194 unsigned long lpar_rc
;
196 lpar_rc
= plpar_cppr(value
);
197 if (lpar_rc
!= H_Success
)
198 panic("bad return code cppr - rc = %lx\n", lpar_rc
);
201 static void pSeriesLP_qirr_info(int n_cpu
, u8 value
)
203 unsigned long lpar_rc
;
205 lpar_rc
= plpar_ipi(get_hard_smp_processor_id(n_cpu
), value
);
206 if (lpar_rc
!= H_Success
)
207 panic("bad return code qirr - rc = %lx\n", lpar_rc
);
210 xics_ops pSeriesLP_ops
= {
211 pSeriesLP_xirr_info_get
,
212 pSeriesLP_xirr_info_set
,
217 static unsigned int xics_startup(unsigned int virq
)
219 virq
= irq_offset_down(virq
);
220 if (radix_tree_insert(&irq_map
, virt_irq_to_real(virq
),
221 &virt_irq_to_real_map
[virq
]) == -ENOMEM
)
222 printk(KERN_CRIT
"Out of memory creating real -> virtual"
223 " IRQ mapping for irq %u (real 0x%x)\n",
224 virq
, virt_irq_to_real(virq
));
225 return 0; /* return value is ignored */
228 static unsigned int real_irq_to_virt(unsigned int real_irq
)
232 ptr
= radix_tree_lookup(&irq_map
, real_irq
);
235 return ptr
- virt_irq_to_real_map
;
239 static int get_irq_server(unsigned int irq
)
243 #ifdef CONFIG_IRQ_ALL_CPUS
244 /* For the moment only implement delivery to all cpus or one cpu */
245 if (smp_threads_ready
) {
246 cpumask_t cpumask
= irq_affinity
[irq
];
247 cpumask_t tmp
= CPU_MASK_NONE
;
248 if (cpus_equal(cpumask
, CPU_MASK_ALL
)) {
249 server
= default_distrib_server
;
251 cpus_and(tmp
, cpu_online_map
, cpumask
);
254 server
= default_distrib_server
;
256 server
= get_hard_smp_processor_id(first_cpu(tmp
));
259 server
= default_server
;
262 server
= default_server
;
268 static int get_irq_server(unsigned int irq
)
270 return default_server
;
274 static void xics_enable_irq(unsigned int virq
)
280 irq
= virt_irq_to_real(irq_offset_down(virq
));
284 server
= get_irq_server(virq
);
285 call_status
= rtas_call(ibm_set_xive
, 3, 1, NULL
, irq
, server
,
287 if (call_status
!= 0) {
288 printk(KERN_ERR
"xics_enable_irq: irq=%d: ibm_set_xive "
289 "returned %x\n", irq
, call_status
);
293 /* Now unmask the interrupt (often a no-op) */
294 call_status
= rtas_call(ibm_int_on
, 1, 1, NULL
, irq
);
295 if (call_status
!= 0) {
296 printk(KERN_ERR
"xics_enable_irq: irq=%d: ibm_int_on "
297 "returned %x\n", irq
, call_status
);
302 static void xics_disable_real_irq(unsigned int irq
)
310 call_status
= rtas_call(ibm_int_off
, 1, 1, NULL
, irq
);
311 if (call_status
!= 0) {
312 printk(KERN_ERR
"xics_disable_real_irq: irq=%d: "
313 "ibm_int_off returned %x\n", irq
, call_status
);
317 server
= get_irq_server(irq
);
318 /* Have to set XIVE to 0xff to be able to remove a slot */
319 call_status
= rtas_call(ibm_set_xive
, 3, 1, NULL
, irq
, server
, 0xff);
320 if (call_status
!= 0) {
321 printk(KERN_ERR
"xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
322 " returned %x\n", irq
, call_status
);
/* Disable a virtual irq by translating it and disabling the real irq. */
static void xics_disable_irq(unsigned int virq)
{
	unsigned int irq;

	irq = virt_irq_to_real(irq_offset_down(virq));
	xics_disable_real_irq(irq);
}
335 static void xics_end_irq(unsigned int irq
)
337 int cpu
= smp_processor_id();
340 ops
->xirr_info_set(cpu
, ((0xff << 24) |
341 (virt_irq_to_real(irq_offset_down(irq
)))));
345 static void xics_mask_and_ack_irq(unsigned int irq
)
347 int cpu
= smp_processor_id();
349 if (irq
< irq_offset_value()) {
352 ops
->xirr_info_set(cpu
, ((0xff<<24) |
353 xics_irq_8259_cascade_real
));
358 int xics_get_irq(struct pt_regs
*regs
)
360 unsigned int cpu
= smp_processor_id();
364 vec
= ops
->xirr_info_get(cpu
);
365 /* (vec >> 24) == old priority */
368 /* for sanity, this had better be < NR_IRQS - 16 */
369 if (vec
== xics_irq_8259_cascade_real
) {
370 irq
= i8259_irq(cpu
);
372 /* Spurious cascaded interrupt. Still must ack xics */
373 xics_end_irq(irq_offset_up(xics_irq_8259_cascade
));
377 } else if (vec
== XICS_IRQ_SPURIOUS
) {
380 irq
= real_irq_to_virt(vec
);
382 irq
= real_irq_to_virt_slowpath(vec
);
384 printk(KERN_ERR
"Interrupt %d (real) is invalid,"
385 " disabling it.\n", vec
);
386 xics_disable_real_irq(vec
);
388 irq
= irq_offset_up(irq
);
395 irqreturn_t
xics_ipi_action(int irq
, void *dev_id
, struct pt_regs
*regs
)
397 int cpu
= smp_processor_id();
399 ops
->qirr_info(cpu
, 0xff);
401 WARN_ON(cpu_is_offline(cpu
));
403 while (xics_ipi_message
[cpu
].value
) {
404 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION
,
405 &xics_ipi_message
[cpu
].value
)) {
407 smp_message_recv(PPC_MSG_CALL_FUNCTION
, regs
);
409 if (test_and_clear_bit(PPC_MSG_RESCHEDULE
,
410 &xics_ipi_message
[cpu
].value
)) {
412 smp_message_recv(PPC_MSG_RESCHEDULE
, regs
);
415 if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK
,
416 &xics_ipi_message
[cpu
].value
)) {
418 smp_message_recv(PPC_MSG_MIGRATE_TASK
, regs
);
421 #ifdef CONFIG_DEBUGGER
422 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK
,
423 &xics_ipi_message
[cpu
].value
)) {
425 smp_message_recv(PPC_MSG_DEBUGGER_BREAK
, regs
);
432 void xics_cause_IPI(int cpu
)
434 ops
->qirr_info(cpu
, IPI_PRIORITY
);
437 void xics_setup_cpu(void)
439 int cpu
= smp_processor_id();
441 ops
->cppr_info(cpu
, 0xff);
445 #endif /* CONFIG_SMP */
447 void xics_init_IRQ(void)
450 unsigned long intr_size
= 0;
451 struct device_node
*np
;
452 uint
*ireg
, ilen
, indx
= 0;
453 unsigned long intr_base
= 0;
454 struct xics_interrupt_node
{
459 ppc64_boot_msg(0x20, "XICS Init");
461 ibm_get_xive
= rtas_token("ibm,get-xive");
462 ibm_set_xive
= rtas_token("ibm,set-xive");
463 ibm_int_on
= rtas_token("ibm,int-on");
464 ibm_int_off
= rtas_token("ibm,int-off");
466 np
= of_find_node_by_type(NULL
, "PowerPC-External-Interrupt-Presentation");
468 panic("xics_init_IRQ: can't find interrupt presentation");
471 ireg
= (uint
*)get_property(np
, "ibm,interrupt-server-ranges", NULL
);
474 * set node starting index for this node
479 ireg
= (uint
*)get_property(np
, "reg", &ilen
);
481 panic("xics_init_IRQ: can't find interrupt reg property");
484 inodes
[indx
].addr
= (unsigned long long)*ireg
++ << 32;
485 ilen
-= sizeof(uint
);
486 inodes
[indx
].addr
|= *ireg
++;
487 ilen
-= sizeof(uint
);
488 inodes
[indx
].size
= (unsigned long long)*ireg
++ << 32;
489 ilen
-= sizeof(uint
);
490 inodes
[indx
].size
|= *ireg
++;
491 ilen
-= sizeof(uint
);
493 if (indx
>= NR_CPUS
) break;
496 np
= of_find_node_by_type(np
, "PowerPC-External-Interrupt-Presentation");
497 if ((indx
< NR_CPUS
) && np
) goto nextnode
;
499 /* Find the server numbers for the boot cpu. */
500 for (np
= of_find_node_by_type(NULL
, "cpu");
502 np
= of_find_node_by_type(np
, "cpu")) {
503 ireg
= (uint
*)get_property(np
, "reg", &ilen
);
504 if (ireg
&& ireg
[0] == hard_smp_processor_id()) {
505 ireg
= (uint
*)get_property(np
, "ibm,ppc-interrupt-gserver#s", &ilen
);
506 i
= ilen
/ sizeof(int);
508 default_server
= ireg
[0];
509 default_distrib_server
= ireg
[i
-1]; /* take last element */
516 intr_base
= inodes
[0].addr
;
517 intr_size
= (ulong
)inodes
[0].size
;
519 np
= of_find_node_by_type(NULL
, "interrupt-controller");
521 printk(KERN_WARNING
"xics: no ISA interrupt controller\n");
522 xics_irq_8259_cascade_real
= -1;
523 xics_irq_8259_cascade
= -1;
525 ireg
= (uint
*) get_property(np
, "interrupts", NULL
);
527 panic("xics_init_IRQ: can't find ISA interrupts property");
529 xics_irq_8259_cascade_real
= *ireg
;
530 xics_irq_8259_cascade
531 = virt_irq_create_mapping(xics_irq_8259_cascade_real
);
535 if (systemcfg
->platform
== PLATFORM_PSERIES
) {
538 /* FIXME: Do this dynamically! --RR */
541 xics_per_cpu
[i
] = __ioremap((ulong
)inodes
[get_hard_smp_processor_id(i
)].addr
,
542 (ulong
)inodes
[get_hard_smp_processor_id(i
)].size
,
546 xics_per_cpu
[0] = __ioremap((ulong
)intr_base
, intr_size
,
548 #endif /* CONFIG_SMP */
549 } else if (systemcfg
->platform
== PLATFORM_PSERIES_LPAR
) {
550 ops
= &pSeriesLP_ops
;
553 xics_8259_pic
.enable
= i8259_pic
.enable
;
554 xics_8259_pic
.disable
= i8259_pic
.disable
;
555 for (i
= 0; i
< 16; ++i
)
556 get_irq_desc(i
)->handler
= &xics_8259_pic
;
557 for (; i
< NR_IRQS
; ++i
)
558 get_irq_desc(i
)->handler
= &xics_pic
;
560 ops
->cppr_info(boot_cpuid
, 0xff);
563 ppc64_boot_msg(0x21, "XICS Done");
567 * We cant do this in init_IRQ because we need the memory subsystem up for
570 static int __init
xics_setup_i8259(void)
572 if (naca
->interrupt_controller
== IC_PPC_XIC
&&
573 xics_irq_8259_cascade
!= -1) {
574 if (request_irq(irq_offset_up(xics_irq_8259_cascade
),
575 no_action
, 0, "8259 cascade", NULL
))
576 printk(KERN_ERR
"xics_setup_i8259: couldn't get 8259 "
582 arch_initcall(xics_setup_i8259
);
585 void xics_request_IPIs(void)
587 virt_irq_to_real_map
[XICS_IPI
] = XICS_IPI
;
589 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
590 request_irq(irq_offset_up(XICS_IPI
), xics_ipi_action
, SA_INTERRUPT
,
592 get_irq_desc(irq_offset_up(XICS_IPI
))->status
|= IRQ_PER_CPU
;
596 static void xics_set_affinity(unsigned int virq
, cpumask_t cpumask
)
601 unsigned long newmask
;
602 cpumask_t tmp
= CPU_MASK_NONE
;
604 irq
= virt_irq_to_real(irq_offset_down(virq
));
605 if (irq
== XICS_IPI
|| irq
== NO_IRQ
)
608 status
= rtas_call(ibm_get_xive
, 1, 3, xics_status
, irq
);
611 printk(KERN_ERR
"xics_set_affinity: irq=%d ibm,get-xive "
612 "returns %d\n", irq
, status
);
616 /* For the moment only implement delivery to all cpus or one cpu */
617 if (cpus_equal(cpumask
, CPU_MASK_ALL
)) {
618 newmask
= default_distrib_server
;
620 cpus_and(tmp
, cpu_online_map
, cpumask
);
623 newmask
= get_hard_smp_processor_id(first_cpu(tmp
));
626 status
= rtas_call(ibm_set_xive
, 3, 1, NULL
,
627 irq
, newmask
, xics_status
[1]);
630 printk(KERN_ERR
"xics_set_affinity: irq=%d ibm,set-xive "
631 "returns %d\n", irq
, status
);
636 #ifdef CONFIG_HOTPLUG_CPU
638 /* Interrupts are disabled. */
639 void xics_migrate_irqs_away(void)
641 int set_indicator
= rtas_token("set-indicator");
642 const unsigned int giqs
= 9005UL; /* Global Interrupt Queue Server */
644 unsigned int irq
, virq
, cpu
= smp_processor_id();
646 BUG_ON(set_indicator
== RTAS_UNKNOWN_SERVICE
);
648 /* Reject any interrupt that was queued to us... */
649 ops
->cppr_info(cpu
, 0);
652 /* Refuse any new interrupts... */
653 rtas_call(set_indicator
, 3, 1, &status
, giqs
,
654 hard_smp_processor_id(), 0);
655 WARN_ON(status
!= 0);
657 /* Allow IPIs again... */
658 ops
->cppr_info(cpu
, DEFAULT_PRIORITY
);
666 /* We cant set affinity on ISA interrupts */
667 if (virq
< irq_offset_value())
670 desc
= get_irq_desc(virq
);
671 irq
= virt_irq_to_real(irq_offset_down(virq
));
673 /* We need to get IPIs still. */
674 if (irq
== XICS_IPI
|| irq
== NO_IRQ
)
677 /* We only need to migrate enabled IRQS */
678 if (desc
== NULL
|| desc
->handler
== NULL
679 || desc
->action
== NULL
680 || desc
->handler
->set_affinity
== NULL
)
683 spin_lock_irqsave(&desc
->lock
, flags
);
685 status
= rtas_call(ibm_get_xive
, 1, 3, xics_status
, irq
);
687 printk(KERN_ERR
"migrate_irqs_away: irq=%d "
688 "ibm,get-xive returns %d\n",
694 * We only support delivery to all cpus or to one cpu.
695 * The irq has to be migrated only in the single cpu
698 if (xics_status
[0] != get_hard_smp_processor_id(cpu
))
701 printk(KERN_WARNING
"IRQ %d affinity broken off cpu %u\n",
704 /* Reset affinity to all cpus */
705 xics_status
[0] = default_distrib_server
;
707 status
= rtas_call(ibm_set_xive
, 3, 1, NULL
, irq
,
708 xics_status
[0], xics_status
[1]);
710 printk(KERN_ERR
"migrate_irqs_away: irq=%d "
711 "ibm,set-xive returns %d\n",
715 spin_unlock_irqrestore(&desc
->lock
, flags
);