[POWERPC] Add new interrupt mapping core and change platforms to use it
/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY		4
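
/*
 * Background note on XICS priorities: numerically lower values are more
 * favored, so IPI_PRIORITY (4) outranks DEFAULT_PRIORITY (5).  A CPU's
 * CPPR gates delivery (only sources more favored than the current CPPR
 * are presented), which is why code below writes 0xff to accept
 * everything and 0 to reject everything.
 */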
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};
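
/*
 * Layout note: this struct mirrors the per-CPU XICS presentation area;
 * xirr_poll sits at offset 0, xirr at 4 and qirr (the MFRR) at 12.  The
 * registers are big-endian, so bytes[0] of xirr is the CPPR byte and
 * bytes[0] of qirr is the MFRR byte, which is what the out_8() accessors
 * below rely on.
 */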
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

static struct irq_host *xics_host;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
/* Direct HW low level accessors */

static inline unsigned int direct_xirr_info_get(int n_cpu)
{
	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
/* LPAR low level accessors */

static inline long plpar_eoi(unsigned long xirr)
{
	return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
	return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
	return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
	unsigned long dummy;

	return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}

static inline unsigned int lpar_xirr_info_get(int n_cpu)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
/* High level handlers and init code */

#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq)
{
	unsigned int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[virq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		server = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);

		if (cpus_empty(tmp))
			server = default_distrib_server;
		else
			server = get_hard_smp_processor_id(first_cpu(tmp));
	}

	return server;
}
#else
static int get_irq_server(unsigned int virq)
{
	return default_server;
}
#endif
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	unsigned int server;

	pr_debug("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_debug(" -> map to hwirq 0x%x\n", irq);
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk("set_xive %x, server %x\n", ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;
	unsigned int server;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	server = get_irq_server(irq);
	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_debug("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}
static unsigned int xics_startup(unsigned int virq)
{
	unsigned int irq;

	/* force a reverse mapping of the interrupt so it gets in the cache */
	irq = (unsigned int)irq_map[virq].hwirq;
	irq_radix_revmap(xics_host, irq);

	/* unmask it */
	xics_unmask_irq(virq);
	return 0;
}
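
/*
 * Priming the reverse map at startup matters: inserting into the radix
 * tree may allocate, which is fine here in process context but not in
 * xics_remap_irq() below, which runs at interrupt time and expects the
 * entry to already be cached.
 */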
static void xics_eoi_direct(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	direct_xirr_info_set(cpu, (0xff << 24) | irq);
}

static void xics_eoi_lpar(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set(cpu, (0xff << 24) | irq);
}
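
/*
 * XIRR format note: the top byte is the CPPR and the low 24 bits are
 * the source number (XISR).  Reading XIRR accepts the highest-priority
 * pending interrupt; writing it back is the EOI.  The (0xff << 24) | irq
 * stores above therefore EOI the source and restore the CPPR to 0xff
 * (accept anything) in a single store.
 */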
static inline unsigned int xics_remap_irq(unsigned int vec)
{
	unsigned int irq;

	vec &= 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;
	irq = irq_radix_revmap(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	printk(KERN_ERR "Interrupt %u (real) is invalid,"
	       " disabling it.\n", vec);
	xics_mask_real_irq(vec);
	return NO_IRQ;
}

static unsigned int xics_get_irq_direct(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(direct_xirr_info_get(cpu));
}

static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(lpar_xirr_info_get(cpu));
}
#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
#if 0
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu, regs);
}
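
/*
 * IPIs ride the MFRR: writing IPI_PRIORITY to a CPU's qirr raises the
 * IPI (see xics_cause_IPI() below), and the handlers above write 0xff
 * to retire it before draining the queued messages.
 */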
void xics_cause_IPI(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */
static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cpu, cppr);
	else
		direct_cppr_info(cpu, cppr);
	iosync();
}
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(cpu, 0xff);

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
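
/*
 * Indicator math above: by the PAPR convention (stated here as
 * background), the RTAS indicator index for the global interrupt queue
 * is derived from the global server number as
 * (1 << interrupt_server_size) - 1 - gserver; the last argument selects
 * joining (1) or leaving (0) the queue.
 */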
static struct irq_chip xics_pic_direct = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for
	 * things like vdevices, events, etc... The trick we use here is to
	 * match everything here except the legacy 8259, which is compatible
	 * with "chrp,iic".
	 */
	return !device_is_compatible(node, "chrp,iic");
}
static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw, unsigned int flags)
{
	unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;

	pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n",
		 virq, hw, flags);

	if (sense && sense != IRQ_TYPE_LEVEL_LOW)
		printk(KERN_WARNING "xics: using unsupported sense 0x%x"
		       " for irq %d (h: 0x%lx)\n", flags, virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
	return 0;
}

static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw, unsigned int flags)
{
	unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;

	pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n",
		 virq, hw, flags);

	if (sense && sense != IRQ_TYPE_LEVEL_LOW)
		printk(KERN_WARNING "xics: using unsupported sense 0x%x"
		       " for irq %d (h: 0x%lx)\n", flags, virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
	return 0;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* The current xics implementation translates everything to level.
	 * That is not technically right for MSIs but is irrelevant at this
	 * point. We might get smarter in the future.
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
static struct irq_host_ops xics_host_direct_ops = {
	.match = xics_host_match,
	.map = xics_host_map_direct,
	.xlate = xics_host_xlate,
};

static struct irq_host_ops xics_host_lpar_ops = {
	.match = xics_host_match,
	.map = xics_host_map_lpar,
	.xlate = xics_host_xlate,
};
static void __init xics_init_host(void)
{
	struct irq_host_ops *ops;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ops = &xics_host_lpar_ops;
	else
		ops = &xics_host_direct_ops;
	xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
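
/*
 * IRQ_HOST_MAP_TREE selects the radix-tree reverse map, which suits the
 * large, sparse hardware interrupt numbers XICS hands out; the trailing
 * XICS_IRQ_SPURIOUS argument is the host's "invalid" hardware number.
 */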
static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				    unsigned long size)
{
#ifdef CONFIG_SMP
	int i;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
#else
	if (hw_id != 0)
		return;
	xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}
static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	unsigned int ilen;
	u32 *ireg;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Does that ever happen? We'll know soon enough... but even good
	 * old f80 does have that property.
	 */
	WARN_ON(ireg == NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		*indx = *ireg;
	}

	ireg = (u32 *)get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		/* XXX Use proper OF parsing code here !!! */
		addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		addr |= *ireg++;
		ilen -= sizeof(u32);
		size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		size |= *ireg++;
		ilen -= sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}
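
/*
 * The open-coded parse above assumes "reg" is a flat list of
 * (addr, size) pairs, each a 64-bit value split across two 32-bit
 * cells, one pair per interrupt server; hence the four-cell stride and
 * the XXX about switching to proper OF parsing.
 */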
static void __init xics_setup_8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	int cascade, naddr;
	u32 *addrp;
	unsigned long intack = 0;

	for_each_node_by_type(np, "interrupt-controller")
		if (device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	if (found == NULL) {
		printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
		return;
	}
	cascade = irq_of_parse_and_map(found, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "xics: failed to map cascade interrupt");
		return;
	}
	pr_debug("xics: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge",
					    NULL);
		if (addrp == NULL)
			continue;
		naddr = prom_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n",
		       intack);
	i8259_init(found, intack);
	of_node_put(found);
	set_irq_chained_handler(cascade, pseries_8259_cascade);
}
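
/*
 * Cascade wiring: the legacy 8259's output is itself a XICS source, so
 * ISA interrupts arrive as that single XICS interrupt and
 * pseries_8259_cascade() re-polls the 8259 (via the intack address
 * found above) to identify the real source.
 */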
void __init xics_init_IRQ(void)
{
	int i;
	struct device_node *np;
	u32 *ireg, ilen, indx = 0;
	int found = 0;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		if (firmware_has_feature(FW_FEATURE_LPAR))
			break;
		xics_init_one_node(np, &indx);
	}
	if (found == 0)
		return;

	xics_init_host();

	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = (u32 *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
			ireg = (u32 *)get_property(np,
					"ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				/* take last element */
				default_distrib_server = ireg[i-1];
			}
			ireg = (u32 *)get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (ireg)
				interrupt_server_size = *ireg;
			break;
		}
	}
	of_node_put(np);

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	xics_setup_8259_cascade();

	ppc64_boot_msg(0x21, "XICS Done");
}
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	unsigned int ipi;

	ipi = irq_create_mapping(xics_host, XICS_IPI, 0);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
			    "IPI", NULL);
	else
		request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
			    "IPI", NULL);
}
#endif /* CONFIG_SMP */
void xics_teardown_cpu(int secondary)
{
	int cpu = smp_processor_id();
	unsigned int ipi;
	struct irq_desc *desc;

	xics_set_cpu_priority(cpu, 0);

	/*
	 * we need to EOI the IPI if we got here from kexec down IPI
	 *
	 * probably need to check all the other interrupts too
	 * should we be flagging idle loop instead?
	 * or creating some task to be scheduled?
	 */

	ipi = irq_find_mapping(xics_host, XICS_IPI);
	if (ipi == XICS_IRQ_SPURIOUS)
		return;
	desc = get_irq_desc(ipi);
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(ipi);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(cpu, 0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
			       "ibm,get-xive returns %d\n",
			       virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
		irq_desc[virq].affinity = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif