/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hw_irq.h>
#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
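/*
 * Usage sketch (editorial, illustrative only): walking every IO-APIC
 * pin that feeds one irq_cfg looks like
 *
 *	struct irq_pin_list *entry;
 *
 *	for_each_irq_pin(entry, cfg->irq_2_pin)
 *		do_something(entry->apic, entry->pin);
 */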
/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* Number of legacy interrupts */
static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;
void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *get_one_free_irq_2_pin(int node)
{
	struct irq_pin_list *pin;

	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);

	return pin;
}
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
	[0]  = { .vector = IRQ0_VECTOR,  },
	[1]  = { .vector = IRQ1_VECTOR,  },
	[2]  = { .vector = IRQ2_VECTOR,  },
	[3]  = { .vector = IRQ3_VECTOR,  },
	[4]  = { .vector = IRQ4_VECTOR,  },
	[5]  = { .vector = IRQ5_VECTOR,  },
	[6]  = { .vector = IRQ6_VECTOR,  },
	[7]  = { .vector = IRQ7_VECTOR,  },
	[8]  = { .vector = IRQ8_VECTOR,  },
	[9]  = { .vector = IRQ9_VECTOR,  },
	[10] = { .vector = IRQ10_VECTOR, },
	[11] = { .vector = IRQ11_VECTOR, },
	[12] = { .vector = IRQ12_VECTOR, },
	[13] = { .vector = IRQ13_VECTOR, },
	[14] = { .vector = IRQ14_VECTOR, },
	[15] = { .vector = IRQ15_VECTOR, },
};
void __init io_apic_disable_legacy(void)
{
	nr_legacy_irqs = 0;
	nr_irqs_gsi = 0;
}

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	int count;
	int node;
	int i;

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(boot_cpu_id);

	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
		if (i < nr_legacy_irqs)
			cpumask_setall(cfg[i].domain);
	}

	return 0;
}
#ifdef CONFIG_SPARSE_IRQ
struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = NULL;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (desc)
		cfg = desc->chip_data;

	return cfg;
}
static struct irq_cfg *get_one_free_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
							  GFP_ATOMIC, node)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		}
	}

	return cfg;
}
int arch_init_chip_data(struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;

	cfg = desc->chip_data;
	if (!cfg) {
		desc->chip_data = get_one_free_irq_cfg(node);
		if (!desc->chip_data) {
			printk(KERN_ERR "can not alloc irq_cfg\n");
			BUG_ON(1);
		}
	}

	return 0;
}
/* for move_irq_desc */
static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node)
{
	struct irq_pin_list *old_entry, *head, *tail, *entry;

	cfg->irq_2_pin = NULL;
	old_entry = old_cfg->irq_2_pin;
	if (!old_entry)
		return;

	entry = get_one_free_irq_2_pin(node);
	if (!entry)
		return;

	entry->apic	= old_entry->apic;
	entry->pin	= old_entry->pin;
	head		= entry;
	tail		= entry;
	old_entry	= old_entry->next;
	while (old_entry) {
		entry = get_one_free_irq_2_pin(node);
		if (!entry) {
			entry = head;
			while (entry) {
				head = entry->next;
				kfree(entry);
				entry = head;
			}
			/* still use the old one */
			return;
		}
		entry->apic	= old_entry->apic;
		entry->pin	= old_entry->pin;
		tail->next	= entry;
		tail		= entry;
		old_entry	= old_entry->next;
	}

	tail->next = NULL;
	cfg->irq_2_pin = head;
}
static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry, *next;

	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
		return;

	entry = old_cfg->irq_2_pin;

	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}
	old_cfg->irq_2_pin = NULL;
}
void arch_init_copy_chip_data(struct irq_desc *old_desc,
			      struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;
	struct irq_cfg *old_cfg;

	cfg = get_one_free_irq_cfg(node);

	if (!cfg)
		return;

	desc->chip_data = cfg;

	old_cfg = old_desc->chip_data;

	memcpy(cfg, old_cfg, sizeof(struct irq_cfg));

	init_copy_irq_2_pin(old_cfg, cfg, node);
}
static void free_irq_cfg(struct irq_cfg *old_cfg)
{
	kfree(old_cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
	struct irq_cfg *old_cfg, *cfg;

	old_cfg = old_desc->chip_data;
	cfg = desc->chip_data;

	if (old_cfg == cfg)
		return;

	if (old_cfg) {
		free_irq_2_pin(old_cfg, cfg);
		free_irq_cfg(old_cfg);
		old_desc->chip_data = NULL;
	}
}
/* end for move_irq_desc */
#else
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
#endif
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
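/*
 * Usage sketch (editorial, illustrative only): a read-modify-write
 * cycle pairs one io_apic_read() with one io_apic_modify() under
 * ioapic_lock, so the index register programmed by the read can be
 * reused by the write, unless the SiS rmw bug forces the rewrite
 * above:
 *
 *	reg = io_apic_read(apic, 0x10 + pin * 2);
 *	reg |= IO_APIC_REDIR_MASKED;
 *	io_apic_modify(apic, 0x10 + pin * 2, reg);
 */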
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}
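/*
 * Editorial note: the union gives raw 32-bit views (w1 = low word,
 * w2 = high word) of the 64-bit redirection table entry, so e.g.
 *
 *	union entry_union eu = { .entry.mask = 1 };
 *
 * yields a w1 with only the mask bit set and a zero w2, which is
 * exactly what ioapic_mask_entry() below writes out.
 */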
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
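/*
 * Editorial note: the two helpers above are deliberate mirror images.
 * An unmasked entry is written high word first so the fully populated
 * RTE becomes visible together with the unmask; masking writes the
 * low word first so the mask bit is set before the rest of the entry
 * is touched. Either order reversed could let the pin fire with a
 * half-written destination or vector.
 */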
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = get_one_free_irq_2_pin(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
				node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}
static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	BUG_ON(!cfg);

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unmask_IO_APIC_irq_desc(desc);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
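/*
 * Worked example (editorial): booting with "pirq=5,11" stores 5 in
 * pirq_entries[7] and 11 in pirq_entries[6] (PIRQs are filled in
 * upside down, as noted above); pin_2_irq() below then applies those
 * overrides to IO-APIC pins 23 and 22 respectively.
 */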
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
	int apic;
	struct IO_APIC_route_entry **ioapic_entries;

	ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
				GFP_ATOMIC);
	if (!ioapic_entries)
		return 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_ATOMIC);
		if (!ioapic_entries[apic])
			goto nomem;
	}

	return ioapic_entries;

nomem:
	while (--apic >= 0)
		kfree(ioapic_entries[apic]);
	kfree(ioapic_entries);

	return 0;
}
/*
 * Saves all the IO-APIC RTE's
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
	}

	return 0;
}
/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			break;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_entries[apic][pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}
/*
 * Restore IO APIC entries that were saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					ioapic_entries[apic][pin]);
	}
	return 0;
}
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic;

	for (apic = 0; apic < nr_ioapics; apic++)
		kfree(ioapic_entries[apic]);

	kfree(ioapic_entries);
}
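/*
 * Usage sketch (editorial, illustrative only; this is the intended
 * lifecycle of the four helpers above, e.g. around reprogramming the
 * IO-APICs for interrupt remapping):
 *
 *	struct IO_APIC_route_entry **entries;
 *
 *	entries = alloc_ioapic_entries();
 *	if (entries && !save_IO_APIC_setup(entries)) {
 *		mask_IO_APIC_setup(entries);
 *		... reconfigure ...
 *		restore_IO_APIC_setup(entries);
 *	}
 *	free_ioapic_entries(entries);
 */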
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif
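/*
 * Worked example (editorial): for irq 9, port = 0x4d0 + (9 >> 3) =
 * 0x4d1 and the result is (value >> (9 & 7)) & 1, i.e. bit 1 of the
 * second ELCR byte; a set bit means the line is level triggered.
 */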
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
int (*ioapic_renumber_irq)(int ioapic, int irq);

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
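/*
 * Editorial note on the srcbusirq encoding used above: for PCI
 * sources the MP table packs device and pin into one byte, with
 * slot = (srcbusirq >> 2) & 0x1f and pin = srcbusirq & 3. E.g.
 * srcbusirq 0x0d means slot 3, INTB (pin 1).
 */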
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
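/*
 * Editorial note: candidate vectors advance in steps of 8, and with
 * 16 vectors per priority level (level = vector >> 4) at most two
 * active vectors share a level per pass; once the search runs past
 * first_system_vector, the offset rotates ((offset + 1) % 8) so the
 * next pass probes a different slot within each level.
 */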
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
int setup_ioapic_entry(int apic_id, int irq,
		       struct IO_APIC_route_entry *entry,
		       unsigned int destination, int trigger,
		       int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = apic->irq_dest_mode;
		/*
		 * Trigger mode in the IRTE will always be edge, and the
		 * actual level or edge trigger will be setup in the IO-APIC
		 * RTE. This will help simplify level triggered irq migration.
		 * For more details, see the comments above explaining IO-APIC
		 * irq migration in the presence of interrupt-remapping.
		 */
		irte.trigger_mode = 0;
		irte.dlvry_mode = apic->irq_delivery_mode;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = desc->chip_data;

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, desc, trigger);
	if (irq < nr_legacy_irqs)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic_id, pin, entry);
}
static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];
static void __init setup_IO_APIC_irqs(void)
{
	int apic_id = 0, pin, idx, irq;
	int notcon = 0;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int node = cpu_to_node(boot_cpu_id);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

#ifdef CONFIG_ACPI
	if (!acpi_disabled && acpi_ioapic) {
		apic_id = mp_find_ioapic(0);
		if (apic_id < 0)
			apic_id = 0;
	}
#endif

	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}

		irq = pin_2_irq(idx, apic_id, pin);

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
				apic->multi_timer_check(apic_id, irq))
			continue;

		desc = irq_to_desc_alloc_node(irq, node);
		if (!desc) {
			printk(KERN_INFO "can not get irq_desc for %d\n", irq);
			continue;
		}
		cfg = desc->chip_data;
		add_pin_to_irq_node(cfg, node, apic_id, pin);
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
		setup_IO_APIC_irq(apic_id, pin, irq, desc,
				irq_trigger(idx), irq_polarity(idx));
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			  " Stat Dmod Deli Vect:\n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_desc(irq, desc) {
		struct irq_pin_list *entry;

		cfg = desc->chip_data;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}
__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

fs_initcall(print_ICs);
/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int apic;
	unsigned long flags;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	if (!nr_legacy_irqs)
		return;

	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little complex (need to configure both
	 * IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask		= 0; /* Enabled */
		entry.trigger		= 0; /* Edge */
		entry.irr		= 0;
		entry.polarity		= 0; /* High */
		entry.delivery_status	= 0;
		entry.dest_mode		= 0; /* Physical */
		entry.delivery_mode	= dest_ExtINT; /* ExtInt */
		entry.vector		= 0;
		entry.dest		= read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	physid_mask_t tmp;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
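/*
 * Worked example (editorial): mdelay((10 * 1000) / HZ) busy-waits ten
 * ticks' worth of wall time (10ms at HZ=1000, 40ms at HZ=250), and
 * the check above accepts the timer if more than four ticks actually
 * arrived, tolerating a few lost or cached ticks.
 */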
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;
	struct irq_cfg *cfg;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < nr_legacy_irqs) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	cfg = irq_cfg(irq);
	__unmask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */
#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}
/*
 * Either sets desc->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves desc->affinity untouched.
 */
unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
		  unsigned int *dest_id)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	irq = desc->irq;
	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return -1;

	cpumask_copy(desc->affinity, mask);

	*dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
	return 0;
}
static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	unsigned int irq;
	int ret = -1;

	irq = desc->irq;
	cfg = desc->chip_data;

	spin_lock_irqsave(&ioapic_lock, flags);
	ret = set_desc_affinity(desc, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, cfg);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return ret;
}

static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	return set_ioapic_affinity_irq_desc(desc, mask);
}
#ifdef CONFIG_INTR_REMAP

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 */
static int
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	struct irte irte;
	unsigned int dest;
	unsigned int irq;
	int ret = -1;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return ret;

	irq = desc->irq;
	if (get_irte(irq, &irte))
		return ret;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return ret;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modified the IRTE and flushes the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(desc->affinity, mask);

	return 0;
}

/*
 * Migrates the IRQ destination in the process context.
 */
static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
					   const struct cpumask *mask)
{
	return migrate_ioapic_irq_desc(desc, mask);
}
static int set_ir_ioapic_affinity_irq(unsigned int irq,
				      const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#else
static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
						  const struct cpumask *mask)
{
	return 0;
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__get_cpu_var(vector_irq)[vector] = -1;
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
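/*
 * Editorial note on the IRR check above: the 256 interrupt vectors
 * are spread over eight 32-bit APIC_IRR registers spaced 0x10 apart,
 * so vector v lives in the register at APIC_IRR + (v / 32) * 0x10,
 * bit v % 32; e.g. vector 0x61 (97) is bit 1 of the register at
 * APIC_IRR + 0x30.
 */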
static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
{
	struct irq_desc *desc = *descp;
	struct irq_cfg *cfg = desc->chip_data;
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_desc **descp)
{
	__irq_complete_move(descp, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;

	__irq_complete_move(&desc, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
static void ack_apic_edge(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_complete_move(&desc);
	move_native_irq(irq);
	ack_APIC_irq();
}

atomic_t irq_mis_count;
/*
 * IO-APIC versions below 0x20 don't support an EOI register.
 * For the record, here is the information about various versions:
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *
 * Some of the Intel ICH Specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is an error in the documentation; these ICH
 * chips use io-apics of version 0x20.
 *
 * For IO-APICs with an EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with the RTE being masked during
 * this.
 */
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (mp_ioapics[entry->apic].apicver >= 0x20) {
			/*
			 * Intr-remapping uses the pin number as the
			 * virtual vector in the RTE. The actual vector
			 * is programmed in the intr-remapping table
			 * entry. Hence for the io-apic EOI we use the
			 * pin number.
			 */
			if (irq_remapped(irq))
				io_apic_eoi(entry->apic, entry->pin);
			else
				io_apic_eoi(entry->apic, cfg->vector);
		} else {
			__mask_and_edge_IO_APIC_irq(entry);
			__unmask_and_level_IO_APIC_irq(entry);
		}
	}
}
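/*
 * eoi_ioapic_irq() below is the locked wrapper: RTE accesses must be
 * serialized by ioapic_lock.
 */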
static void eoi_ioapic_irq(struct irq_desc *desc)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int irq;

	irq = desc->irq;
	cfg = desc->chip_data;

	spin_lock_irqsave(&ioapic_lock, flags);
	__eoi_ioapic_irq(irq, cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ack_apic_level(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long v;
	int i;
	struct irq_cfg *cfg;
	int do_unmask_irq = 0;

	irq_complete_move(&desc);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq_desc(desc);
	}
#endif

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets). Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as an edge-triggered one but the respective
	 * IRR bit gets set nevertheless. As a result the I/O unit expects an
	 * EOI message but it will never arrive and further interrupts are
	 * blocked from the source. The exact reason is so far unknown, but
	 * the phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually. We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt. We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul. --macro
	 *
	 * Also, when a cpu goes offline, fixup_irqs() will forward any
	 * unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPIs. Hence, in this case also
	 * the level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcast to the IO-APICs, which would clear the remote IRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
	 * supporting an EOI register, we do an explicit EOI to clear the
	 * remote IRR, and on IO-APICs which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	cfg = desc->chip_data;
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing the remote IRR bit (either by delivering the
	 * EOI message via an io-apic EOI register write or simulating it
	 * using mask+edge followed by unmask+level logic) manually when the
	 * level-triggered interrupt is seen as an edge-triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(desc);
	}

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		cfg = desc->chip_data;
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_IO_APIC_irq_desc(desc);
	}
}
#ifdef CONFIG_INTR_REMAP
static void ir_ack_apic_edge(unsigned int irq)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	ack_APIC_irq();
	eoi_ioapic_irq(desc);
}
#endif /* CONFIG_INTR_REMAP */
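/*
 * Two flavours of the IO-APIC irq_chip: the native one and the variant
 * used when interrupt remapping is enabled, which acks via the
 * remapping-aware helpers above.
 */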
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
	.set_affinity	= set_ioapic_affinity_irq,
	.retrigger	= ioapic_retrigger_irq,
};

static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
#ifdef CONFIG_INTR_REMAP
	.ack		= ir_ack_apic_edge,
	.eoi		= ir_ack_apic_level,
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < nr_legacy_irqs)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
		}
	}
}
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};
static void lapic_register_intr(int irq, struct irq_desc *desc)
{
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1)
		return;
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_desc *desc = irq_to_desc(0);
	struct irq_cfg *cfg = desc->chip_data;
	int node = cpu_to_node(boot_cpu_id);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC. Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
#ifdef CONFIG_X86_32
	{
		unsigned int ver;

		ver = apic_read(APIC_LVR);
		ver = GET_APIC_VERSION(ver);
		timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
	}
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input. So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/* for edge trigger, setup_IO_APIC_irq already
			 * leaves it unmasked,
			 * so we only need to unmask it if it is
			 * level-triggered. Do we really have a
			 * level-triggered timer?
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_IO_APIC_irq_desc(desc);
		}
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0, desc);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
	      "report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices. However there may be an I/O APIC pin available for
 * this interrupt regardless. The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A. In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table. With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default. We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor. Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */
#define PIC_IRQS	(1UL << PIC_CASCADE_IR)
void __init setup_IO_APIC(void)
{
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
	io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	/*
	 * Set up IO-APIC IRQ routing.
	 */
	x86_init.mpparse.setup_ioapic_ids();

	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (nr_legacy_irqs)
		check_timer();
}
/*
 * Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path.
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
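/*
 * Suspend/resume support: the redirection table entries of every
 * IO-APIC are saved across suspend and written back on resume, and the
 * IO-APIC ID register is restored if the firmware changed it.
 */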
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new = NULL;
	struct irq_desc *desc_new = NULL;

	irq = 0;
	if (irq_want < nr_irqs_gsi)
		irq_want = nr_irqs_gsi;

	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new < nr_irqs; new++) {
		desc_new = irq_to_desc_alloc_node(new, node);
		if (!desc_new) {
			printk(KERN_INFO "can not get irq_desc for %d\n", new);
			continue;
		}
		cfg_new = desc_new->chip_data;

		if (cfg_new->vector != 0)
			continue;

		desc_new = move_irq_desc(desc_new, node);
		cfg_new = desc_new->chip_data;

		if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init_keep_chip_data(irq);

	return irq;
}

int create_irq(void)
{
	int node = cpu_to_node(boot_cpu_id);
	unsigned int irq_want;
	int irq;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want, node);

	if (irq == 0)
		irq = -1;

	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_cfg *cfg;

	dynamic_irq_cleanup_keep_chip_data(irq);

	free_irte(irq);
	spin_lock_irqsave(&vector_lock, flags);
	cfg = irq_to_desc(irq)->chip_data;
	__clear_irq_vector(irq, cfg);
	spin_unlock_irqrestore(&vector_lock, flags);
}
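/*
 * A typical user of the dynamic irq interface (illustrative only, not a
 * caller in this file) pairs the two calls:
 *
 *	int irq = create_irq();
 *	if (irq >= 0) {
 *		... request_irq(irq, handler, ...), use the irq ...
 *		destroy_irq(irq);
 *	}
 */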
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = apic->irq_dest_mode;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = apic->irq_delivery_mode;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		/* Set source-id of interrupt request */
		if (pdev)
			set_msi_sid(&irte, pdev);
		else
			set_hpet_sid(&irte, hpet_id);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else {
		if (x2apic_enabled())
			msg->address_hi = MSI_ADDR_BASE_HI |
					  MSI_ADDR_EXT_DEST_ID(dest);
		else
			msg->address_hi = MSI_ADDR_BASE_HI;

		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((apic->irq_dest_mode == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
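/*
 * Two message formats are composed above: with interrupt remapping the
 * MSI address/data only carry the IRTE index and sub_handle, while in
 * the native case the destination id and vector are encoded directly.
 */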
static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	cfg = desc->chip_data;

	read_msi_msg_desc(desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg_desc(desc, &msg);

	return 0;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static int
ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	struct irte irte;

	if (get_irte(irq, &irte))
		return -1;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
	.set_affinity	= set_msi_irq_affinity,
	.retrigger	= ioapic_retrigger_irq,
};

static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
#ifdef CONFIG_INTR_REMAP
	.ack		= ir_ack_apic_edge,
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg, -1);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int node, ret, sub_handle, index = 0;
	unsigned int irq, irq_want;
	struct msi_desc *msidesc;
	struct intel_iommu *iommu = NULL;

	/* x86 doesn't support multiple MSI yet */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	node = dev_to_node(&dev->dev);
	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want, node);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	cfg = desc->chip_data;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);

	return 0;
}

#endif /* CONFIG_SMP */
static struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
	.set_affinity	= dmar_msi_set_affinity,
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg, -1);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif
#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	cfg = desc->chip_data;

	hpet_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);

	return 0;
}

#endif /* CONFIG_SMP */
static struct irq_chip ir_hpet_msi_type = {
	.name		= "IR-HPET_MSI",
	.unmask		= hpet_msi_unmask,
	.mask		= hpet_msi_mask,
#ifdef CONFIG_INTR_REMAP
	.ack		= ir_ack_apic_edge,
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

static struct irq_chip hpet_msi_type = {
	.name		= "HPET_MSI",
	.unmask		= hpet_msi_unmask,
	.mask		= hpet_msi_mask,
	.ack		= ack_apic_edge,
	.set_affinity	= hpet_msi_set_affinity,
	.retrigger	= ioapic_retrigger_irq,
};
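/*
 * Called from the HPET code to set up the MSI of HPET channel 'id';
 * with interrupt remapping enabled a single IRTE is allocated for it
 * first.
 */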
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret;
	struct msi_msg msg;
	struct irq_desc *desc = irq_to_desc(irq);

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_hpet_to_ir(id);
		int index;

		if (!iommu)
			return -1;

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			return -1;
	}

	ret = msi_compose_msg(NULL, irq, &msg, id);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq, &msg);
	desc->status |= IRQ_MOVE_PCNTXT;
	if (irq_remapped(irq))
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");

	return 0;
}
#endif /* CONFIG_HPET_TIMER */

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

	cfg = desc->chip_data;

	target_ht_irq(irq, dest, cfg->vector);

	return 0;
}

#endif /* CONFIG_SMP */
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
	.set_affinity	= set_ht_irq_affinity,
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
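/*
 * The entry count comes from IO-APIC register 1 (the version register),
 * which reports the index of the highest redirection entry; hence
 * probe_nr_irqs_gsi() below adds 1 per IO-APIC to count pins.
 */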
void __init probe_nr_irqs_gsi(void)
{
	int nr = 0;

	nr = acpi_probe_gsi();
	if (nr > nr_irqs_gsi) {
		nr_irqs_gsi = nr;
	} else {
		/* for acpi=off or acpi is not compiled in */
		int idx;

		nr = 0;
		for (idx = 0; idx < nr_ioapics; idx++)
			nr += io_apic_get_redir_entries(idx) + 1;

		if (nr > nr_irqs_gsi)
			nr_irqs_gsi = nr;
	}

	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return 0;
}
#endif
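/*
 * The sizing heuristic above: room for every GSI, 8 extra irqs per cpu,
 * and (when MSI or HT is configured) 16 dynamic irqs per GSI, capped by
 * the existing nr_irqs value.
 */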
static int __io_apic_set_pci_routing(struct device *dev, int irq,
				     struct io_apic_irq_attr *irq_attr)
{
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int node, ioapic, pin;
	int trigger, polarity;

	ioapic = irq_attr->ioapic;
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			    ioapic);
		return -EINVAL;
	}

	if (dev)
		node = dev_to_node(dev);
	else
		node = cpu_to_node(boot_cpu_id);

	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc %d\n", irq);
		return 0;
	}

	pin = irq_attr->ioapic_pin;
	trigger = irq_attr->trigger;
	polarity = irq_attr->polarity;

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= nr_legacy_irqs) {
		cfg = desc->chip_data;
		if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
			printk(KERN_INFO "can not add pin %d for irq %d\n",
			       pin, irq);
			return 0;
		}
	}

	setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);

	return 0;
}
int io_apic_set_pci_routing(struct device *dev, int irq,
			    struct io_apic_irq_attr *irq_attr)
{
	int ioapic, pin;

	/*
	 * Avoid pin reprogramming. PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}
u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
}
#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
	 * only support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of the new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
		       "%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
		       "trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
		    "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
#endif
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == mp_INT &&
		    mp_irqs[i].srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}
/*
 * This function currently is only a helper for the i386 smp boot process
 * where we need to reprogram the ioredtbls to cater for the cpus which
 * have come online, so the mask in all cases should simply be
 * apic->target_cpus().
 */
void __init setup_ioapic_dest(void)
{
	int pin, ioapic = 0, irq, irq_entry;
	struct irq_desc *desc;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	if (!acpi_disabled && acpi_ioapic) {
		ioapic = mp_find_ioapic(0);
		if (ioapic < 0)
			ioapic = 0;
	}

	for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;
		irq = pin_2_irq(irq_entry, ioapic, pin);

		desc = irq_to_desc(irq);

		/*
		 * Honour affinities which have been set in early boot
		 */
		if (desc->status &
		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
			mask = desc->affinity;
		else
			mask = apic->target_cpus();

		if (intr_remapping_enabled)
			set_ir_ioapic_affinity_irq_desc(desc, mask);
		else
			set_ioapic_affinity_irq_desc(desc, mask);
	}
}
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	mem += sizeof(struct resource) * nr_ioapics;

	for (i = 0; i < nr_ioapics; i++) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
	}

	ioapic_resources = res;

	return res;
}
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources(nr_ioapics);
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			    ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}
void __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		if (nr_ioapics > 0)
			printk(KERN_ERR
			       "IO APIC resources couldn't be allocated.\n");
		return;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}
}
int mp_find_ioapic(int gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_gsi_routing[i].gsi_base)
		    && (gsi <= mp_gsi_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}
int mp_find_ioapic_pin(int ioapic, int gsi)
{
	if (WARN_ON(ioapic == -1))
		return -1;
	if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
		return -1;

	return gsi - mp_gsi_routing[ioapic].gsi_base;
}
static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
		       " found in table, skipping!\n");
		return 1;
	}
	return 0;
}
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].type = MP_IOAPIC;
	mp_ioapics[idx].flags = MPC_APIC_USABLE;
	mp_ioapics[idx].apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].apicid = io_apic_unique_id(id);
	mp_ioapics[idx].apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	mp_gsi_routing[idx].gsi_base = gsi_base;
	mp_gsi_routing[idx].gsi_end = gsi_base +
	    io_apic_get_redir_entries(idx);

	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
	       mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

	nr_ioapics++;
}