/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>
#define __apicdebuginit(type) static type __init
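/*
 * Illustrative note: __apicdebuginit() just stamps a debug helper as
 * init-time-only.  For example,
 *
 *	__apicdebuginit(void) print_IO_APIC(void)
 *
 * expands to
 *
 *	static void __init print_IO_APIC(void)
 *
 * so the debug printers further down are discarded after boot.
 */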
/*
 *      Is the SiS APIC rmw bug present ?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);
/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};
static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
{
	struct irq_pin_list *pin;
	int node;

	node = cpu_to_node(cpu);

	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);

	return pin;
}
struct irq_cfg {
	struct irq_pin_list *irq_2_pin;
	cpumask_var_t domain;
	cpumask_var_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
	u8 move_desc_pending : 1;
#endif
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
	[0]  = { .vector = IRQ0_VECTOR,  },
	[1]  = { .vector = IRQ1_VECTOR,  },
	[2]  = { .vector = IRQ2_VECTOR,  },
	[3]  = { .vector = IRQ3_VECTOR,  },
	[4]  = { .vector = IRQ4_VECTOR,  },
	[5]  = { .vector = IRQ5_VECTOR,  },
	[6]  = { .vector = IRQ6_VECTOR,  },
	[7]  = { .vector = IRQ7_VECTOR,  },
	[8]  = { .vector = IRQ8_VECTOR,  },
	[9]  = { .vector = IRQ9_VECTOR,  },
	[10] = { .vector = IRQ10_VECTOR, },
	[11] = { .vector = IRQ11_VECTOR, },
	[12] = { .vector = IRQ12_VECTOR, },
	[13] = { .vector = IRQ13_VECTOR, },
	[14] = { .vector = IRQ14_VECTOR, },
	[15] = { .vector = IRQ15_VECTOR, },
};
int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	int count;
	int i;

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);

	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
		alloc_bootmem_cpumask_var(&cfg[i].domain);
		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
		if (i < NR_IRQS_LEGACY)
			cpumask_setall(cfg[i].domain);
	}

	return 0;
}
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = NULL;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (desc)
		cfg = desc->chip_data;

	return cfg;
}
static struct irq_cfg *get_one_free_irq_cfg(int cpu)
{
	struct irq_cfg *cfg;
	int node;

	node = cpu_to_node(cpu);

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
							  GFP_ATOMIC, node)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		} else {
			cpumask_clear(cfg->domain);
			cpumask_clear(cfg->old_domain);
		}
	}

	return cfg;
}
int arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	struct irq_cfg *cfg;

	cfg = desc->chip_data;
	if (!cfg) {
		desc->chip_data = get_one_free_irq_cfg(cpu);
		if (!desc->chip_data) {
			printk(KERN_ERR "can not alloc irq_cfg\n");
			BUG_ON(1);
		}
	}

	return 0;
}
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC

static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
{
	struct irq_pin_list *old_entry, *head, *tail, *entry;

	cfg->irq_2_pin = NULL;
	old_entry = old_cfg->irq_2_pin;
	if (!old_entry)
		return;

	entry = get_one_free_irq_2_pin(cpu);
	if (!entry)
		return;

	entry->apic	= old_entry->apic;
	entry->pin	= old_entry->pin;
	head		= entry;
	tail		= entry;
	old_entry	= old_entry->next;
	while (old_entry) {
		entry = get_one_free_irq_2_pin(cpu);
		if (!entry) {
			entry = head;
			while (entry) {
				head = entry->next;
				kfree(entry);
				entry = head;
			}
			/* still use the old one */
			return;
		}
		entry->apic	= old_entry->apic;
		entry->pin	= old_entry->pin;
		tail->next	= entry;
		tail		= entry;
		old_entry	= old_entry->next;
	}

	tail->next = NULL;
	cfg->irq_2_pin = head;
}
static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry, *next;

	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
		return;

	entry = old_cfg->irq_2_pin;

	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}
	old_cfg->irq_2_pin = NULL;
}
void arch_init_copy_chip_data(struct irq_desc *old_desc,
				 struct irq_desc *desc, int cpu)
{
	struct irq_cfg *cfg;
	struct irq_cfg *old_cfg;

	cfg = get_one_free_irq_cfg(cpu);
	if (!cfg)
		return;

	desc->chip_data = cfg;

	old_cfg = old_desc->chip_data;

	memcpy(cfg, old_cfg, sizeof(struct irq_cfg));

	init_copy_irq_2_pin(old_cfg, cfg, cpu);
}
static void free_irq_cfg(struct irq_cfg *old_cfg)
{
	kfree(old_cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
	struct irq_cfg *old_cfg, *cfg;

	old_cfg = old_desc->chip_data;
	cfg = desc->chip_data;

	if (old_cfg == cfg)
		return;

	if (old_cfg) {
		free_irq_2_pin(old_cfg, cfg);
		free_irq_cfg(old_cfg);
		old_desc->chip_data = NULL;
	}
}
static void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg = desc->chip_data;

	if (!cfg->move_in_progress) {
		/* it means that domain is not changed */
		if (!cpumask_intersects(desc->affinity, mask))
			cfg->move_desc_pending = 1;
	}
}
#endif /* CONFIG_NUMA_MIGRATE_IRQ_DESC */
#else
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
#endif /* CONFIG_SPARSE_IRQ */
#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
static void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
}
#endif
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits of the entry.
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		cfg->move_cleanup_count = 0;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			cfg->move_cleanup_count++;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
#else
		io_apic_write(apic, 0x11 + pin*2, dest);
#endif
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
 * Either sets desc->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
 * leaves desc->affinity untouched.
 */
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return BAD_APICID;

	irq = desc->irq;
	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return BAD_APICID;

	cpumask_and(desc->affinity, cfg->domain, mask);
	set_extra_move_desc(desc, mask);

	return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}
static void
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	unsigned int irq;

	irq = desc->irq;
	cfg = desc->chip_data;

	spin_lock_irqsave(&ioapic_lock, flags);
	dest = set_desc_affinity(desc, mask);
	if (dest != BAD_APICID) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, cfg);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
{
	struct irq_pin_list *entry;

	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin(cpu);
		if (!entry) {
			printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
					apic, pin);
			return;
		}
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin(cpu);
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
}
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
}
static inline void io_apic_modify_irq(struct irq_cfg *cfg,
				int mask_and, int mask_or,
				void (*final)(struct irq_pin_list *entry))
{
	int pin;
	struct irq_pin_list *entry;

	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
		unsigned int reg;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
		reg &= mask_and;
		reg |= mask_or;
		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
		if (final)
			final(entry);
	}
}
static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
#ifdef CONFIG_X86_64
static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
#else /* CONFIG_X86_32 */
static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
			IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
#endif /* CONFIG_X86_32 */
static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void mask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unmask_IO_APIC_irq_desc(desc);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
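/*
 * Illustrative note - a worked "pirq=" example, assuming MAX_PIRQS is 8
 * (the value was not shown in the original excerpt): booting with
 * "pirq=15,11" yields ints[] = {2, 15, 11}, so the loop stores
 * pirq_entries[7] = 15 (PIRQ0) and pirq_entries[6] = 11 (PIRQ1) - the
 * array is filled from the top down, matching the "mapped upside down"
 * comment above.
 */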
#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			goto nomem;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}

	return 0;

nomem:
	while (apic >= 0)
		kfree(early_ioapic_entries[apic--]);
	memset(early_ioapic_entries, 0,
		ARRAY_SIZE(early_ioapic_entries));

	return -ENOMEM;
}
void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!early_ioapic_entries[apic])
			break;
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
		kfree(early_ioapic_entries[apic]);
		early_ioapic_entries[apic] = NULL;
	}
}
void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}
#endif
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}

EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < NR_IRQS_LEGACY) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
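/*
 * Illustrative note: the MP-table irqflag field decoded by the two
 * helpers below packs polarity in bits 1..0 (irqflag & 3) and trigger
 * mode in bits 3..2 ((irqflag >> 2) & 3); in both cases the value 0
 * means "conforms to the bus spec", which is where the per-bus
 * defaults above come in.
 */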
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
		polarity = 0;
		break;
	case 2: /* reserved */
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	case 3: /* low active */
		polarity = 1;
		break;
	default: /* invalid */
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	return polarity;
}
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, ie. bus-type dependent */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
			/* set before the switch */
			break;
		case MP_BUS_EISA: /* EISA pin */
			trigger = default_EISA_trigger(idx);
			break;
		case MP_BUS_PCI: /* PCI pin */
			/* set before the switch */
			break;
		case MP_BUS_MCA: /* MCA pin */
			trigger = default_MCA_trigger(idx);
			break;
		default:
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
#endif
		break;
	case 1: /* edge */
		trigger = 0;
		break;
	case 2: /* reserved */
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	case 3: /* level */
		trigger = 1;
		break;
	default: /* invalid */
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	return trigger;
}
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
int (*ioapic_renumber_irq)(int ioapic, int irq);

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
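/*
 * Illustrative note: for PCI, the loop above makes IRQ numbers global
 * across IO-APICs.  E.g. with two IO-APICs of 24 pins each, apic 1 /
 * pin 3 yields irq = nr_ioapic_registers[0] + pin = 24 + 3 = 27.
 */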
void lock_vector_lock(void)
{
	/* Used so the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
int setup_ioapic_entry(int apic_id, int irq,
		       struct IO_APIC_route_entry *entry,
		       unsigned int destination, int trigger,
		       int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = apic->irq_dest_mode;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = apic->irq_delivery_mode;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = desc->chip_data;

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, desc, trigger);
	if (irq < NR_IRQS_LEGACY)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic_id, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
	int apic_id, pin, idx, irq;
	int notcon = 0;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int cpu = boot_cpu_id;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
		for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {

			idx = find_irq_entry(apic_id, pin, mp_INT);
			if (idx == -1) {
				if (!notcon) {
					notcon = 1;
					apic_printk(APIC_VERBOSE,
						KERN_DEBUG " %d-%d",
						mp_ioapics[apic_id].apicid, pin);
				} else
					apic_printk(APIC_VERBOSE, " %d-%d",
						mp_ioapics[apic_id].apicid, pin);
				continue;
			}
			if (notcon) {
				apic_printk(APIC_VERBOSE,
					" (apicid-pin) not connected\n");
				notcon = 0;
			}

			irq = pin_2_irq(idx, apic_id, pin);

			/*
			 * Skip the timer IRQ if there's a quirk handler
			 * installed and if it returns 1:
			 */
			if (apic->multi_timer_check &&
					apic->multi_timer_check(apic_id, irq))
				continue;

			desc = irq_to_desc_alloc_cpu(irq, cpu);
			if (!desc) {
				printk(KERN_INFO "can not get irq_desc for %d\n", irq);
				continue;
			}
			cfg = desc->chip_data;
			add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);

			setup_IO_APIC_irq(apic_id, pin, irq, desc,
					irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled)
		return;
#endif

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	unsigned int irq;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(apic, 3);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			  " Stat Dmod Deli Vect:   \n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_desc(irq, desc) {
		struct irq_pin_list *entry;

		cfg = desc->chip_data;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	u64 icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {             /* !82489DX */
		if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {                       /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
}
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);
/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int apic;
	unsigned long flags;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see what if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
2024 * Do not trust the IO-APIC being empty at bootup
2030 * Not an __init, needed by the reboot code
2032 void disable_IO_APIC(void)
2035 * Clear the IO-APIC before rebooting:
2040 * If the i8259 is routed through an IOAPIC
2041 * Put that IOAPIC in virtual wire mode
2042 * so legacy interrupts can be delivered.
2044 * With interrupt-remapping, for now we will use virtual wire A mode,
2045 * as virtual wire B is little complex (need to configure both
2046 * IOAPIC RTE aswell as interrupt-remapping table entry).
2047 * As this gets called during crash dump, keep this simple for now.
2049 if (ioapic_i8259
.pin
!= -1 && !intr_remapping_enabled
) {
2050 struct IO_APIC_route_entry entry
;
2052 memset(&entry
, 0, sizeof(entry
));
2053 entry
.mask
= 0; /* Enabled */
2054 entry
.trigger
= 0; /* Edge */
2056 entry
.polarity
= 0; /* High */
2057 entry
.delivery_status
= 0;
2058 entry
.dest_mode
= 0; /* Physical */
2059 entry
.delivery_mode
= dest_ExtINT
; /* ExtInt */
2061 entry
.dest
= read_apic_id();
2064 * Add it to the IO-APIC irq-routing table:
2066 ioapic_write_entry(ioapic_i8259
.apic
, ioapic_i8259
.pin
, entry
);
2070 * Use virtual wire A mode when interrupt remapping is enabled.
2072 disconnect_bsp_APIC(!intr_remapping_enabled
&& ioapic_i8259
.pin
!= -1);
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

static void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
		return;

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			physid_mask_t tmp;
			tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
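/*
 * Illustrative note: the arithmetic above busy-waits ten timer periods -
 * with HZ=1000 that is mdelay(10), with HZ=250 it is mdelay(40) - and
 * then declares the timer working if more than 4 ticks actually arrived,
 * which leaves generous slack for the lost-tick cases listed in the
 * comment.
 */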
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;
	struct irq_cfg *cfg;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < NR_IRQS_LEGACY) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	cfg = irq_cfg(irq);
	__unmask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
2281 static int ioapic_retrigger_irq(unsigned int irq
)
2284 struct irq_cfg
*cfg
= irq_cfg(irq
);
2285 unsigned long flags
;
2287 spin_lock_irqsave(&vector_lock
, flags
);
2288 apic
->send_IPI_mask(cpumask_of(cpumask_first(cfg
->domain
)), cfg
->vector
);
2289 spin_unlock_irqrestore(&vector_lock
, flags
);
2294 static int ioapic_retrigger_irq(unsigned int irq
)
2296 apic
->send_IPI_self(irq_cfg(irq
)->vector
);
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying IRTE with vector and
 * destination. So irq migration for level triggered is a little more
 * complex compared to edge triggered migration. But the good news is, we
 * use the same algorithm for level triggered migration as we have today,
 * the only difference being that we now initiate the irq migration from
 * process context instead of interrupt context.
 *
 * In future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to IO-APIC RTE.
 */
static void
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return;

	irq = desc->irq;
	if (get_irte(irq, &irte))
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return;

	set_extra_move_desc(desc, mask);

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modified the IRTE and flushes the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(desc->affinity, mask);
}
static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
{
	int ret = -1;
	struct irq_cfg *cfg = desc->chip_data;

	mask_IO_APIC_irq_desc(desc);

	if (io_apic_level_ack_pending(cfg)) {
		/*
		 * Interrupt in progress. Migrating irq now will change the
		 * vector information in the IO-APIC RTE and that will confuse
		 * the EOI broadcast performed by cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq_desc(desc, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpumask_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq_desc(desc);

	return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
	unsigned int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
					    const struct cpumask *mask)
{
	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, mask);
		migrate_irq_remapped_level_desc(desc);
		return;
	}

	migrate_ioapic_irq_desc(desc, mask);
}

static void set_ir_ioapic_affinity_irq(unsigned int irq,
				       const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}
static void irq_complete_move(struct irq_desc **descp)
{
	struct irq_desc *desc = *descp;
	struct irq_cfg *cfg = desc->chip_data;
	unsigned vector, me;

	if (likely(!cfg->move_in_progress)) {
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
		if (likely(!cfg->move_desc_pending))
			return;

		/* domain has not changed, but affinity did */
		me = smp_processor_id();
		if (cpumask_test_cpu(me, desc->affinity)) {
			*descp = desc = move_irq_desc(desc, me);
			/* get the new one */
			cfg = desc->chip_data;
			cfg->move_desc_pending = 0;
		}
#endif
		return;
	}

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
		*descp = desc = move_irq_desc(desc, me);
		/* get the new one */
		cfg = desc->chip_data;
#endif
		send_cleanup_vector(cfg);
	}
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
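/*
 * irq_complete_move() is invoked from the ack paths below.  The irq entry
 * stubs store the one's complement of the vector in orig_ax, which is why
 * '~get_irq_regs()->orig_ax' recovers the arriving vector: the first
 * interrupt seen on the new vector/cpu pair triggers the cleanup IPI for
 * the old vectors.
 */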
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
	ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
	ack_x2APIC_irq();
}

#endif

static void ack_apic_edge(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_complete_move(&desc);
	move_native_irq(irq);
	ack_APIC_irq();
}

atomic_t irq_mis_count;
static void ack_apic_level(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_X86_32
	unsigned long v;
	int i;
#endif
	struct irq_cfg *cfg;
	int do_unmask_irq = 0;

	irq_complete_move(&desc);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq_desc(desc);
	}
#endif

#ifdef CONFIG_X86_32
	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets). Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as an edge-triggered one but the respective
	 * IRR bit gets set nevertheless. As a result the I/O unit expects an
	 * EOI message but it will never arrive and further interrupts are
	 * blocked from the source. The exact reason is so far unknown, but
	 * the phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually. We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt. We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul. --macro
	 */
	cfg = desc->chip_data;
	i = cfg->vector;

	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		cfg = desc->chip_data;
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_IO_APIC_irq_desc(desc);
	}

#ifdef CONFIG_X86_32
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(cfg);
		__unmask_and_level_IO_APIC_irq(cfg);
		spin_unlock(&ioapic_lock);
	}
#endif
}
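/*
 * irq_mis_count tallies how often the erratum workaround above had to
 * simulate an EOI; 32-bit kernels report it as the "MIS" line in
 * /proc/interrupts.
 */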
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
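/*
 * Edge triggered pins are registered with handle_edge_irq, so .ack above
 * is used for them; level triggered pins use handle_fasteoi_irq and thus
 * .eoi.  See ioapic_register_intr() for the handler selection.
 */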
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < NR_IRQS_LEGACY)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
		}
	}
}
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};
static void lapic_register_intr(int irq, struct irq_desc *desc)
{
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin  = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
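/*
 * The trick above: route the RTC pin (ISA IRQ8) as ExtINT and let a burst
 * of RTC periodic interrupts generate the INTA cycles that release a
 * stuck 8259A interrupt line, then restore the original RTE.
 */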
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_desc *desc = irq_to_desc(0);
	struct irq_cfg *cfg = desc->chip_data;
	int cpu = boot_cpu_id;
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);
#ifdef CONFIG_X86_32
	{
		unsigned int ver;

		ver = apic_read(APIC_LVR);
		ver = GET_APIC_VERSION(ver);
		timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
	}
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
#endif
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/* for edge trigger, setup_IO_APIC_irq already
			 * leaves it unmasked.
			 * so only need to unmask if it is level-trigger
			 * do we really have level trigger timer?
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_IO_APIC_irq_desc(desc);
		}
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0, desc);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
		"report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
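/*
 * Summary of the fallback ladder above: IRQ0 is tried through the
 * IO-APIC pin reported by the BIOS, then through the 8259A cascade pin,
 * then as a local APIC Virtual Wire IRQ, then as ExtINT; only when all
 * of these fail to produce timer ticks do we panic.
 */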
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1 << PIC_CASCADE_IR)
void __init setup_IO_APIC(void)
{

	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */

	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
#ifdef CONFIG_X86_32
	if (!acpi_ioapic)
		setup_ioapic_ids_from_mpc();
#endif
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}
/*
 *      Called after all the initialization is done. If we didn't find any
 *      APIC bugs then we can allow the modify fast path
 */

static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device * dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
static int nr_irqs_gsi = NR_IRQS_LEGACY;
/*
 * Dynamic irq allocate and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new = NULL;
	int cpu = boot_cpu_id;
	struct irq_desc *desc_new = NULL;

	irq = 0;
	if (irq_want < nr_irqs_gsi)
		irq_want = nr_irqs_gsi;

	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new < nr_irqs; new++) {
		desc_new = irq_to_desc_alloc_cpu(new, cpu);
		if (!desc_new) {
			printk(KERN_INFO "can not get irq_desc for %d\n", new);
			continue;
		}
		cfg_new = desc_new->chip_data;

		if (cfg_new->vector != 0)
			continue;
		if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
		/* restore it, in case dynamic_irq_init clear it */
		if (desc_new)
			desc_new->chip_data = cfg_new;
	}
	return irq;
}
int create_irq(void)
{
	unsigned int irq_want;
	int irq;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want);

	if (irq == 0)
		irq = -1;

	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/* store it, in case dynamic_irq_cleanup clear it */
	desc = irq_to_desc(irq);
	cfg = desc->chip_data;
	dynamic_irq_cleanup(irq);
	/* connect back irq_cfg */
	if (desc)
		desc->chip_data = cfg;

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	spin_unlock_irqrestore(&vector_lock, flags);
}
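#if 0
/*
 * Illustrative sketch only (not built): one plausible way a driver-like
 * user of the dynamic irq API above could pair create_irq() with
 * request_irq() and tear both down again.  'example_handler',
 * 'example_alloc_irq' and the "example" name are hypothetical.
 */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_alloc_irq(void)
{
	int irq = create_irq();		/* first free irq >= nr_irqs_gsi */

	if (irq < 0)
		return -ENOSPC;
	if (request_irq(irq, example_handler, 0, "example", NULL)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	return irq;			/* free_irq() + destroy_irq() to undo */
}
#endif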
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = apic->irq_dest_mode;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = apic->irq_delivery_mode;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		if (x2apic_enabled())
			msg->address_hi = MSI_ADDR_BASE_HI |
					  MSI_ADDR_EXT_DEST_ID(dest);
		else
			msg->address_hi = MSI_ADDR_BASE_HI;

		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((apic->irq_dest_mode == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
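/*
 * The composed message follows the APIC MSI layout: address_lo selects
 * physical/logical destination mode, CPU vs. lowest-priority redirection
 * and the destination APIC ID, while data carries the edge trigger,
 * assert level, delivery mode and vector.
 */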
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	read_msi_msg_desc(desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg_desc(desc, &msg);
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void
ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	struct irte irte;

	if (get_irte(irq, &irte))
		return;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
}

#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *msidesc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = 0;
	int index = 0;
#endif

	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
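/*
 * With interrupt remapping, only the first MSI vector of a block
 * allocates IRTEs (msi_alloc_irte() reserves 'nvec' consecutive
 * entries); each following vector merely binds its sub_handle to that
 * block via set_irte_irq().
 */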
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
}

#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.unmask = dmar_msi_unmask,
	.mask = dmar_msi_mask,
	.ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity = dmar_msi_set_affinity,
#endif
	.retrigger = ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif
#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	hpet_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);
}

#endif /* CONFIG_SMP */
struct irq_chip hpet_msi_type = {
	.name = "HPET_MSI",
	.unmask = hpet_msi_unmask,
	.mask = hpet_msi_mask,
	.ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity = hpet_msi_set_affinity,
#endif
	.retrigger = ioapic_retrigger_irq,
};

int arch_setup_hpet_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
		"edge");

	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	target_ht_irq(irq, dest, cfg->vector);
}

#endif
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
#ifdef CONFIG_X86_UV
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long flags;
	int err;

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	spin_lock_irqsave(&vector_lock, flags);
	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);
	spin_unlock_irqrestore(&vector_lock, flags);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->mask = 1;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
#endif /* CONFIG_X86_UV */
int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
void __init probe_nr_irqs_gsi(void)
{
	int nr = 0;

	nr = acpi_probe_gsi();
	if (nr > nr_irqs_gsi) {
		nr_irqs_gsi = nr;
	} else {
		/* for acpi=off or acpi is not compiled in */
		int idx;

		nr = 0;
		for (idx = 0; idx < nr_ioapics; idx++)
			nr += io_apic_get_redir_entries(idx) + 1;

		if (nr > nr_irqs_gsi)
			nr_irqs_gsi = nr;
	}

	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
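/*
 * Example: a box with two 24-pin IO-APICs and no usable ACPI GSI data
 * sums 2 * 24 = 48 redirection entries, so nr_irqs_gsi grows from
 * NR_IRQS_LEGACY (16) to 48.
 */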
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return 0;
}
#endif
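/*
 * Worked example: with nr_irqs_gsi == 48 and 8 possible cpus the probe
 * asks for 48 + 8 * 8 = 112 irqs, plus 48 * 16 = 768 more when MSI/HT
 * support is configured; nr_irqs is only ever lowered here, never raised.
 */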
/* --------------------------------------------------------------------------
                          ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * supports up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	tmp = apic->apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
#endif
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int cpu = boot_cpu_id;

	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc %d\n", irq);
		return 0;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= NR_IRQS_LEGACY) {
		cfg = desc->chip_data;
		add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
	}

	setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);

	return 0;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == mp_INT &&
		    mp_irqs[i].srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so mask in all cases should simply be apic->target_cpus()
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			desc = irq_to_desc(irq);
			cfg = desc->chip_data;
			if (!cfg->vector) {
				setup_IO_APIC_irq(ioapic, pin, irq, desc,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
				continue;
			}

			/*
			 * Honour affinities which have been set in early boot
			 */
			if (desc->status &
			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
				mask = desc->affinity;
			else
				mask = apic->target_cpus();

#ifdef CONFIG_INTR_REMAP
			if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq_desc(desc, mask);
			else
#endif
				set_ioapic_affinity_irq_desc(desc, mask);
		}

	}
}
#endif
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem,  "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
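/*
 * Note that the single bootmem allocation above holds both the resource
 * structs and their "IOAPIC %u" names: the name strings live directly
 * behind the nr_ioapics resource entries.
 */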
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);