/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
 *                     thanks to Eric Gilmore
 *                     for testing these extensively
 * Paul Diefenbaugh  : Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>

#include <mach_apic.h>
#include <mach_apicdef.h>

#define __apicdebuginit(type)	static type __init
/*
 * Is the SiS APIC rmw bug present ?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

static int __init parse_noapic(char *str)
{
    /* disable IO-APIC */
    disable_ioapic_setup();
    return 0;
}
early_param("noapic", parse_noapic);

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

struct irq_pin_list {
    int apic, pin;
    struct irq_pin_list *next;
};

static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
{
    struct irq_pin_list *pin;
    int node;

    node = cpu_to_node(cpu);

    pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
    printk(KERN_DEBUG "  alloc irq_2_pin on cpu %d node %d\n", cpu, node);

    return pin;
}
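/*
 * Note (editorial): the allocation above uses GFP_ATOMIC, which never
 * sleeps, so this helper stays safe on paths that run with interrupts
 * disabled or locks held.
 */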
struct irq_cfg {
    struct irq_pin_list *irq_2_pin;
    cpumask_var_t domain;
    cpumask_var_t old_domain;
    unsigned move_cleanup_count;
    u8 vector;
    u8 move_in_progress : 1;
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
    [0]  = { .vector = IRQ0_VECTOR,  },
    [1]  = { .vector = IRQ1_VECTOR,  },
    [2]  = { .vector = IRQ2_VECTOR,  },
    [3]  = { .vector = IRQ3_VECTOR,  },
    [4]  = { .vector = IRQ4_VECTOR,  },
    [5]  = { .vector = IRQ5_VECTOR,  },
    [6]  = { .vector = IRQ6_VECTOR,  },
    [7]  = { .vector = IRQ7_VECTOR,  },
    [8]  = { .vector = IRQ8_VECTOR,  },
    [9]  = { .vector = IRQ9_VECTOR,  },
    [10] = { .vector = IRQ10_VECTOR, },
    [11] = { .vector = IRQ11_VECTOR, },
    [12] = { .vector = IRQ12_VECTOR, },
    [13] = { .vector = IRQ13_VECTOR, },
    [14] = { .vector = IRQ14_VECTOR, },
    [15] = { .vector = IRQ15_VECTOR, },
};
void __init arch_early_irq_init(void)
{
    struct irq_cfg *cfg;
    struct irq_desc *desc;
    int count;
    int i;

    cfg = irq_cfgx;
    count = ARRAY_SIZE(irq_cfgx);

    for (i = 0; i < count; i++) {
        desc = irq_to_desc(i);
        desc->chip_data = &cfg[i];
        alloc_bootmem_cpumask_var(&cfg[i].domain);
        alloc_bootmem_cpumask_var(&cfg[i].old_domain);
        if (i < NR_IRQS_LEGACY)
            cpumask_setall(cfg[i].domain);
    }
}
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
    struct irq_cfg *cfg = NULL;
    struct irq_desc *desc;

    desc = irq_to_desc(irq);
    if (desc)
        cfg = desc->chip_data;

    return cfg;
}

static struct irq_cfg *get_one_free_irq_cfg(int cpu)
{
    struct irq_cfg *cfg;
    int node;

    node = cpu_to_node(cpu);

    cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
    if (cfg) {
        /* FIXME: needs alloc_cpumask_var_node() */
        if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
            kfree(cfg);
            cfg = NULL;
        } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
            free_cpumask_var(cfg->domain);
            kfree(cfg);
            cfg = NULL;
        } else {
            cpumask_clear(cfg->domain);
            cpumask_clear(cfg->old_domain);
        }
    }
    printk(KERN_DEBUG "  alloc irq_cfg on cpu %d node %d\n", cpu, node);

    return cfg;
}
void arch_init_chip_data(struct irq_desc *desc, int cpu)
{
    struct irq_cfg *cfg;

    cfg = desc->chip_data;
    if (!cfg) {
        desc->chip_data = get_one_free_irq_cfg(cpu);
        if (!desc->chip_data) {
            printk(KERN_ERR "can not alloc irq_cfg\n");
            BUG_ON(1);
        }
    }
}

#else
static struct irq_cfg *irq_cfg(unsigned int irq)
{
    return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
#endif

static inline void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
}

struct io_apic {
    unsigned int index;
    unsigned int unused[3];
    unsigned int data;
};
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
    return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
        + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
    struct io_apic __iomem *io_apic = io_apic_base(apic);
    writel(reg, &io_apic->index);
    return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
    struct io_apic __iomem *io_apic = io_apic_base(apic);
    writel(reg, &io_apic->index);
    writel(value, &io_apic->data);
}
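/*
 * Editorial note: the two accessors above implement the IO-APIC's
 * indirect register window — the register number is first written to
 * the 'index' register, then the value is read from or written to the
 * 'data' register on the same MMIO page.  As an illustration (a sketch,
 * not code from this file), reading the version register (register 1)
 * of IO-APIC 0 is:
 *
 *     unsigned int ver = io_apic_read(0, 1);
 *
 * which expands to writel(1, &io_apic->index) followed by
 * readl(&io_apic->data).  Callers are expected to hold ioapic_lock;
 * the helpers themselves do no locking.
 */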
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
    struct io_apic __iomem *io_apic = io_apic_base(apic);

    if (sis_apic_bug)
        writel(reg, &io_apic->index);
    writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
    struct irq_pin_list *entry;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    entry = cfg->irq_2_pin;
    for (;;) {
        unsigned int reg;
        int pin;

        if (!entry)
            break;
        pin = entry->pin;
        reg = io_apic_read(entry->apic, 0x10 + pin*2);
        /* Is the remote IRR bit set? */
        if (reg & IO_APIC_REDIR_REMOTE_IRR) {
            spin_unlock_irqrestore(&ioapic_lock, flags);
            return true;
        }
        if (!entry->next)
            break;
        entry = entry->next;
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return false;
}

union entry_union {
    struct { u32 w1, w2; };
    struct IO_APIC_route_entry entry;
};
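/*
 * Editorial note: the union above overlays the 64-bit routing entry
 * with two 32-bit words — w1 is the low dword (vector, delivery mode,
 * trigger and mask bits) living at register 0x10 + 2*pin, w2 the high
 * dword (destination) at 0x11 + 2*pin.  The read/write helpers below
 * use it to transfer one RTE as two register accesses.
 */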
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
    union entry_union eu;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
    eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
    union entry_union eu;

    eu.entry = e;
    io_apic_write(apic, 0x11 + 2*pin, eu.w2);
    io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    __ioapic_write_entry(apic, pin, e);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high word.
 */
static void ioapic_mask_entry(int apic, int pin)
{
    unsigned long flags;
    union entry_union eu = { .entry.mask = 1 };

    spin_lock_irqsave(&ioapic_lock, flags);
    io_apic_write(apic, 0x10 + 2*pin, eu.w1);
    io_apic_write(apic, 0x11 + 2*pin, eu.w2);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}
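/*
 * Editorial note: taken together, the two ordering rules above are
 * symmetric — when enabling an entry, write the high word first so the
 * entry is complete before the mask bit can clear; when masking, write
 * the low word first so the mask bit is set before the rest of the
 * entry changes.
 */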
#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
    cpumask_var_t cleanup_mask;

    if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
        unsigned int i;
        cfg->move_cleanup_count = 0;
        for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
            cfg->move_cleanup_count++;
        for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
            send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
    } else {
        cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
        cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
        send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
        free_cpumask_var(cleanup_mask);
    }
    cfg->move_in_progress = 0;
}

static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
    int apic, pin;
    struct irq_pin_list *entry;
    u8 vector = cfg->vector;

    entry = cfg->irq_2_pin;
    for (;;) {
        unsigned int reg;

        if (!entry)
            break;

        apic = entry->apic;
        pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
        /*
         * With interrupt-remapping, destination information comes
         * from interrupt-remapping table entry.
         */
        if (!irq_remapped(irq))
            io_apic_write(apic, 0x11 + pin*2, dest);
#else
        io_apic_write(apic, 0x11 + pin*2, dest);
#endif
        reg = io_apic_read(apic, 0x10 + pin*2);
        reg &= ~IO_APIC_REDIR_VECTOR_MASK;
        reg |= vector;
        io_apic_modify(apic, 0x10 + pin*2, reg);
        if (!entry->next)
            break;
        entry = entry->next;
    }
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);

/*
 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
 * of that, or returns BAD_APICID and leaves desc->affinity untouched.
 */
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
    struct irq_cfg *cfg;
    unsigned int irq;

    if (!cpumask_intersects(mask, cpu_online_mask))
        return BAD_APICID;

    irq = desc->irq;
    cfg = desc->chip_data;
    if (assign_irq_vector(irq, cfg, mask))
        return BAD_APICID;

    cpumask_and(&desc->affinity, cfg->domain, mask);
    set_extra_move_desc(desc, mask);
    return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
}

static void
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
    struct irq_cfg *cfg;
    unsigned long flags;
    unsigned int dest;
    unsigned int irq;

    irq = desc->irq;
    cfg = desc->chip_data;

    spin_lock_irqsave(&ioapic_lock, flags);
    dest = set_desc_affinity(desc, mask);
    if (dest != BAD_APICID) {
        /* Only the high 8 bits are valid. */
        dest = SET_APIC_LOGICAL_ID(dest);
        __target_IO_APIC_irq(irq, dest, cfg);
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
    struct irq_desc *desc;

    desc = irq_to_desc(irq);

    set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
{
    struct irq_pin_list *entry;

    entry = cfg->irq_2_pin;
    if (!entry) {
        entry = get_one_free_irq_2_pin(cpu);
        if (!entry) {
            printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
                   apic, pin);
            return;
        }
        cfg->irq_2_pin = entry;
        entry->apic = apic;
        entry->pin = pin;
        return;
    }

    while (entry->next) {
        /* not again, please */
        if (entry->apic == apic && entry->pin == pin)
            return;

        entry = entry->next;
    }

    entry->next = get_one_free_irq_2_pin(cpu);
    entry = entry->next;
    entry->apic = apic;
    entry->pin = pin;
}
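/*
 * Editorial illustration (hypothetical numbers): an ISA IRQ shared
 * between two IO-APIC inputs ends up with a two-element list such as
 * (apic 0, pin 13) -> (apic 1, pin 5), while the common 1:1 case stays
 * a single-element list, which keeps the usual lookup O(1).
 */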
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
                                          int oldapic, int oldpin,
                                          int newapic, int newpin)
{
    struct irq_pin_list *entry = cfg->irq_2_pin;
    int replaced = 0;

    while (entry) {
        if (entry->apic == oldapic && entry->pin == oldpin) {
            entry->apic = newapic;
            entry->pin = newpin;
            replaced = 1;
            /* every one is different, right? */
            break;
        }
        entry = entry->next;
    }

    /* why? call replace before add? */
    if (!replaced)
        add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
}

static inline void io_apic_modify_irq(struct irq_cfg *cfg,
                                      int mask_and, int mask_or,
                                      void (*final)(struct irq_pin_list *entry))
{
    int pin;
    struct irq_pin_list *entry;

    for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
        unsigned int reg;
        pin = entry->pin;
        reg = io_apic_read(entry->apic, 0x10 + pin * 2);
        reg &= mask_and;
        reg |= mask_or;
        io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
        if (final)
            final(entry);
    }
}
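/*
 * Editorial note: each pin's low RTE word is rewritten as
 * reg = (reg & mask_and) | mask_or, so masking an irq is
 * io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, ...) and unmasking
 * is io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, ...), as the
 * wrappers below show.  The 'final' callback lets x86-64 flush each
 * write with a dummy read (io_apic_sync).
 */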
static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
    io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

#ifdef CONFIG_X86_64
void io_apic_sync(struct irq_pin_list *entry)
{
    /*
     * Synchronize the IO-APIC and the CPU by doing
     * a dummy read from the IO-APIC
     */
    struct io_apic __iomem *io_apic;
    io_apic = io_apic_base(entry->apic);
    readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
    io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
#else /* CONFIG_X86_32 */
static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
    io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
{
    io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
                       IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
{
    io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
                       IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
#endif /* CONFIG_X86_32 */
static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
    struct irq_cfg *cfg = desc->chip_data;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    __mask_IO_APIC_irq(cfg);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
    struct irq_cfg *cfg = desc->chip_data;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    __unmask_IO_APIC_irq(cfg);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_IO_APIC_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    unmask_IO_APIC_irq_desc(desc);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
    struct IO_APIC_route_entry entry;

    /* Check delivery_mode to be sure we're not clearing an SMI pin */
    entry = ioapic_read_entry(apic, pin);
    if (entry.delivery_mode == dest_SMI)
        return;
    /*
     * Disable it in the IO-APIC irq-routing table:
     */
    ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
    int apic, pin;

    for (apic = 0; apic < nr_ioapics; apic++)
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
            clear_IO_APIC_pin(apic, pin);
}

#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
void send_IPI_self(int vector)
{
    unsigned int cfg;

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();
    cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP && CONFIG_X86_32*/
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS];
static int pirqs_enabled;

static int __init ioapic_pirq_setup(char *str)
{
    int i, max;
    int ints[MAX_PIRQS+1];

    get_options(str, ARRAY_SIZE(ints), ints);

    for (i = 0; i < MAX_PIRQS; i++)
        pirq_entries[i] = -1;

    pirqs_enabled = 1;
    apic_printk(APIC_VERBOSE, KERN_INFO
                "PIRQ redirection, working around broken MP-BIOS.\n");
    max = MAX_PIRQS;
    if (ints[0] < MAX_PIRQS)
        max = ints[0];

    for (i = 0; i < max; i++) {
        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
        /*
         * PIRQs are mapped upside down, usually.
         */
        pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
    }
    return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
    union IO_APIC_reg_01 reg_01;
    unsigned long flags;
    int apic, pin;

    /*
     * The number of IO-APIC IRQ registers (== #pins):
     */
    for (apic = 0; apic < nr_ioapics; apic++) {
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_01.raw = io_apic_read(apic, 1);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        nr_ioapic_registers[apic] = reg_01.bits.entries+1;
    }

    for (apic = 0; apic < nr_ioapics; apic++) {
        early_ioapic_entries[apic] =
            kzalloc(sizeof(struct IO_APIC_route_entry) *
                    nr_ioapic_registers[apic], GFP_KERNEL);
        if (!early_ioapic_entries[apic])
            goto nomem;
    }

    for (apic = 0; apic < nr_ioapics; apic++)
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            struct IO_APIC_route_entry entry;

            entry = early_ioapic_entries[apic][pin] =
                ioapic_read_entry(apic, pin);
            if (!entry.mask) {
                entry.mask = 1;
                ioapic_write_entry(apic, pin, entry);
            }
        }

    return 0;

nomem:
    while (apic >= 0)
        kfree(early_ioapic_entries[apic--]);
    memset(early_ioapic_entries, 0,
           ARRAY_SIZE(early_ioapic_entries));

    return -ENOMEM;
}

void restore_IO_APIC_setup(void)
{
    int apic, pin;

    for (apic = 0; apic < nr_ioapics; apic++) {
        if (!early_ioapic_entries[apic])
            break;
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
            ioapic_write_entry(apic, pin,
                               early_ioapic_entries[apic][pin]);
        kfree(early_ioapic_entries[apic]);
        early_ioapic_entries[apic] = NULL;
    }
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
    /*
     * for now plain restore of previous settings.
     * TBD: In the case of OS enabling interrupt-remapping,
     * IO-APIC RTE's need to be setup to point to interrupt-remapping
     * table entries. for now, do a plain restore, and wait for
     * the setup_IO_APIC_irqs() to do proper initialization.
     */
    restore_IO_APIC_setup();
}
#endif
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++)
        if (mp_irqs[i].mp_irqtype == type &&
            (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
             mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
            mp_irqs[i].mp_dstirq == pin)
            return i;

    return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++) {
        int lbus = mp_irqs[i].mp_srcbus;

        if (test_bit(lbus, mp_bus_not_pci) &&
            (mp_irqs[i].mp_irqtype == type) &&
            (mp_irqs[i].mp_srcbusirq == irq))

            return mp_irqs[i].mp_dstirq;
    }
    return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++) {
        int lbus = mp_irqs[i].mp_srcbus;

        if (test_bit(lbus, mp_bus_not_pci) &&
            (mp_irqs[i].mp_irqtype == type) &&
            (mp_irqs[i].mp_srcbusirq == irq))
            break;
    }
    if (i < mp_irq_entries) {
        int apic;
        for (apic = 0; apic < nr_ioapics; apic++) {
            if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
                return apic;
        }
    }

    return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
    int apic, i, best_guess = -1;

    apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
                bus, slot, pin);
    if (test_bit(bus, mp_bus_not_pci)) {
        apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
        return -1;
    }
    for (i = 0; i < mp_irq_entries; i++) {
        int lbus = mp_irqs[i].mp_srcbus;

        for (apic = 0; apic < nr_ioapics; apic++)
            if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
                mp_irqs[i].mp_dstapic == MP_APIC_ALL)
                break;

        if (!test_bit(lbus, mp_bus_not_pci) &&
            !mp_irqs[i].mp_irqtype &&
            (bus == lbus) &&
            (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
            int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

            if (!(apic || IO_APIC_IRQ(irq)))
                continue;

            if (pin == (mp_irqs[i].mp_srcbusirq & 3))
                return irq;
            /*
             * Use the first all-but-pin matching entry as a
             * best-guess fuzzy result for broken mptables.
             */
            if (best_guess < 0)
                best_guess = irq;
        }
    }
    return best_guess;
}

EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
    if (irq < NR_IRQS_LEGACY) {
        unsigned int port = 0x4d0 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
    }
    apic_printk(APIC_VERBOSE, KERN_INFO
                "Broken MPtable reports ISA irq %d\n", irq);
    return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
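/*
 * Editorial summary of the per-bus defaults above
 * (trigger: 0 = edge, 1 = level; polarity: 0 = active high, 1 = active low):
 *
 *	bus	trigger			polarity
 *	ISA	edge			high
 *	EISA	read from the ELCR	high
 *	PCI	level			low
 *	MCA	level			high
 */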
static int MPBIOS_polarity(int idx)
{
    int bus = mp_irqs[idx].mp_srcbus;
    int polarity;

    /*
     * Determine IRQ line polarity (high active or low active):
     */
    switch (mp_irqs[idx].mp_irqflag & 3)
    {
        case 0: /* conforms, ie. bus-type dependent polarity */
            if (test_bit(bus, mp_bus_not_pci))
                polarity = default_ISA_polarity(idx);
            else
                polarity = default_PCI_polarity(idx);
            break;
        case 1: /* high active */
        {
            polarity = 0;
            break;
        }
        case 2: /* reserved */
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            polarity = 1;
            break;
        }
        case 3: /* low active */
        {
            polarity = 1;
            break;
        }
        default: /* invalid */
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            polarity = 1;
            break;
        }
    }
    return polarity;
}

static int MPBIOS_trigger(int idx)
{
    int bus = mp_irqs[idx].mp_srcbus;
    int trigger;

    /*
     * Determine IRQ trigger mode (edge or level sensitive):
     */
    switch ((mp_irqs[idx].mp_irqflag >> 2) & 3)
    {
        case 0: /* conforms, ie. bus-type dependent */
            if (test_bit(bus, mp_bus_not_pci))
                trigger = default_ISA_trigger(idx);
            else
                trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
            switch (mp_bus_id_to_type[bus]) {
                case MP_BUS_ISA: /* ISA pin */
                {
                    /* set before the switch */
                    break;
                }
                case MP_BUS_EISA: /* EISA pin */
                {
                    trigger = default_EISA_trigger(idx);
                    break;
                }
                case MP_BUS_PCI: /* PCI pin */
                {
                    /* set before the switch */
                    break;
                }
                case MP_BUS_MCA: /* MCA pin */
                {
                    trigger = default_MCA_trigger(idx);
                    break;
                }
                default:
                {
                    printk(KERN_WARNING "broken BIOS!!\n");
                    trigger = 1;
                    break;
                }
            }
#endif
            break;
        case 1: /* edge */
        {
            trigger = 0;
            break;
        }
        case 2: /* reserved */
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            trigger = 1;
            break;
        }
        case 3: /* level */
        {
            trigger = 1;
            break;
        }
        default: /* invalid */
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            trigger = 0;
            break;
        }
    }
    return trigger;
}

static inline int irq_polarity(int idx)
{
    return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
    return MPBIOS_trigger(idx);
}
int (*ioapic_renumber_irq)(int ioapic, int irq);

static int pin_2_irq(int idx, int apic, int pin)
{
    int irq, i;
    int bus = mp_irqs[idx].mp_srcbus;

    /*
     * Debugging check, we are in big trouble if this message pops up!
     */
    if (mp_irqs[idx].mp_dstirq != pin)
        printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

    if (test_bit(bus, mp_bus_not_pci)) {
        irq = mp_irqs[idx].mp_srcbusirq;
    } else {
        /*
         * PCI IRQs are mapped in order
         */
        i = irq = 0;
        while (i < apic)
            irq += nr_ioapic_registers[i++];
        irq += pin;
        /*
         * For MPS mode, so far only needed by ES7000 platform
         */
        if (ioapic_renumber_irq)
            irq = ioapic_renumber_irq(apic, irq);
    }

#ifdef CONFIG_X86_32
    /*
     * PCI IRQ command line redirection. Yes, limits are hardcoded.
     */
    if ((pin >= 16) && (pin <= 23)) {
        if (pirq_entries[pin-16] != -1) {
            if (!pirq_entries[pin-16]) {
                apic_printk(APIC_VERBOSE, KERN_DEBUG
                            "disabling PIRQ%d\n", pin-16);
            } else {
                irq = pirq_entries[pin-16];
                apic_printk(APIC_VERBOSE, KERN_DEBUG
                            "using PIRQ%d -> IRQ %d\n",
                            pin-16, irq);
            }
        }
    }
#endif

    return irq;
}

void lock_vector_lock(void)
{
    /* Used so that the online set of cpus does not change
     * during assign_irq_vector.
     */
    spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
    spin_unlock(&vector_lock);
}
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
    /*
     * NOTE! The local APIC isn't very good at handling
     * multiple interrupts at the same interrupt level.
     * As the interrupt level is determined by taking the
     * vector number and shifting that right by 4, we
     * want to spread these out a bit so that they don't
     * all fall in the same interrupt level.
     *
     * Also, we've got to be careful not to trash gate
     * 0x80, because int 0x80 is hm, kind of importantish. ;)
     */
    static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
    unsigned int old_vector;
    int cpu, err;
    cpumask_var_t tmp_mask;

    if ((cfg->move_in_progress) || cfg->move_cleanup_count)
        return -EBUSY;

    if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
        return -ENOMEM;

    old_vector = cfg->vector;
    if (old_vector) {
        cpumask_and(tmp_mask, mask, cpu_online_mask);
        cpumask_and(tmp_mask, cfg->domain, tmp_mask);
        if (!cpumask_empty(tmp_mask)) {
            free_cpumask_var(tmp_mask);
            return 0;
        }
    }

    /* Only try and allocate irqs on cpus that are present */
    err = -ENOSPC;
    for_each_cpu_and(cpu, mask, cpu_online_mask) {
        int new_cpu;
        int vector, offset;

        vector_allocation_domain(cpu, tmp_mask);

        vector = current_vector;
        offset = current_offset;
next:
        vector += 8;
        if (vector >= first_system_vector) {
            /* If out of vectors on large boxen, must share them. */
            offset = (offset + 1) % 8;
            vector = FIRST_DEVICE_VECTOR + offset;
        }
        if (unlikely(current_vector == vector))
            continue;
#ifdef CONFIG_X86_64
        if (vector == IA32_SYSCALL_VECTOR)
            goto next;
#else
        if (vector == SYSCALL_VECTOR)
            goto next;
#endif
        for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
            if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                goto next;
        /* Found one! */
        current_vector = vector;
        current_offset = offset;
        if (old_vector) {
            cfg->move_in_progress = 1;
            cpumask_copy(cfg->old_domain, cfg->domain);
        }
        for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
            per_cpu(vector_irq, new_cpu)[vector] = irq;
        cfg->vector = vector;
        cpumask_copy(cfg->domain, tmp_mask);
        err = 0;
        break;
    }
    free_cpumask_var(tmp_mask);
    return err;
}

static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
    int err;
    unsigned long flags;

    spin_lock_irqsave(&vector_lock, flags);
    err = __assign_irq_vector(irq, cfg, mask);
    spin_unlock_irqrestore(&vector_lock, flags);
    return err;
}
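/*
 * Editorial note on the vector arithmetic above: the interrupt priority
 * level is (vector >> 4), so stepping the scan by 8 and rotating through
 * 8 offsets spreads consecutive allocations over different levels —
 * e.g. (hypothetical values) 0x31 -> 0x39 (still level 3) -> 0x41
 * (level 4) — instead of exhausting one 16-vector level at a time.
 */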
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
    int cpu, vector;

    BUG_ON(!cfg->vector);

    vector = cfg->vector;
    for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
        per_cpu(vector_irq, cpu)[vector] = -1;

    cfg->vector = 0;
    cpumask_clear(cfg->domain);

    if (likely(!cfg->move_in_progress))
        return;
    for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
             vector++) {
            if (per_cpu(vector_irq, cpu)[vector] != irq)
                continue;
            per_cpu(vector_irq, cpu)[vector] = -1;
            break;
        }
    }
    cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
    /* Initialize vector_irq on a new cpu */
    /* This function must be called with vector_lock held */
    int irq, vector;
    struct irq_cfg *cfg;
    struct irq_desc *desc;

    /* Mark the inuse vectors */
    for_each_irq_desc(irq, desc) {
        cfg = desc->chip_data;
        if (!cpumask_test_cpu(cpu, cfg->domain))
            continue;
        vector = cfg->vector;
        per_cpu(vector_irq, cpu)[vector] = irq;
    }
    /* Mark the free vectors */
    for (vector = 0; vector < NR_VECTORS; ++vector) {
        irq = per_cpu(vector_irq, cpu)[vector];
        if (irq < 0)
            continue;

        cfg = irq_cfg(irq);
        if (!cpumask_test_cpu(cpu, cfg->domain))
            per_cpu(vector_irq, cpu)[vector] = -1;
    }
}

static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
    int apic, idx, pin;

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            idx = find_irq_entry(apic, pin, mp_INT);
            if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
                return irq_trigger(idx);
        }
    }
    /*
     * nonexistent IRQs are edge default
     */
    return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
    return 1;
}
#endif

static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{
    if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
        trigger == IOAPIC_LEVEL)
        desc->status |= IRQ_LEVEL;
    else
        desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
    if (irq_remapped(irq)) {
        desc->status |= IRQ_MOVE_PCNTXT;
        if (trigger)
            set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                          handle_fasteoi_irq,
                                          "fasteoi");
        else
            set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                          handle_edge_irq, "edge");
        return;
    }
#endif
    if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
        trigger == IOAPIC_LEVEL)
        set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                      handle_fasteoi_irq,
                                      "fasteoi");
    else
        set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                      handle_edge_irq, "edge");
}
static int setup_ioapic_entry(int apic, int irq,
                              struct IO_APIC_route_entry *entry,
                              unsigned int destination, int trigger,
                              int polarity, int vector)
{
    /*
     * add it to the IO-APIC irq-routing table:
     */
    memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
    if (intr_remapping_enabled) {
        struct intel_iommu *iommu = map_ioapic_to_ir(apic);
        struct irte irte;
        struct IR_IO_APIC_route_entry *ir_entry =
            (struct IR_IO_APIC_route_entry *) entry;
        int index;

        if (!iommu)
            panic("No mapping iommu for ioapic %d\n", apic);

        index = alloc_irte(iommu, irq, 1);
        if (index < 0)
            panic("Failed to allocate IRTE for ioapic %d\n", apic);

        memset(&irte, 0, sizeof(irte));

        irte.present = 1;
        irte.dst_mode = INT_DEST_MODE;
        irte.trigger_mode = trigger;
        irte.dlvry_mode = INT_DELIVERY_MODE;
        irte.vector = vector;
        irte.dest_id = IRTE_DEST(destination);

        modify_irte(irq, &irte);

        ir_entry->index2 = (index >> 15) & 0x1;
        ir_entry->zero = 0;
        ir_entry->format = 1;
        ir_entry->index = (index & 0x7fff);
    } else
#endif
    {
        entry->delivery_mode = INT_DELIVERY_MODE;
        entry->dest_mode = INT_DEST_MODE;
        entry->dest = destination;
    }

    entry->mask = 0;	/* enable IRQ */
    entry->trigger = trigger;
    entry->polarity = polarity;
    entry->vector = vector;

    /* Mask level triggered irqs.
     * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
     */
    if (trigger)
        entry->mask = 1;
    return 0;
}
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
                              int trigger, int polarity)
{
    struct irq_cfg *cfg;
    struct IO_APIC_route_entry entry;
    unsigned int dest;

    if (!IO_APIC_IRQ(irq))
        return;

    cfg = desc->chip_data;

    if (assign_irq_vector(irq, cfg, TARGET_CPUS))
        return;

    dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

    apic_printk(APIC_VERBOSE, KERN_DEBUG
                "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                "IRQ %d Mode:%i Active:%i)\n",
                apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
                irq, trigger, polarity);

    if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
                           dest, trigger, polarity, cfg->vector)) {
        printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
               mp_ioapics[apic].mp_apicid, pin);
        __clear_irq_vector(irq, cfg);
        return;
    }

    ioapic_register_intr(irq, desc, trigger);
    if (irq < NR_IRQS_LEGACY)
        disable_8259A_irq(irq);

    ioapic_write_entry(apic, pin, entry);
}
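/*
 * Editorial note: the sequence above is — allocate a vector/domain for
 * the irq, build the routing entry for that destination, register the
 * level- or edge-flow handler, and only then write the RTE; for legacy
 * irqs the 8259A line is disabled first so the PIC does not also
 * deliver the same interrupt.
 */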
static void __init setup_IO_APIC_irqs(void)
{
    int apic, pin, idx, irq;
    int notcon = 0;
    struct irq_desc *desc;
    struct irq_cfg *cfg;
    int cpu = boot_cpu_id;

    apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

            idx = find_irq_entry(apic, pin, mp_INT);
            if (idx == -1) {
                if (!notcon) {
                    notcon = 1;
                    apic_printk(APIC_VERBOSE,
                                KERN_DEBUG " %d-%d",
                                mp_ioapics[apic].mp_apicid,
                                pin);
                } else
                    apic_printk(APIC_VERBOSE, " %d-%d",
                                mp_ioapics[apic].mp_apicid,
                                pin);
                continue;
            }
            if (notcon) {
                apic_printk(APIC_VERBOSE,
                            " (apicid-pin) not connected\n");
                notcon = 0;
            }

            irq = pin_2_irq(idx, apic, pin);
#ifdef CONFIG_X86_32
            if (multi_timer_check(apic, irq))
                continue;
#endif
            desc = irq_to_desc_alloc_cpu(irq, cpu);
            if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                continue;
            }
            cfg = desc->chip_data;
            add_pin_to_irq_cpu(cfg, cpu, apic, pin);

            setup_IO_APIC_irq(apic, pin, irq, desc,
                              irq_trigger(idx), irq_polarity(idx));
        }
    }

    if (notcon)
        apic_printk(APIC_VERBOSE,
                    " (apicid-pin) not connected\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
                                        int vector)
{
    struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
    if (intr_remapping_enabled)
        return;
#endif

    memset(&entry, 0, sizeof(entry));

    /*
     * We use logical delivery to get the timer IRQ
     * to the first CPU.
     */
    entry.dest_mode = INT_DEST_MODE;
    entry.mask = 1;	/* mask IRQ now */
    entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.polarity = 0;
    entry.trigger = 0;
    entry.vector = vector;

    /*
     * The timer IRQ doesn't have to know that behind the
     * scene we may have a 8259A-master in AEOI mode ...
     */
    set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

    /*
     * Add it to the IO-APIC irq-routing table:
     */
    ioapic_write_entry(apic, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
    int apic, i;
    union IO_APIC_reg_00 reg_00;
    union IO_APIC_reg_01 reg_01;
    union IO_APIC_reg_02 reg_02;
    union IO_APIC_reg_03 reg_03;
    unsigned long flags;
    struct irq_cfg *cfg;
    struct irq_desc *desc;
    unsigned int irq;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
    for (i = 0; i < nr_ioapics; i++)
        printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
               mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

    /*
     * We are a bit conservative about what we expect.  We have to
     * know about every hardware change ASAP.
     */
    printk(KERN_INFO "testing the IO APIC.......................\n");

    for (apic = 0; apic < nr_ioapics; apic++) {

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
            reg_02.raw = io_apic_read(apic, 2);
        if (reg_01.bits.version >= 0x20)
            reg_03.raw = io_apic_read(apic, 3);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        printk("\n");
        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
        printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

        printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
        printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
        printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
        printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
         * but the value of reg_02 is read as the previous read register
         * value, so ignore it if reg_02 == reg_01.
         */
        if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
            printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
        }

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
         * or reg_03, but the value of reg_0[23] is read as the previous read
         * register value, so ignore it if reg_03 == reg_0[12].
         */
        if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
            reg_03.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
            printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
        }

        printk(KERN_DEBUG ".... IRQ redirection table:\n");

        printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
                          " Stat Dmod Deli Vect:   \n");

        for (i = 0; i <= reg_01.bits.entries; i++) {
            struct IO_APIC_route_entry entry;

            entry = ioapic_read_entry(apic, i);

            printk(KERN_DEBUG " %02x %03X ",
                   i,
                   entry.dest
            );

            printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
                   entry.mask,
                   entry.trigger,
                   entry.irr,
                   entry.polarity,
                   entry.delivery_status,
                   entry.dest_mode,
                   entry.delivery_mode,
                   entry.vector
            );
        }
    }
    printk(KERN_DEBUG "IRQ to pin mappings:\n");
    for_each_irq_desc(irq, desc) {
        struct irq_pin_list *entry;

        cfg = desc->chip_data;
        entry = cfg->irq_2_pin;
        if (!entry)
            continue;
        printk(KERN_DEBUG "IRQ%d ", irq);
        for (;;) {
            printk("-> %d:%d", entry->apic, entry->pin);
            if (!entry->next)
                break;
            entry = entry->next;
        }
        printk("\n");
    }

    printk(KERN_INFO ".................................... done.\n");
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
    unsigned int v;
    int i, j;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
    for (i = 0; i < 8; i++) {
        v = apic_read(base + i*0x10);
        for (j = 0; j < 32; j++) {
            if (v & (1<<j))
                printk("1");
            else
                printk("0");
        }
        printk("\n");
    }
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
    unsigned int v, ver, maxlvt;
    u64 icr;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
           smp_processor_id(), hard_smp_processor_id());
    v = apic_read(APIC_ID);
    printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
    v = apic_read(APIC_LVR);
    printk(KERN_INFO "... APIC VERSION: %08x\n", v);
    ver = GET_APIC_VERSION(v);
    maxlvt = lapic_get_maxlvt();

    v = apic_read(APIC_TASKPRI);
    printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

    if (APIC_INTEGRATED(ver)) {	/* !82489DX */
        if (!APIC_XAPIC(ver)) {
            v = apic_read(APIC_ARBPRI);
            printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
                   v & APIC_ARBPRI_MASK);
        }
        v = apic_read(APIC_PROCPRI);
        printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
    }

    /*
     * Remote read supported only in the 82489DX and local APIC for
     * Pentium processors.
     */
    if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
        v = apic_read(APIC_RRR);
        printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
    }

    v = apic_read(APIC_LDR);
    printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
    if (!x2apic_enabled()) {
        v = apic_read(APIC_DFR);
        printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
    }
    v = apic_read(APIC_SPIV);
    printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

    printk(KERN_DEBUG "... APIC ISR field:\n");
    print_APIC_bitfield(APIC_ISR);
    printk(KERN_DEBUG "... APIC TMR field:\n");
    print_APIC_bitfield(APIC_TMR);
    printk(KERN_DEBUG "... APIC IRR field:\n");
    print_APIC_bitfield(APIC_IRR);

    if (APIC_INTEGRATED(ver)) {	/* !82489DX */
        if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
            apic_write(APIC_ESR, 0);

        v = apic_read(APIC_ESR);
        printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
    }

    icr = apic_icr_read();
    printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
    printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

    v = apic_read(APIC_LVTT);
    printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

    if (maxlvt > 3) {	/* PC is LVT#4. */
        v = apic_read(APIC_LVTPC);
        printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
    }
    v = apic_read(APIC_LVT0);
    printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
    v = apic_read(APIC_LVT1);
    printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

    if (maxlvt > 2) {	/* ERR is LVT#3. */
        v = apic_read(APIC_LVTERR);
        printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
    }

    v = apic_read(APIC_TMICT);
    printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
    v = apic_read(APIC_TMCCT);
    printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
    v = apic_read(APIC_TDCR);
    printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
    printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
    int cpu;

    preempt_disable();
    for_each_online_cpu(cpu)
        smp_call_function_single(cpu, print_local_APIC, NULL, 1);
    preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
    unsigned int v;
    unsigned long flags;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "\nprinting PIC contents\n");

    spin_lock_irqsave(&i8259A_lock, flags);

    v = inb(0xa1) << 8 | inb(0x21);
    printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

    v = inb(0xa0) << 8 | inb(0x20);
    printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

    outb(0x0b, 0xa0);
    outb(0x0b, 0x20);
    v = inb(0xa0) << 8 | inb(0x20);
    outb(0x0a, 0xa0);
    outb(0x0a, 0x20);

    spin_unlock_irqrestore(&i8259A_lock, flags);

    printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

    v = inb(0x4d1) << 8 | inb(0x4d0);
    printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

__apicdebuginit(int) print_all_ICs(void)
{
    print_PIC();
    print_all_local_APICs();
    print_IO_APIC();

    return 0;
}

fs_initcall(print_all_ICs);
/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
    union IO_APIC_reg_01 reg_01;
    int i8259_apic, i8259_pin;
    int apic;
    unsigned long flags;

#ifdef CONFIG_X86_32
    int i;
    if (!pirqs_enabled)
        for (i = 0; i < MAX_PIRQS; i++)
            pirq_entries[i] = -1;
#endif

    /*
     * The number of IO-APIC IRQ registers (== #pins):
     */
    for (apic = 0; apic < nr_ioapics; apic++) {
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_01.raw = io_apic_read(apic, 1);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        nr_ioapic_registers[apic] = reg_01.bits.entries+1;
    }
    for (apic = 0; apic < nr_ioapics; apic++) {
        int pin;
        /* See if any of the pins is in ExtINT mode */
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            struct IO_APIC_route_entry entry;
            entry = ioapic_read_entry(apic, pin);

            /* If the interrupt line is enabled and in ExtInt mode
             * I have found the pin where the i8259 is connected.
             */
            if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                ioapic_i8259.apic = apic;
                ioapic_i8259.pin  = pin;
                goto found_i8259;
            }
        }
    }
found_i8259:
    /* Look to see if the MP table has reported the ExtINT */
    /* If we could not find the appropriate pin by looking at the ioapic
     * the i8259 is probably not connected to the ioapic, but give the
     * mptable a chance anyway.
     */
    i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
    i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
    /* Trust the MP table if nothing is setup in the hardware */
    if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
        printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
        ioapic_i8259.pin  = i8259_pin;
        ioapic_i8259.apic = i8259_apic;
    }
    /* Complain if the MP table and the hardware disagree */
    if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
        (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
        printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");

    /*
     * Do not trust the IO-APIC being empty at bootup
     */
    clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
    /*
     * Clear the IO-APIC before rebooting:
     */
    clear_IO_APIC();

    /*
     * If the i8259 is routed through an IOAPIC
     * Put that IOAPIC in virtual wire mode
     * so legacy interrupts can be delivered.
     */
    if (ioapic_i8259.pin != -1) {
        struct IO_APIC_route_entry entry;

        memset(&entry, 0, sizeof(entry));
        entry.mask            = 0; /* Enabled */
        entry.trigger         = 0; /* Edge */
        entry.irr             = 0;
        entry.polarity        = 0; /* High */
        entry.delivery_status = 0;
        entry.dest_mode       = 0; /* Physical */
        entry.delivery_mode   = dest_ExtINT; /* ExtInt */
        entry.vector          = 0;
        entry.dest            = read_apic_id();

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
    }

    disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

static void __init setup_ioapic_ids_from_mpc(void)
{
    union IO_APIC_reg_00 reg_00;
    physid_mask_t phys_id_present_map;
    int apic;
    int i;
    unsigned char old_id;
    unsigned long flags;

    if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
        return;

    /*
     * Don't check I/O APIC IDs for xAPIC systems.  They have
     * no meaning without the serial APIC bus.
     */
    if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
        || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
        return;
    /*
     * This is broken; anything with a real cpu count has to
     * circumvent this idiocy regardless.
     */
    phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

    /*
     * Set the IOAPIC ID to the value stored in the MPC table.
     */
    for (apic = 0; apic < nr_ioapics; apic++) {

        /* Read the register 0 value */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        old_id = mp_ioapics[apic].mp_apicid;

        if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                   apic, mp_ioapics[apic].mp_apicid);
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   reg_00.bits.ID);
            mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
        }

        /*
         * Sanity check, is the ID really free? Every APIC in a
         * system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
        if (check_apicid_used(phys_id_present_map,
                              mp_ioapics[apic].mp_apicid)) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                   apic, mp_ioapics[apic].mp_apicid);
            for (i = 0; i < get_physical_broadcast(); i++)
                if (!physid_isset(i, phys_id_present_map))
                    break;
            if (i >= get_physical_broadcast())
                panic("Max APIC ID exceeded!\n");
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   i);
            physid_set(i, phys_id_present_map);
            mp_ioapics[apic].mp_apicid = i;
        } else {
            physid_mask_t tmp;
            tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
            apic_printk(APIC_VERBOSE, "Setting %d in the "
                        "phys_id_present_map\n",
                        mp_ioapics[apic].mp_apicid);
            physids_or(phys_id_present_map, phys_id_present_map, tmp);
        }

        /*
         * We need to adjust the IRQ routing table
         * if the ID changed.
         */
        if (old_id != mp_ioapics[apic].mp_apicid)
            for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mp_dstapic == old_id)
                    mp_irqs[i].mp_dstapic
                        = mp_ioapics[apic].mp_apicid;

        /*
         * Read the right value from the MPC table and
         * write it into the ID register.
         */
        apic_printk(APIC_VERBOSE, KERN_INFO
                    "...changing IO-APIC physical APIC ID to %d ...",
                    mp_ioapics[apic].mp_apicid);

        reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0, reg_00.raw);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /*
         * Sanity check
         */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
            printk("could not set ID!\n");
        else
            apic_printk(APIC_VERBOSE, " ok.\n");
    }
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
    no_timer_check = 1;
    return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
    unsigned long t1 = jiffies;
    unsigned long flags;

    if (no_timer_check)
        return 1;

    local_save_flags(flags);
    local_irq_enable();
    /* Let ten ticks pass... */
    mdelay((10 * 1000) / HZ);
    local_irq_restore(flags);

    /*
     * Expect a few ticks at least, to be sure some possible
     * glue logic does not lock up after the first one or two
     * ticks in a non-ExtINT mode.  Also the local APIC
     * might have cached one ExtINT interrupt.  Finally, at
     * least one tick may be lost due to delays.
     */

    /* jiffies wrap? */
    if (time_after(jiffies, t1 + 4))
        return 1;
    return 0;
}
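/*
 * Editorial note on the arithmetic above: mdelay((10 * 1000) / HZ)
 * busy-waits for 10000/HZ milliseconds, which is ten timer periods
 * regardless of HZ (one tick is 1000/HZ ms).  Requiring jiffies to
 * have advanced past t1 + 4 therefore means roughly half the expected
 * ticks were actually delivered through the IO-APIC path.
 */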
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed, but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
    int was_pending = 0;
    unsigned long flags;
    struct irq_cfg *cfg;

    spin_lock_irqsave(&ioapic_lock, flags);
    if (irq < NR_IRQS_LEGACY) {
        disable_8259A_irq(irq);
        if (i8259A_irq_pending(irq))
            was_pending = 1;
    }
    cfg = irq_cfg(irq);
    __unmask_IO_APIC_irq(cfg);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return was_pending;
}

#ifdef CONFIG_X86_64
static int ioapic_retrigger_irq(unsigned int irq)
{
    struct irq_cfg *cfg = irq_cfg(irq);
    unsigned long flags;

    spin_lock_irqsave(&vector_lock, flags);
    send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
    spin_unlock_irqrestore(&vector_lock, flags);

    return 1;
}
#else
static int ioapic_retrigger_irq(unsigned int irq)
{
    send_IPI_self(irq_cfg(irq)->vector);

    return 1;
}
#endif
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE and a flush of the hardware cache.
 *
 * For level triggered, we also need to modify the io-apic RTE with the
 * updated vector information, along with modifying the IRTE with vector
 * and destination.  So irq migration for level triggered is a little
 * more complex than edge triggered migration.  The good news is that we
 * use the same algorithm for level triggered migration as we have
 * today; the only difference is that we now initiate the irq migration
 * from process context instead of interrupt context.
 *
 * In future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to the IO-APIC RTE.
 */
static void
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
    struct irq_cfg *cfg;
    struct irte irte;
    int modify_ioapic_rte;
    unsigned int dest;
    unsigned long flags;
    unsigned int irq;

    if (!cpumask_intersects(mask, cpu_online_mask))
        return;

    irq = desc->irq;
    if (get_irte(irq, &irte))
        return;

    cfg = desc->chip_data;
    if (assign_irq_vector(irq, cfg, mask))
        return;

    set_extra_move_desc(desc, mask);

    dest = cpu_mask_to_apicid_and(cfg->domain, mask);

    modify_ioapic_rte = desc->status & IRQ_LEVEL;
    if (modify_ioapic_rte) {
        spin_lock_irqsave(&ioapic_lock, flags);
        __target_IO_APIC_irq(irq, dest, cfg);
        spin_unlock_irqrestore(&ioapic_lock, flags);
    }

    irte.vector = cfg->vector;
    irte.dest_id = IRTE_DEST(dest);

    /*
     * Modify the IRTE and flush the Interrupt entry cache.
     */
    modify_irte(irq, &irte);

    if (cfg->move_in_progress)
        send_cleanup_vector(cfg);

    cpumask_copy(&desc->affinity, mask);
}

static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
{
    int ret = -1;
    struct irq_cfg *cfg = desc->chip_data;

    mask_IO_APIC_irq_desc(desc);

    if (io_apic_level_ack_pending(cfg)) {
        /*
         * Interrupt in progress. Migrating irq now will change the
         * vector information in the IO-APIC RTE and that will confuse
         * the EOI broadcast performed by cpu.
         * So, delay the irq migration to the next instance.
         */
        schedule_delayed_work(&ir_migration_work, 1);
        goto unmask;
    }

    /* everything is clear. we have right of way */
    migrate_ioapic_irq_desc(desc, &desc->pending_mask);

    ret = 0;
    desc->status &= ~IRQ_MOVE_PENDING;
    cpumask_clear(&desc->pending_mask);

unmask:
    unmask_IO_APIC_irq_desc(desc);

    return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
    unsigned int irq;
    struct irq_desc *desc;

    for_each_irq_desc(irq, desc) {
        if (desc->status & IRQ_MOVE_PENDING) {
            unsigned long flags;

            spin_lock_irqsave(&desc->lock, flags);
            if (!desc->chip->set_affinity ||
                !(desc->status & IRQ_MOVE_PENDING)) {
                desc->status &= ~IRQ_MOVE_PENDING;
                spin_unlock_irqrestore(&desc->lock, flags);
                continue;
            }

            desc->chip->set_affinity(irq, &desc->pending_mask);
            spin_unlock_irqrestore(&desc->lock, flags);
        }
    }
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
                                            const struct cpumask *mask)
{
    if (desc->status & IRQ_LEVEL) {
        desc->status |= IRQ_MOVE_PENDING;
        cpumask_copy(&desc->pending_mask, mask);
        migrate_irq_remapped_level_desc(desc);
        return;
    }

    migrate_ioapic_irq_desc(desc, mask);
}

static void set_ir_ioapic_affinity_irq(unsigned int irq,
                                       const struct cpumask *mask)
{
    struct irq_desc *desc = irq_to_desc(irq);

    set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
    unsigned vector, me;

    ack_APIC_irq();
#ifdef CONFIG_X86_64
    exit_idle();
#endif
    irq_enter();

    me = smp_processor_id();
    for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
        unsigned int irq;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
        irq = __get_cpu_var(vector_irq)[vector];

        if (irq == -1)
            continue;

        desc = irq_to_desc(irq);
        if (!desc)
            continue;

        cfg = irq_cfg(irq);
        spin_lock(&desc->lock);
        if (!cfg->move_cleanup_count)
            goto unlock;

        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
            goto unlock;

        __get_cpu_var(vector_irq)[vector] = -1;
        cfg->move_cleanup_count--;
unlock:
        spin_unlock(&desc->lock);
    }

    irq_exit();
}
static void irq_complete_move(struct irq_desc **descp)
{
    struct irq_desc *desc = *descp;
    struct irq_cfg *cfg = desc->chip_data;
    unsigned vector, me;

    if (likely(!cfg->move_in_progress))
        return;

    vector = ~get_irq_regs()->orig_ax;
    me = smp_processor_id();
    if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
        send_cleanup_vector(cfg);
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif

#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
    ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
    ack_x2APIC_irq();
}
#endif
static void ack_apic_edge(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);
    move_native_irq(irq);
    ack_APIC_irq();
}
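/*
 * Editorial note: for edge-triggered irqs the migration can happen
 * right here at ack time — once the local APIC is acked, a subsequent
 * edge is simply delivered on the newly programmed vector, so none of
 * the Remote-IRR care needed by ack_apic_level() below applies.
 */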
atomic_t irq_mis_count;

static void ack_apic_level(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_X86_32
	unsigned long v;
	int i;
#endif
	struct irq_cfg *cfg;
	int do_unmask_irq = 0;

	irq_complete_move(&desc);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq_desc(desc);
	}
#endif

#ifdef CONFIG_X86_32
	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source.  The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 */
	cfg = desc->chip_data;
	i = cfg->vector;

	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		cfg = desc->chip_data;
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_IO_APIC_irq_desc(desc);
	}

#ifdef CONFIG_X86_32
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(cfg);
		__unmask_and_level_IO_APIC_irq(cfg);
		spin_unlock(&ioapic_lock);
	}
#endif
}
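/*
 * The TMR lookup above indexes the local APIC's Trigger Mode Register
 * array: one 32-bit register per 32 vectors, spaced 0x10 apart, so
 * vector i lives at APIC_TMR + ((i & ~0x1f) >> 1), bit (i & 0x1f).
 * A clear TMR bit for a level-triggered source is the erratum
 * signature that triggers the edge/level replay above.
 */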
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < NR_IRQS_LEGACY)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
		}
	}
}
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq, struct irq_desc *desc)
{
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1)
		return;
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_desc *desc = irq_to_desc(0);
	struct irq_cfg *cfg = desc->chip_data;
	int cpu = boot_cpu_id;
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	unsigned int ver;
	int no_pin1 = 0;

	local_irq_save(flags);

	ver = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(ver);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, cfg, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);
#ifdef CONFIG_X86_32
	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
#endif
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq_desc(desc);
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq_desc(desc);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0, desc);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
	      "report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
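/*
 * To summarise the ladder above, IRQ0 is tried (1) directly on the
 * I/O APIC pin reported for the timer, (2) on the pin feeding the
 * cascaded 8259A, (3) as a local APIC 'virtual wire' interrupt and
 * finally (4) as an ExtINT routed through the unlocked 8259A; only
 * when all four fail do we panic.
 */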
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1 << PIC_CASCADE_IR)

void __init setup_IO_APIC(void)
{
#ifdef CONFIG_X86_32
	enable_IO_APIC();
#else
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
#endif

	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
#ifdef CONFIG_X86_32
	if (!acpi_ioapic)
		setup_ioapic_ids_from_mpc();
#endif
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}
/*
 * Called after all the initialization is done.  If we didn't find any
 * APIC bugs then we can allow the modify fast path.
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name	 = "ioapic",
	.suspend = ioapic_suspend,
	.resume	 = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new = NULL;
	int cpu = boot_cpu_id;
	struct irq_desc *desc_new = NULL;

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new < NR_IRQS; new++) {
		if (platform_legacy_irq(new))
			continue;

		desc_new = irq_to_desc_alloc_cpu(new, cpu);
		if (!desc_new) {
			printk(KERN_INFO "can not get irq_desc for %d\n", new);
			continue;
		}
		cfg_new = desc_new->chip_data;

		if (cfg_new->vector != 0)
			continue;
		if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
		/* restore it, in case dynamic_irq_init cleared it */
		desc_new->chip_data = cfg_new;
	}
	return irq;
}

static int nr_irqs_gsi = NR_IRQS_LEGACY;
int create_irq(void)
{
	unsigned int irq_want;
	int irq;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want);

	if (irq == 0)
		irq = -1;

	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/* store it, in case dynamic_irq_cleanup clears it */
	desc = irq_to_desc(irq);
	cfg = desc->chip_data;
	dynamic_irq_cleanup(irq);
	/* connect back irq_cfg */
	if (desc)
		desc->chip_data = cfg;

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	spin_unlock_irqrestore(&vector_lock, flags);
}
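/*
 * Typical usage of this dynamic IRQ interface (illustrative only,
 * my_handler/my_dev are placeholders):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return -ENOSPC;
 *	if (request_irq(irq, my_handler, 0, "my_dev", my_dev))
 *		...
 *	free_irq(irq, my_dev);
 *	destroy_irq(irq);
 *
 * The MSI and HT setup code below uses create_irq_nr() directly so
 * that the search can start above the GSI range.
 */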
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
	if (err)
		return err;

	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
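/*
 * In the non-remapped case this composes a plain x86 MSI: the address
 * selects the 0xFEExxxxx window, destination mode and target APIC ID,
 * while the data word carries trigger/delivery mode and the vector.
 * E.g. fixed delivery of vector 0x41 to physical APIC ID 2 yields
 * address 0xFEE02000 and data 0x4041, assuming the usual msidef.h
 * encodings.
 */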
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	read_msi_msg_desc(desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg_desc(desc, &msg);
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask.  This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void
ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;
	struct irte irte;

	if (get_irte(irq, &irte))
		return;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination.  So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
}

#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
{
	unsigned int irq;
	int ret;
	unsigned int irq_want;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, msidesc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *msidesc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
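/*
 * With interrupt remapping, only the first MSI vector of a device
 * allocates the consecutive IRTE block (msi_alloc_irte() with the full
 * nvec); each subsequent vector is attached to that block through
 * set_irte_irq(), with sub_handle as its offset into the block.
 */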
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
}

#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif
#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	hpet_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);
}

#endif /* CONFIG_SMP */

struct irq_chip hpet_msi_type = {
	.name		= "HPET_MSI",
	.unmask		= hpet_msi_unmask,
	.mask		= hpet_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= hpet_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_hpet_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
				      "edge");

	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;

	target_ht_irq(irq, dest, cfg->vector);
}

#endif

static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
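/*
 * Note that arch_setup_ht_irq() composes the message with
 * HT_IRQ_LOW_IRQ_MASKED set: the interrupt stays masked until a driver
 * unmasks it through ht_irq_chip, so nothing can arrive before a
 * handler is in place.
 */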
#ifdef CONFIG_X86_64
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long flags;
	int err;

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	spin_lock_irqsave(&vector_lock, flags);
	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);
	spin_unlock_irqrestore(&vector_lock, flags);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->vector = cfg->vector;
	entry->delivery_mode = INT_DELIVERY_MODE;
	entry->dest_mode = INT_DEST_MODE;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->mask = 1;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
#endif /* CONFIG_X86_64 */
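/*
 * Both UV helpers rely on struct uv_IO_APIC_route_entry being an exact
 * bit-level image of the 64-bit interrupt MMR (the BUG_ON enforces
 * this), so a routing entry is composed in an unsigned long and written
 * out with a single uv_write_global_mmr64() call.
 */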
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}

void __init probe_nr_irqs_gsi(void)
{
	int idx;
	int nr = 0;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += io_apic_get_redir_entries(idx) + 1;

	if (nr > nr_irqs_gsi)
		nr_irqs_gsi = nr;
}
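/*
 * io_apic_get_redir_entries() returns the index of the last redirection
 * entry rather than the count, hence the '+ 1' above: a standard
 * 24-input IOAPIC reports 23 and contributes 24 GSIs to nr_irqs_gsi.
 */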
/* --------------------------------------------------------------------------
                          ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * supported up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (check_apicid_used(apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	tmp = apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
#endif
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int cpu = boot_cpu_id;

	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc %d\n", irq);
		return 0;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= NR_IRQS_LEGACY) {
		cfg = desc->chip_data;
		add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
	}

	setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);

	return 0;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so the mask in all cases should simply be TARGET_CPUS
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			desc = irq_to_desc(irq);
			cfg = desc->chip_data;
			if (!cfg->vector) {
				setup_IO_APIC_irq(ioapic, pin, irq, desc,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
				continue;
			}

			/*
			 * Honour affinities which have been set in early boot
			 */
			if (desc->status &
			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
				mask = &desc->affinity;
			else
				mask = TARGET_CPUS;

#ifdef CONFIG_INTR_REMAP
			if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq_desc(desc, mask);
			else
#endif
				set_ioapic_affinity_irq_desc(desc, mask);
		}
	}
}
#endif
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
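/*
 * The single bootmem block allocated above is carved into nr_ioapics
 * struct resources followed by their name strings:
 *
 *	[res 0][res 1]...[res n-1]["IOAPIC 0"]["IOAPIC 1"]...
 *
 * which is why 'mem' is advanced past the resource array before the
 * names are written.
 */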
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);