/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>

#include <mach_apic.h>
#include <mach_apicdef.h>

#define __apicdebuginit(type) static type __init
/*
 * Is the SiS APIC rmw bug present ?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * Rough estimation of how many shared IRQs there are, can
 * be changed anytime.
 */

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;
static int __init parse_noapic(char *str)
{
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);

struct irq_cfg {
	unsigned int irq;
	struct irq_cfg *next;
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
static struct irq_cfg irq_cfg_init = { .irq = -1U, };
/* needs to be bigger than the size of irq_cfg_legacy */
static int nr_irq_cfg = 32;

static int __init parse_nr_irq_cfg(char *arg)
{
	if (arg)
		nr_irq_cfg = simple_strtoul(arg, NULL, 0);

	return 0;
}

early_param("nr_irq_cfg", parse_nr_irq_cfg);
static void init_one_irq_cfg(struct irq_cfg *cfg)
{
	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
}

static struct irq_cfg *irq_cfgx;
static struct irq_cfg *irq_cfgx_free;
static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int legacy_count;
	int i;

	cfg = *da->name;

	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
	for (i = legacy_count; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);

	for (i = 1; i < *da->nr; i++)
		cfg[i-1].next = &cfg[i];

	irq_cfgx_free = &irq_cfgx[legacy_count];
	irq_cfgx[legacy_count - 1].next = NULL;
}

#define for_each_irq_cfg(cfg)		\
	for (cfg = irq_cfgx; cfg; cfg = cfg->next)

DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
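
/*
 * The irq_cfg entries above are threaded onto two singly linked lists
 * through ->next: a live list headed by irq_cfgx (what for_each_irq_cfg
 * walks) and a free list headed by irq_cfgx_free.  Allocating a new
 * irq_cfg is then just popping the head of the free list and appending
 * it to the live list, so the common path needs no memory allocation.
 */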
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg;

	cfg = irq_cfgx;
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		cfg = cfg->next;
	}

	return NULL;
}

static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	struct irq_cfg *cfg, *cfg_pri;
	int i;
	int count = 0;

	cfg_pri = cfg = irq_cfgx;
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		cfg_pri = cfg;
		cfg = cfg->next;
		count++;
	}

	if (!irq_cfgx_free) {
		unsigned long phys;
		unsigned long total_bytes;
		/*
		 * we ran out of pre-allocated ones, allocate more
		 */
		printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);

		total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
		if (after_bootmem)
			cfg = kzalloc(total_bytes, GFP_ATOMIC);
		else
			cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

		if (!cfg)
			panic("please boot with nr_irq_cfg= %d\n", count * 2);

		phys = __pa(cfg);
		printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);

		for (i = 0; i < nr_irq_cfg; i++)
			init_one_irq_cfg(&cfg[i]);

		for (i = 1; i < nr_irq_cfg; i++)
			cfg[i-1].next = &cfg[i];

		irq_cfgx_free = cfg;
	}

	cfg = irq_cfgx_free;
	irq_cfgx_free = irq_cfgx_free->next;
	cfg->next = NULL;
	if (cfg_pri)
		cfg_pri->next = cfg;
	else
		irq_cfgx = cfg;
	cfg->irq = irq;
	printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
	{
		/* dump the results */
		struct irq_cfg *cfg;
		unsigned long phys;
		unsigned long bytes = sizeof(struct irq_cfg);

		printk(KERN_DEBUG "=========================== %d\n", irq);
		printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
		for_each_irq_cfg(cfg) {
			phys = __pa(cfg);
			printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
		}
		printk(KERN_DEBUG "===========================\n");
	}
#endif
	return cfg;
}
/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *irq_2_pin_head;
/* fill one page ? */
static int nr_irq_2_pin = 0x100;
static struct irq_pin_list *irq_2_pin_ptr;
static void __init irq_2_pin_init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_pin_list *pin;
	int i;

	pin = *da->name;

	for (i = 1; i < *da->nr; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin;
	int i;

	pin = irq_2_pin_ptr;

	if (pin) {
		irq_2_pin_ptr = pin->next;
		pin->next = NULL;
		return pin;
	}

	/*
	 * we ran out of pre-allocated ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);

	if (after_bootmem)
		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
			      GFP_ATOMIC);
	else
		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
				nr_irq_2_pin, PAGE_SIZE, 0);

	if (!pin)
		panic("cannot get more irq_2_pin\n");

	for (i = 1; i < nr_irq_2_pin; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = pin->next;
	pin->next = NULL;

	return pin;
}
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
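
/*
 * All register accesses above go through the IO-APIC's index/data
 * window: one write selects a register, the following data access
 * reads or writes it.  E.g. io_apic_read(apic, 1) returns the version
 * register (index 0x01); the unused[3] padding in struct io_apic
 * places the data window at offset 0x10, matching the hardware layout.
 */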
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;
	struct irq_cfg *cfg = irq_cfg(irq);

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};
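
/*
 * A redirection-table entry is 64 bits wide, accessed as two 32-bit
 * words: w1 maps to register 0x10 + 2*pin (vector, delivery mode,
 * mask, trigger, polarity), w2 to 0x11 + 2*pin (the destination field
 * in its top byte).
 */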
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
#else
		io_apic_write(apic, 0x11 + pin*2, dest);
#endif
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}
static int assign_irq_vector(int irq, cpumask_t mask);

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	desc = irq_to_desc(irq);
	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	/* first time to refer to this irq_cfg, so allocate it */
	cfg = irq_cfg_alloc(irq);
	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin();
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin();
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
	printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
}
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* not found: replace may be called before add, so add it now */
	if (!replaced)
		add_pin_to_irq(irq, newapic, newpin);
}
/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_cfg *cfg;						\
	struct irq_pin_list *entry;					\
									\
	cfg = irq_cfg(irq);						\
	entry = cfg->irq_2_pin;						\
	for (;;) {							\
		unsigned int reg;					\
		if (!entry)						\
			break;						\
		pin = entry->pin;					\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, 0x10 + R + pin*2, reg);	\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = entry->next;					\
	}								\
}

#define DO_ACTION(name,R,ACTION, FINAL)					\
									\
	static void name##_IO_APIC_irq (unsigned int irq)		\
	__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask,	0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask,	0, &= ~IO_APIC_REDIR_MASKED, )
static void mask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			return -ENOMEM;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}

	return 0;
}

void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}
#endif
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}
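
/*
 * An MP-table PCI source-bus IRQ packs the device slot and INTx pin
 * into one byte: srcbusirq == (slot << 2) | intx, with intx 0..3 for
 * INTA..INTD.  E.g. a device in slot 3 raising INTB has srcbusirq
 * 0x0d, so slot == (0x0d >> 2) & 0x1f == 3 and pin == 0x0d & 3 == 1,
 * which is what the comparisons above decode.
 */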
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)
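
/*
 * The MP-table irqflag field decoded below packs polarity into bits
 * 0-1 (00 conforming, 01 active high, 11 active low) and trigger mode
 * into bits 2-3 (00 conforming, 01 edge, 11 level); 10 is reserved in
 * both fields.  E.g. irqflag 0x0f means level triggered, active low.
 */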
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	return irq;
}
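
/*
 * A PCI pin's IRQ is therefore its global system interrupt number:
 * the pin counts of all lower-numbered IO-APICs plus its own pin.
 * E.g. with a 24-pin IO-APIC #0, pin 5 of IO-APIC #1 maps to
 * irq 24 + 5 = 29.
 */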
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
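	/*
	 * The priority level of a vector is its top nibble, e.g.
	 * 0x3e >> 4 == level 3.  Stepping by 8 with a rotating offset
	 * (below) spreads successive allocations across levels instead
	 * of filling one level's 16 slots back to back.
	 */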
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
, cpumask_t mask
)
1087 unsigned long flags
;
1089 spin_lock_irqsave(&vector_lock
, flags
);
1090 err
= __assign_irq_vector(irq
, mask
);
1091 spin_unlock_irqrestore(&vector_lock
, flags
);
1095 static void __clear_irq_vector(int irq
)
1097 struct irq_cfg
*cfg
;
1102 BUG_ON(!cfg
->vector
);
1104 vector
= cfg
->vector
;
1105 cpus_and(mask
, cfg
->domain
, cpu_online_map
);
1106 for_each_cpu_mask_nr(cpu
, mask
)
1107 per_cpu(vector_irq
, cpu
)[vector
] = -1;
1110 cpus_clear(cfg
->domain
);
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;

	/* Mark the inuse vectors */
	for_each_irq_cfg(cfg) {
		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		irq = cfg->irq;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

static void ioapic_register_intr(int irq, unsigned long trigger)
{
	struct irq_desc *desc;

	/* first time to use this irq_desc */
	desc = irq_to_desc(irq);
	if (!desc)
		desc = irq_to_desc_alloc(irq);

	if (trigger)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if (trigger)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry,0,sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = INT_DELIVERY_MODE;
		entry->dest_mode = INT_DEST_MODE;
		entry->dest = destination;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = irq_cfg(irq);

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

		idx = find_irq_entry(apic,pin,mp_INT);
		if (idx == -1) {
			if (first_notcon) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
				first_notcon = 0;
			} else
				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
			continue;
		}
		if (!first_notcon) {
			apic_printk(APIC_VERBOSE, " not connected.\n");
			first_notcon = 1;
		}

		irq = pin_2_irq(idx, apic, pin);
		add_pin_to_irq(irq, apic, pin);

		setup_IO_APIC_irq(apic, pin, irq,
				  irq_trigger(idx), irq_polarity(idx));
	}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled)
		return;
#endif

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;
	struct irq_cfg *cfg;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

	if (reg_01.bits.version >= 0x10) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			  " Stat Dmod Deli Vect:   \n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X ",
			i,
			entry.dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_cfg(cfg) {
		struct irq_pin_list *entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", cfg->irq);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	u64 icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
		v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);
/* Where, if anywhere, is the i8259 connected in external int mode? */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int apic;
	unsigned long flags;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of the
 * vector and cpu destination) of the IRTE and a flush of the hardware
 * cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with
 * the updated vector information, along with modifying the IRTE with
 * vector and destination.  So irq migration for level triggered is a
 * little more complex than edge triggered migration.  But the good news
 * is that we use the same algorithm for level triggered migration as we
 * have today, the only difference being that we now initiate the irq
 * migration from process context instead of interrupt context.
 *
 * In the future, when we do a directed EOI (combined with cpu EOI
 * broadcast suppression) to the IO-APIC, level triggered irq migration
 * will also be as simple as edge triggered migration and we can do the
 * irq migration with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	desc = irq_to_desc(irq);
	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg->vector);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc->affinity = mask;
}
static int migrate_irq_remapped_level(int irq)
{
	int ret = -1;
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq(irq);

	if (io_apic_level_ack_pending(irq)) {
		/*
		 * Interrupt in progress. Migrating irq now will change the
		 * vector information in the IO-APIC RTE and that will confuse
		 * the EOI broadcast performed by cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq(irq, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpus_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq(irq);
	return ret;
}

static void ir_irq_migration(struct work_struct *work)
{
	int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		desc->pending_mask = mask;
		migrate_irq_remapped_level(irq);
		return;
	}

	migrate_ioapic_irq(irq, mask);
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq >= nr_irqs)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}
)
1990 struct irq_cfg
*cfg
= irq_cfg(irq
);
1991 unsigned vector
, me
;
1993 if (likely(!cfg
->move_in_progress
))
1996 vector
= ~get_irq_regs()->orig_ax
;
1997 me
= smp_processor_id();
1998 if ((vector
== cfg
->vector
) && cpu_isset(me
, cfg
->domain
)) {
1999 cpumask_t cleanup_mask
;
2001 cpus_and(cleanup_mask
, cfg
->old_domain
, cpu_online_map
);
2002 cfg
->move_cleanup_count
= cpus_weight(cleanup_mask
);
2003 send_IPI_mask(cleanup_mask
, IRQ_MOVE_CLEANUP_VECTOR
);
2004 cfg
->move_in_progress
= 0;
2008 static inline void irq_complete_move(unsigned int irq
) {}
2010 #ifdef CONFIG_INTR_REMAP
2011 static void ack_x2apic_level(unsigned int irq
)
2016 static void ack_x2apic_edge(unsigned int irq
)
static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}

static void ack_apic_level(unsigned int irq)
{
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}
}
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif
2113 struct irq_desc
*desc
;
2114 struct irq_cfg
*cfg
;
2117 * NOTE! The local APIC isn't very good at handling
2118 * multiple interrupts at the same interrupt level.
2119 * As the interrupt level is determined by taking the
2120 * vector number and shifting that right by 4, we
2121 * want to spread these out a bit so that they don't
2122 * all fall in the same interrupt level.
2124 * Also, we've got to be careful not to trash gate
2125 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2127 for_each_irq_cfg(cfg
) {
2129 if (IO_APIC_IRQ(irq
) && !cfg
->vector
) {
2131 * Hmm.. We don't have an entry for this,
2132 * so default to an old-fashioned 8259
2133 * interrupt if we can..
2136 make_8259A_irq(irq
);
2138 desc
= irq_to_desc(irq
);
2139 /* Strange. Oh, well.. */
2140 desc
->chip
= &no_irq_chip
;
2146 static void unmask_lapic_irq(unsigned int irq
)
2150 v
= apic_read(APIC_LVT0
);
2151 apic_write(APIC_LVT0
, v
& ~APIC_LVT_MASKED
);
2154 static void mask_lapic_irq(unsigned int irq
)
2158 v
= apic_read(APIC_LVT0
);
2159 apic_write(APIC_LVT0
, v
| APIC_LVT_MASKED
);
2162 static void ack_lapic_irq (unsigned int irq
)
2167 static struct irq_chip lapic_chip __read_mostly
= {
2168 .name
= "local-APIC",
2169 .mask
= mask_lapic_irq
,
2170 .unmask
= unmask_lapic_irq
,
2171 .ack
= ack_lapic_irq
,
2174 static void lapic_register_intr(int irq
)
2176 struct irq_desc
*desc
;
2178 desc
= irq_to_desc(irq
);
2179 desc
->status
&= ~IRQ_LEVEL
;
2180 set_irq_chip_and_handler_name(irq
, &lapic_chip
, handle_edge_irq
,
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	printk(KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	printk(" done.\n");
}
/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin  = find_isa_irq_pin(8, mp_INT);
	apic = find_isa_irq_apic(8, mp_INT);
	if (pin == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);

	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 1;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for modern platforms only.
 */
check_timer(void)
2277 struct irq_cfg
*cfg
= irq_cfg(0);
2278 int apic1
, pin1
, apic2
, pin2
;
2279 unsigned long flags
;
2282 local_irq_save(flags
);
2285 * get/set the timer IRQ vector:
2287 disable_8259A_irq(0);
2288 assign_irq_vector(0, TARGET_CPUS
);
2291 * As IRQ0 is to be enabled in the 8259A, the virtual
2292 * wire has to be disabled in the local APIC.
2294 apic_write(APIC_LVT0
, APIC_LVT_MASKED
| APIC_DM_EXTINT
);
2297 pin1
= find_isa_irq_pin(0, mp_INT
);
2298 apic1
= find_isa_irq_apic(0, mp_INT
);
2299 pin2
= ioapic_i8259
.pin
;
2300 apic2
= ioapic_i8259
.apic
;
2302 apic_printk(APIC_QUIET
, KERN_INFO
"..TIMER: vector=0x%02X "
2303 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2304 cfg
->vector
, apic1
, pin1
, apic2
, pin2
);
2307 * Some BIOS writers are clueless and report the ExtINTA
2308 * I/O APIC input from the cascaded 8259A as the timer
2309 * interrupt input. So just in case, if only one pin
2310 * was found above, try it both directly and through the
2314 #ifdef CONFIG_INTR_REMAP
2315 if (intr_remapping_enabled
)
2316 panic("BIOS bug: timer not connected to IO-APIC");
2321 } else if (pin2
== -1) {
2328 * Ok, does IRQ0 through the IOAPIC work?
2331 add_pin_to_irq(0, apic1
, pin1
);
2332 setup_timer_IRQ0_pin(apic1
, pin1
, cfg
->vector
);
2334 unmask_IO_APIC_irq(0);
2335 if (timer_irq_works()) {
2336 if (nmi_watchdog
== NMI_IO_APIC
) {
2338 enable_8259A_irq(0);
2340 if (disable_timer_pin_1
> 0)
2341 clear_IO_APIC_pin(0, pin1
);
2344 #ifdef CONFIG_INTR_REMAP
2345 if (intr_remapping_enabled
)
2346 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2348 clear_IO_APIC_pin(apic1
, pin1
);
2350 apic_printk(APIC_QUIET
, KERN_ERR
"..MP-BIOS bug: "
2351 "8254 timer not connected to IO-APIC\n");
2353 apic_printk(APIC_QUIET
, KERN_INFO
"...trying to set up timer "
2354 "(IRQ0) through the 8259A ...\n");
2355 apic_printk(APIC_QUIET
, KERN_INFO
2356 "..... (found apic %d pin %d) ...\n", apic2
, pin2
);
2358 * legacy devices should be connected to IO APIC #0
2360 replace_pin_at_irq(0, apic1
, pin1
, apic2
, pin2
);
2361 setup_timer_IRQ0_pin(apic2
, pin2
, cfg
->vector
);
2362 unmask_IO_APIC_irq(0);
2363 enable_8259A_irq(0);
2364 if (timer_irq_works()) {
2365 apic_printk(APIC_QUIET
, KERN_INFO
"....... works.\n");
2366 timer_through_8259
= 1;
2367 if (nmi_watchdog
== NMI_IO_APIC
) {
2368 disable_8259A_irq(0);
2370 enable_8259A_irq(0);
2375 * Cleanup, just in case ...
2377 disable_8259A_irq(0);
2378 clear_IO_APIC_pin(apic2
, pin2
);
2379 apic_printk(APIC_QUIET
, KERN_INFO
"....... failed.\n");
2382 if (nmi_watchdog
== NMI_IO_APIC
) {
2383 apic_printk(APIC_QUIET
, KERN_WARNING
"timer doesn't work "
2384 "through the IO-APIC - disabling NMI Watchdog!\n");
2385 nmi_watchdog
= NMI_NONE
;
2388 apic_printk(APIC_QUIET
, KERN_INFO
2389 "...trying to set up timer as Virtual Wire IRQ...\n");
2391 lapic_register_intr(0);
2392 apic_write(APIC_LVT0
, APIC_DM_FIXED
| cfg
->vector
); /* Fixed mode */
2393 enable_8259A_irq(0);
2395 if (timer_irq_works()) {
2396 apic_printk(APIC_QUIET
, KERN_INFO
"..... works.\n");
2399 disable_8259A_irq(0);
2400 apic_write(APIC_LVT0
, APIC_LVT_MASKED
| APIC_DM_FIXED
| cfg
->vector
);
2401 apic_printk(APIC_QUIET
, KERN_INFO
"..... failed.\n");
2403 apic_printk(APIC_QUIET
, KERN_INFO
2404 "...trying to set up timer as ExtINT IRQ...\n");
2408 apic_write(APIC_LVT0
, APIC_DM_EXTINT
);
2410 unlock_ExtINT_logic();
2412 if (timer_irq_works()) {
2413 apic_printk(APIC_QUIET
, KERN_INFO
"..... works.\n");
2416 apic_printk(APIC_QUIET
, KERN_INFO
"..... failed :(.\n");
2417 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2418 "report. Then try booting with the 'noapic' option.\n");
2420 local_irq_restore(flags
);
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1 << 2)
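
/*
 * With PIC_IRQS == (1 << 2), the "io_apic_irqs = ~PIC_IRQS" assignment
 * below marks every IRQ except IRQ2 as routable through the IO-APIC.
 */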
void __init setup_IO_APIC(void)
{
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}
/*
 * Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path.
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
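
/*
 * Note on the resume path above: the IO-APIC ID register is restored
 * first, in case the firmware changed it across the sleep transition,
 * and only then are the routing entries saved by ioapic_suspend()
 * written back.
 */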
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

#ifndef CONFIG_HAVE_SPARSE_IRQ
	irq_want = nr_irqs - 1;
#endif

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new > 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		/* check if need to create one */
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init(irq);

	return irq;
}
int create_irq(void)
{
	int irq;

	irq = create_irq_nr(nr_irqs - 1);

	if (irq == 0)
		irq = -1;

	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
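
/*
 * A minimal usage sketch (illustrative only; example_grab_irq,
 * example_handler and the "example" name are hypothetical, not part of
 * this file): callers pair create_irq() with destroy_irq() around the
 * lifetime of their handler.
 */
#if 0
static int example_grab_irq(void)
{
	int irq = create_irq();		/* -1 if no free vector was found */

	if (irq < 0)
		return -ENOSPC;
	if (request_irq(irq, example_handler, 0, "example", NULL)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	return irq;
}
#endif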
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
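
/*
 * For reference: in the non-remapped layout composed above, the 8-bit
 * destination APIC ID lives in bits 12-19 of msg->address_lo (with
 * further flag bits selecting physical/logical destination mode and
 * CPU/lowest-priority redirection), while bits 0-7 of msg->data carry
 * the vector; trigger mode is always edge for MSI here.
 */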
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}
static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
{
	unsigned int irq;

	irq = dev->bus->number;
	irq <<= 8;
	irq |= dev->devfn;
	irq <<= 12;

	return irq;
}
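
/*
 * Example (illustrative): a device at bus 0x02, devfn 0x08 yields
 * ((0x02 << 8 | 0x08) << 12) == 0x208000; the MSI setup code below
 * then searches downward from 0x208000 + 0x100 for a free irq number.
 */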
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	int irq, ret;
	unsigned int irq_want;

	irq_want = build_irq_for_pci_dev(dev) + 0x100;

	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *desc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = 0;
	int index = 0;
#endif

	irq_want = build_irq_for_pci_dev(dev) + 0x100;
	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want--);
		if (irq == 0)
			return -1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif /* CONFIG_DMAR */

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cfg = irq_cfg(irq);
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
/*
 * This function is currently only a helper for the i386 smp boot process
 * where we need to reprogram the ioredtbls to cater for the cpus which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			cfg = irq_cfg(irq);
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
		} else {
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %016lx (%016lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to
 * handle IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);