ACPI: Add support for acpi_load_table/acpi_unload_table_id
linux-2.6/linux-loongson.git: arch/x86_64/kernel/io_apic.c
/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/mach_apic.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);

#define __apicdebuginit  __init

int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/*
 * Rough estimation of how many shared IRQs there are, can
 * be changed anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
	short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
} __attribute__ ((packed));
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(value, &io_apic->data);
}

/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}
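
/*
 * Illustrative sketch (not built): the two-register window above means
 * every access is "write the register number to 'index', then touch
 * 'data'". For example, the low 32 bits of the redirection-table entry
 * for a pin live at register 0x10 + 2*pin:
 */
#if 0
static unsigned int example_read_rte_low(unsigned int apic, unsigned int pin)
{
	/* io_apic_read() selects the register, then reads the data window */
	return io_apic_read(apic, 0x10 + 2 * pin);
}
#endif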
#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_pin_list *entry = irq_2_pin + irq;			\
									\
	BUG_ON(irq >= NR_IRQS);						\
	for (;;) {							\
		unsigned int reg;					\
		pin = entry->pin;					\
		if (pin == -1)						\
			break;						\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		if (!entry->next)					\
			break;						\
		entry = irq_2_pin + entry->next;			\
	}								\
	FINAL;								\
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
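
/*
 * Illustrative sketch (not built): a safe read-modify-write of one RTE
 * using the helpers above. ioapic_read_entry() snapshots both words under
 * the lock; ioapic_write_entry() then stores the high word before the low
 * word, so the entry is fully populated before it can become unmasked.
 */
#if 0
static void example_set_vector(int apic, int pin, u8 vector)
{
	struct IO_APIC_route_entry e = ioapic_read_entry(apic, pin);

	e.vector = vector;
	ioapic_write_entry(apic, pin, e);	/* high word first inside */
}
#endif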
#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= NR_IRQS);
	for (;;) {
		unsigned int reg;
		apic = entry->apic;
		pin = entry->pin;
		if (pin == -1)
			break;
		io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~0x000000ff;
		reg |= vector;
		io_apic_modify(apic, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	int vector;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	cpus_and(mask, tmp, CPU_MASK_ALL);

	vector = assign_irq_vector(irq, mask, &tmp);
	if (vector < 0)
		return;

	dest = cpu_mask_to_apicid(tmp);

	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, vector);
	set_native_irq_info(irq, mask);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	static int first_free_entry = NR_IRQS;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= NR_IRQS);
	while (entry->next)
		entry = irq_2_pin + entry->next;

	if (entry->pin != -1) {
		entry->next = first_free_entry;
		entry = irq_2_pin + entry->next;
		if (++first_free_entry >= PIN_MAP_SIZE)
			panic("io_apic.c: ran out of irq_2_pin entries!");
	}
	entry->apic = apic;
	entry->pin = pin;
}
#define DO_ACTION(name,R,ACTION, FINAL)					\
									\
	static void name##_IO_APIC_irq (unsigned int irq)		\
	__DO_ACTION(R, ACTION, FINAL)

DO_ACTION( __mask,	0, |= 0x00010000, io_apic_sync(entry->apic) )
						/* mask = 1 */
DO_ACTION( __unmask,	0, &= 0xfffeffff, )
						/* mask = 0 */
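
/*
 * For reference: DO_ACTION(__mask, 0, |= 0x00010000, ...) expands to a
 * function __mask_IO_APIC_irq() that walks the irq_2_pin chain for 'irq'
 * and ORs the mask bit (bit 16 of the low RTE word, hence 0x00010000)
 * into register 0x10 + pin*2 of each pin, then runs io_apic_sync() so
 * the masking is observed before the caller proceeds. __unmask clears
 * the same bit (&= 0xfffeffff) and needs no final sync.
 */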
static void mask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
int skip_ioapic_setup;
int ioapic_force;

/* dummy parsing: see setup.c */

static int __init disable_ioapic_setup(char *str)
{
	skip_ioapic_setup = 1;
	return 0;
}
early_param("noapic", disable_ioapic_setup);
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mpc_irqtype == type &&
		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mpc_dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mpc_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mpc_irqtype == type) &&
		    (mp_irqs[i].mpc_srcbusirq == irq))

			return mp_irqs[i].mpc_dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mpc_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mpc_irqtype == type) &&
		    (mp_irqs[i].mpc_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		bus, slot, pin);
	if (mp_bus_id_to_pci_bus[bus] == -1) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mpc_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mpc_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	BUG_ON(best_guess >= NR_IRQS);
	return best_guess;
}
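
/*
 * mpc_srcbusirq for PCI encodes the slot in bits 2..6 and the pin
 * (INTA#..INTD# as 0..3) in bits 0..1, which is what the shift/mask
 * above unpack. E.g. srcbusirq 0x0d -> slot (0x0d >> 2) & 0x1f = 3,
 * pin 0x0d & 3 = 1 (INTB#).
 */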
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */
#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */
#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)
static int __init MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mpc_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mpc_irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mpc_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mpc_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mpc_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mpc_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	BUG_ON(irq >= NR_IRQS);
	return irq;
}
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic,pin,mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = {
	[0] = FIRST_EXTERNAL_VECTOR + 0,
	[1] = FIRST_EXTERNAL_VECTOR + 1,
	[2] = FIRST_EXTERNAL_VECTOR + 2,
	[3] = FIRST_EXTERNAL_VECTOR + 3,
	[4] = FIRST_EXTERNAL_VECTOR + 4,
	[5] = FIRST_EXTERNAL_VECTOR + 5,
	[6] = FIRST_EXTERNAL_VECTOR + 6,
	[7] = FIRST_EXTERNAL_VECTOR + 7,
	[8] = FIRST_EXTERNAL_VECTOR + 8,
	[9] = FIRST_EXTERNAL_VECTOR + 9,
	[10] = FIRST_EXTERNAL_VECTOR + 10,
	[11] = FIRST_EXTERNAL_VECTOR + 11,
	[12] = FIRST_EXTERNAL_VECTOR + 12,
	[13] = FIRST_EXTERNAL_VECTOR + 13,
	[14] = FIRST_EXTERNAL_VECTOR + 14,
	[15] = FIRST_EXTERNAL_VECTOR + 15,
};
static cpumask_t irq_domain[NR_IRQ_VECTORS] __read_mostly = {
	[0] = CPU_MASK_ALL,
	[1] = CPU_MASK_ALL,
	[2] = CPU_MASK_ALL,
	[3] = CPU_MASK_ALL,
	[4] = CPU_MASK_ALL,
	[5] = CPU_MASK_ALL,
	[6] = CPU_MASK_ALL,
	[7] = CPU_MASK_ALL,
	[8] = CPU_MASK_ALL,
	[9] = CPU_MASK_ALL,
	[10] = CPU_MASK_ALL,
	[11] = CPU_MASK_ALL,
	[12] = CPU_MASK_ALL,
	[13] = CPU_MASK_ALL,
	[14] = CPU_MASK_ALL,
	[15] = CPU_MASK_ALL,
};
static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	int old_vector = -1;
	int cpu;

	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if (irq_vector[irq] > 0)
		old_vector = irq_vector[irq];
	if (old_vector > 0) {
		cpus_and(*result, irq_domain[irq], mask);
		if (!cpus_empty(*result))
			return old_vector;
	}

	for_each_cpu_mask(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector >= 0) {
			cpumask_t old_mask;
			int old_cpu;
			cpus_and(old_mask, irq_domain[irq], cpu_online_map);
			for_each_cpu_mask(old_cpu, old_mask)
				per_cpu(vector_irq, old_cpu)[old_vector] = -1;
		}
		for_each_cpu_mask(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		irq_vector[irq] = vector;
		irq_domain[irq] = domain;
		cpus_and(*result, domain, mask);
		return vector;
	}
	return -ENOSPC;
}
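
/*
 * Example of the stride-8 search above: starting from FIRST_DEVICE_VECTOR
 * the candidates are v, v+8, v+16, ... so consecutive allocations land in
 * different interrupt levels (level = vector >> 4). When the walk passes
 * FIRST_SYSTEM_VECTOR it wraps to FIRST_DEVICE_VECTOR + (offset+1) % 8,
 * shifting the whole sequence by one vector so the eight interleaved
 * sequences eventually cover the space. IA32_SYSCALL_VECTOR (int 0x80)
 * is explicitly skipped.
 */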
static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	vector = __assign_irq_vector(irq, mask, result);
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
static void __clear_irq_vector(int irq)
{
	cpumask_t mask;
	int cpu, vector;

	BUG_ON(!irq_vector[irq]);

	vector = irq_vector[irq];
	cpus_and(mask, irq_domain[irq], cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	irq_vector[irq] = 0;
	irq_domain[irq] = CPU_MASK_NONE;
}
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;

	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
		if (!cpu_isset(cpu, irq_domain[irq]))
			continue;
		vector = irq_vector[irq];
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;
		if (!cpu_isset(cpu, irq_domain[irq]))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
extern void (*interrupt[NR_IRQS])(void);

static struct irq_chip ioapic_chip;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
			trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq, "fasteoi");
	else {
		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
	}
}
static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
{
	struct IO_APIC_route_entry entry;
	int vector;
	unsigned long flags;

	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(&entry,0,sizeof(entry));

	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 0;				/* enable IRQ */
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);

	entry.trigger = irq_trigger(idx);
	entry.polarity = irq_polarity(idx);

	if (irq_trigger(idx)) {
		entry.trigger = 1;
		entry.mask = 1;
		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
	}

	if (!apic && !IO_APIC_IRQ(irq))
		return;

	if (IO_APIC_IRQ(irq)) {
		cpumask_t mask;
		vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
		if (vector < 0)
			return;

		entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
		entry.vector = vector;

		ioapic_register_intr(irq, vector, IOAPIC_AUTO);
		if (!apic && (irq < 16))
			disable_8259A_irq(irq);
	}

	ioapic_write_entry(apic, pin, entry);

	spin_lock_irqsave(&ioapic_lock, flags);
	set_native_irq_info(irq, TARGET_CPUS);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

		idx = find_irq_entry(apic,pin,mp_INT);
		if (idx == -1) {
			if (first_notcon) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
				first_notcon = 0;
			} else
				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
			continue;
		}

		irq = pin_2_irq(idx, apic, pin);
		add_pin_to_irq(irq, apic, pin);

		setup_IO_APIC_irq(apic, pin, idx, irq);
	}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE," not connected.\n");
}
/*
 * Set up the 8259A-master output pin as broadcast to all
 * CPUs.
 */
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;
	unsigned long flags;

	memset(&entry,0,sizeof(entry));

	disable_8259A_irq(0);

	/* mask LVT0 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 0;					/* unmask IRQ now */
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
	io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
	spin_unlock_irqrestore(&ioapic_lock, flags);

	enable_8259A_irq(0);
}
void __init UNEXPECTED_IO_APIC(void)
{
}
void __apicdebuginit print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(apic, 2);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk("\n");
	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
		UNEXPECTED_IO_APIC();

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
		(reg_01.bits.entries != 0x2E) &&
		(reg_01.bits.entries != 0x3F) &&
		(reg_01.bits.entries != 0x03)
	)
		UNEXPECTED_IO_APIC();

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
		(reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
	)
		UNEXPECTED_IO_APIC();
	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
		UNEXPECTED_IO_APIC();

	if (reg_01.bits.version >= 0x10) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
			UNEXPECTED_IO_APIC();
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
			  " Stat Dest Deli Vect:   \n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		printk(KERN_DEBUG " %02x %03X %02X  ",
			i,
			entry.dest.logical.logical_dest,
			entry.dest.physical.physical_dest
		);

		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector
		);
	}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_pin_list *entry = irq_2_pin + i;
		if (entry->pin < 0)
			continue;
		printk(KERN_DEBUG "IRQ%d ", i);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = irq_2_pin + entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
1042 #if 0
1044 static __apicdebuginit void print_APIC_bitfield (int base)
1046 unsigned int v;
1047 int i, j;
1049 if (apic_verbosity == APIC_QUIET)
1050 return;
1052 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1053 for (i = 0; i < 8; i++) {
1054 v = apic_read(base + i*0x10);
1055 for (j = 0; j < 32; j++) {
1056 if (v & (1<<j))
1057 printk("1");
1058 else
1059 printk("0");
1061 printk("\n");
void __apicdebuginit print_local_APIC(void * dummy)
{
	unsigned int v, ver, maxlvt;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
		v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	v = apic_read(APIC_ICR);
	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
	v = apic_read(APIC_ICR2);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
void print_all_local_APICs (void)
{
	on_each_cpu(print_local_APIC, NULL, 1, 1);
}

void __apicdebuginit print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

#endif  /*  0  */
static void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	for (i = 0; i < PIN_MAP_SIZE; i++) {
		irq_2_pin[i].pin = -1;
		irq_2_pin[i].next = 0;
	}

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest.physical.physical_dest =
					GET_APIC_ID(apic_read(APIC_ID));

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;

	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (jiffies - t1 > 4)
		return 1;
	return 0;
}
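
/*
 * Numbers, for reference: mdelay((10 * 1000) / HZ) busy-waits for ten
 * clock periods, e.g. 100ms at HZ=100 or 40ms at HZ=250. Of the ten
 * expected ticks we only insist on jiffies advancing by more than 4,
 * which tolerates a cached ExtINT interrupt and a couple of lost ticks.
 */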
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
static int ioapic_retrigger_irq(unsigned int irq)
{
	cpumask_t mask;
	unsigned vector;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	vector = irq_vector[irq];
	cpus_clear(mask);
	cpu_set(first_cpu(irq_domain[irq]), mask);

	send_IPI_mask(mask, vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

static void ack_apic_edge(unsigned int irq)
{
	move_native_irq(irq);
	ack_APIC_irq();
}
static void ack_apic_level(unsigned int irq)
{
	int do_unmask_irq = 0;

#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	move_masked_irq(irq);
	if (unlikely(do_unmask_irq))
		unmask_IO_APIC_irq(irq);
}
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
static inline void init_IO_APIC_traps(void)
{
	int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for (irq = 0; irq < NR_IRQS ; irq++) {
		int tmp = irq;
		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_desc[irq].chip = &no_irq_chip;
		}
	}
}
static void enable_lapic_irq (unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void disable_lapic_irq (unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void ack_lapic_irq (unsigned int irq)
{
	ack_APIC_irq();
}

static void end_lapic_irq (unsigned int i) { /* nothing */ }

static struct hw_interrupt_type lapic_irq_type __read_mostly = {
	.typename = "local-APIC-edge",
	.startup = NULL, /* startup_irq() not used for IRQ0 */
	.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
	.enable = enable_lapic_irq,
	.disable = disable_lapic_irq,
	.ack = ack_lapic_irq,
	.end = end_lapic_irq,
};
static void setup_nmi (void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	printk(KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0(NULL);

	printk(" done.\n");
}
/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;
	unsigned long flags;

	pin  = find_isa_irq_pin(8, mp_INT);
	apic = find_isa_irq_apic(8, mp_INT);
	if (pin == -1)
		return;

	spin_lock_irqsave(&ioapic_lock, flags);
	*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
	*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest.physical.physical_dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
	spin_unlock_irqrestore(&ioapic_lock, flags);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 */

static int try_apic_pin(int apic, int pin, char *msg)
{
	apic_printk(APIC_VERBOSE, KERN_INFO
		    "..TIMER: trying IO-APIC=%d PIN=%d %s",
		    apic, pin, msg);

	/*
	 * Ok, does IRQ0 through the IOAPIC work?
	 */
	if (!no_timer_check && timer_irq_works()) {
		nmi_watchdog_default();
		if (nmi_watchdog == NMI_IO_APIC) {
			disable_8259A_irq(0);
			setup_nmi();
			enable_8259A_irq(0);
		}
		return 1;
	}
	clear_IO_APIC_pin(apic, pin);
	apic_printk(APIC_QUIET, KERN_ERR " .. failed\n");
	return 0;
}
/* The function from hell */
static void check_timer(void)
{
	int apic1, pin1, apic2, pin2;
	int vector;
	cpumask_t mask;

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	vector = assign_irq_vector(0, TARGET_CPUS, &mask);

	/*
	 * Subtle, code in do_timer_interrupt() expects an AEOI
	 * mode for the 8259A whenever interrupts are routed
	 * through I/O APICs.  Also IRQ0 has to be enabled in
	 * the 8259A which implies the virtual wire has to be
	 * disabled in the local APIC.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	/* Do this first, otherwise we get double interrupts on ATI boards */
	if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled"))
		return;

	/* Now try again with IRQ0 8259A enabled.
	   Assumes timer is on IO-APIC 0 ?!? */
	enable_8259A_irq(0);
	unmask_IO_APIC_irq(0);
	if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled"))
		return;
	disable_8259A_irq(0);

	/* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides
	   on Nvidia boards */
	if (!(apic1 == 0 && pin1 == 0) &&
	    try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled"))
		return;
	if (!(apic1 == 0 && pin1 == 2) &&
	    try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled"))
		return;

	/* Then try pure 8259A routing on the 8259 as reported by BIOS */
	enable_8259A_irq(0);
	if (pin2 != -1) {
		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
		if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS"))
			return;
	}

	/* Tried all possibilities to go through the IO-APIC. Now come the
	   really cheesy fallbacks. */

	if (nmi_watchdog == NMI_IO_APIC) {
		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = 0;
	}

	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");

	disable_8259A_irq(0);
	irq_desc[0].chip = &lapic_irq_type;
	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_VERBOSE," works.\n");
		return;
	}
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
	apic_printk(APIC_VERBOSE," failed.\n");

	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_VERBOSE," works.\n");
		return;
	}
	apic_printk(APIC_VERBOSE," failed :(.\n");
	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
}
static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * IRQs that are handled by the PIC in the MPS IOAPIC case.
 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
 *   Linux doesn't really care, as it's not actually used
 *   for any interrupt handling anyway.
 */
#define PIC_IRQS	(1<<2)
void __init setup_IO_APIC(void)
{
	enable_IO_APIC();

	if (acpi_ioapic)
		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
	else
		io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
	if (!acpi_ioapic)
		print_IO_APIC();
}
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
static struct sysdev_class ioapic_sysdev_class = {
	set_kset_name("ioapic"),
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
	struct sys_device * dev;
	int i, size, error = 0;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++ ) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		memset(mp_ioapic_data[i], 0, size);
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
int create_irq(void)
{
	/* Allocate an unused irq */
	int irq;
	int new;
	int vector = 0;
	unsigned long flags;
	cpumask_t mask;

	irq = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (NR_IRQS - 1); new >= 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		if (irq_vector[new] != 0)
			continue;
		vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
		if (likely(vector > 0))
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq >= 0) {
		dynamic_irq_init(irq);
	}
	return irq;
}
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
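
/*
 * Illustrative usage (not built): a driver-style pairing of create_irq()
 * and destroy_irq(). The handler registration is elided and hypothetical;
 * only the allocate/free protocol is the point here.
 */
#if 0
static int example_dynamic_irq(void)
{
	int irq = create_irq();

	if (irq < 0)
		return irq;		/* -ENOSPC when no vector is free */
	/* ... set up a handler for 'irq' here ... */
	destroy_irq(irq);		/* releases the vector again */
	return 0;
}
#endif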
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	int vector;
	unsigned dest;
	cpumask_t tmp;

	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
	if (vector >= 0) {
		dest = cpu_mask_to_apicid(tmp);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(vector);
	}
	return vector;
}
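
/*
 * The message built above mirrors an IO-APIC RTE: address_lo selects
 * physical vs. logical addressing to match INT_DEST_MODE and CPU vs.
 * lowest-priority redirection to match INT_DELIVERY_MODE, while data
 * carries the edge trigger, assert level and the allocated vector.
 */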
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	int vector;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	cpus_and(mask, tmp, CPU_MASK_ALL);

	vector = assign_irq_vector(irq, mask, &tmp);
	if (vector < 0)
		return;

	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	set_native_irq_info(irq, mask);
}
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
{
	struct msi_msg msg;
	int ret;
	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	write_msi_msg(irq, &msg);

	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	unsigned int dest;
	cpumask_t tmp;
	int vector;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		tmp = TARGET_CPUS;

	cpus_and(mask, tmp, CPU_MASK_ALL);

	vector = assign_irq_vector(irq, mask, &tmp);
	if (vector < 0)
		return;

	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, vector);
	set_native_irq_info(irq, mask);
}
#endif
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	int vector;
	cpumask_t tmp;

	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
	if (vector >= 0) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return vector;
}
#endif /* CONFIG_HT_IRQ */
/* --------------------------------------------------------------------------
                          ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE

int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
	struct IO_APIC_route_entry entry;
	unsigned long flags;
	int vector;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);


	vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
	if (vector < 0)
		return vector;

	/*
	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
	 * Note that we mask (disable) IRQs now -- these get enabled when the
	 * corresponding device driver registers for this IRQ.
	 */

	memset(&entry,0,sizeof(entry));

	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.dest_mode = INT_DEST_MODE;
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
	entry.trigger = triggering;
	entry.polarity = polarity;
	entry.mask = 1;					 /* Disabled (masked) */
	entry.vector = vector & 0xff;

	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
		"IRQ %d Mode:%i Active:%i)\n", ioapic,
		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
		triggering, polarity);

	ioapic_register_intr(irq, entry.vector, triggering);

	if (!ioapic && (irq < 16))
		disable_8259A_irq(irq);

	ioapic_write_entry(ioapic, pin, entry);

	spin_lock_irqsave(&ioapic_lock, flags);
	set_native_irq_info(irq, TARGET_CPUS);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return 0;
}
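
/*
 * Illustrative call (not built): ACPI typically reports PCI interrupts
 * as level triggered, active low, matching the default_PCI_* values
 * earlier in this file. The ioapic/pin/irq numbers below are made up:
 */
#if 0
static int example_route(void)
{
	/* triggering = 1 (level), polarity = 1 (active low) */
	return io_apic_set_pci_routing(0, 16, 16, 1, 1);
}
#endif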
#endif /* CONFIG_ACPI */
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so mask in all cases should simply be TARGET_CPUS
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			if(!irq_vector[irq])
				setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif