#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
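
/*
 * "nointremap" boot parameter: lets the user disable interrupt
 * remapping even when the hardware advertises support for it.
 */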
static int disable_intremap;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};
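
/*
 * When the generic hardirq layer provides per-IRQ descriptors, the
 * irq_2_iommu state hangs off each irq_desc and is allocated on demand;
 * otherwise a static NR_IRQS-sized array is used.
 */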
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
	struct irq_2_iommu *iommu;

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif
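
/*
 * irq_2_ir_lock serializes updates to the irq <-> IRTE mappings and to
 * the 'present' bits in the interrupt remapping table.
 */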
static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
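
/*
 * Allocate 'count' contiguous IRTEs: counts above one are rounded up to
 * a power of two so the block can be addressed as a base index plus
 * sub-handle, and the table is scanned in count-sized steps until a
 * fully free block is found.
 */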
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
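
/*
 * Queue an Interrupt Entry Cache invalidation for the given index (and,
 * via the mask, the 2^mask entries following it) and wait for the
 * invalidation to complete.
 */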
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
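
/*
 * Point an irq at an already allocated IRTE block: 'index' is the base
 * of the block and 'subhandle' selects the entry within it, as used
 * when one alloc_irte() covers several MSI/MSI-X vectors.
 */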
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
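
/*
 * Tear down the irq <-> IRTE mapping. Only the owner of the base
 * sub-handle clears and invalidates the hardware entries; users of
 * non-zero sub-handles just drop their reference to the shared block.
 */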
int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
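
/*
 * Program the hardware: write the table address to DMAR_IRTA_REG, latch
 * it with the SIRTP command, globally invalidate the interrupt entry
 * cache, and only then set the IRE bit to start remapping.
 */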
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
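
/*
 * Interrupt remapping is usable only if every DRHD unit in the system
 * supports it and the user has not disabled it on the command line.
 */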
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
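
/*
 * Bring up interrupt remapping on every DRHD: quiesce anything left
 * enabled prior to OS handover, verify EIM support when extended (eim)
 * mode is requested, enable queued invalidation, and finally install a
 * remapping table per iommu.
 */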
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware unit (DRHD).
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}