intel-iommu: Clean up handling of "caching mode" vs. context flushing.
drivers/pci/dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * add INCLUDE_ALL at the tail, so scanning the list will find it
         * at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

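/*
 * Parse a single ACPI device-scope entry: walk the PCI path below the
 * entry's start bus and return the endpoint or bridge it names in *dev.
 * A scope entry that names a device the BIOS did not actually populate
 * is not treated as an error; *dev is set to NULL and 0 is returned.
 */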
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR table;
                 * just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                                PREFIX "Device scope bus [%d] not found\n",
                                scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}

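/*
 * Parse all device-scope entries between @start and @end.  The first
 * pass counts the supported (endpoint/bridge) entries so that the
 * @devices array can be sized; the second pass resolves each entry to
 * a struct pci_dev.
 */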
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                                "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        if (!drhd->address) {
                /* Promote an attitude of violence to a BIOS engineer today */
                WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -ENODEV;
        }
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

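/*
 * Resolve the device scope of one DRHD unit once the PCI subsystem is
 * available.  INCLUDE_ALL units carry no explicit scope, so there is
 * nothing to do for them; on failure the unit is unlinked and freed.
 */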
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                &dmaru->devices_cnt, &dmaru->devices,
                                drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                ((void *)rmrr) + rmrr->header.length,
                &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = (struct acpi_dmar_hardware_unit *)header;
                printk(KERN_INFO PREFIX
                        "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
                        drhd->flags, (unsigned long long)drhd->address);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = (struct acpi_dmar_reserved_memory *)header;

                printk(KERN_INFO PREFIX
                        "RMRR base: 0x%016Lx end: 0x%016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again, earlier dmar_tbl mapping could be mapped with
         * fixed map.
         */
        dmar_table_detect();

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
                dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_rmrr(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type\n");
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

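/*
 * Return 1 if @dev, or any PCI bridge above it, appears in the
 * @devices[] scope array; 0 otherwise.
 */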
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

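/*
 * Find the DRHD unit responsible for @dev: either an INCLUDE_ALL unit
 * in the device's PCI segment, or a unit whose device scope explicitly
 * lists the device (or one of its parent bridges).
 */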
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_DMAR
        {
                struct dmar_rmrr_unit *rmrr, *rmrr_n;
                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
                }
        }
#endif

        return ret;
}

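/*
 * Parse the DMAR table exactly once and register the DRHD (and, under
 * CONFIG_DMAR, RMRR) units it describes.  Subsequent calls return 0
 * without re-parsing.
 */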
int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

#ifdef CONFIG_DMAR
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
        parse_ioapics_under_ir();
#endif
        return 0;
}

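/*
 * Early boot detection: map the DMAR table just long enough to decide
 * whether remapping hardware is present and to set iommu_detected,
 * then drop the early mapping again; parse_dmar_table() re-maps the
 * table later via dmar_table_detect().
 */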
void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();

        {
#ifdef CONFIG_INTR_REMAP
        struct acpi_table_dmar *dmar;
        /*
         * for now we will disable dma-remapping when interrupt
         * remapping is enabled.
         * When support for queued invalidation for IOTLB invalidation
         * is added, we will not need this any more.
         */
        dmar = (struct acpi_table_dmar *) dmar_tbl;
        if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                printk(KERN_INFO
                       "Queued invalidation will be enabled to support "
                       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
        if (ret && !no_iommu && !iommu_detected && !swiotlb &&
            !dmar_disabled)
                iommu_detected = 1;
#endif
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}

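/*
 * Allocate and initialize the struct intel_iommu for one DRHD: map its
 * register range, cache the capability and extended capability
 * registers, compute the supported AGAW values and attach the result
 * to @drhd.
 */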
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto error;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto error;
        }
#endif
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;
error:
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

#ifdef CONFIG_DMAR
        free_dmar_iommu(iommu);
#endif

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

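/*
 * Check whether an Invalidation Queue Error hit the descriptor at
 * @index.  If so, overwrite it with the following wait descriptor,
 * clear the IQE bit so the hardware refetches, and return -EINVAL;
 * otherwise return 0.
 */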
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> 4) == index) {
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc = 0;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent the
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        goto out;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }
out:
        qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

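/*
 * Issue a context-cache invalidation descriptor with the given
 * domain-id, source-id, function mask and granularity, and wait for
 * its completion.
 */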
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

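/*
 * Issue an IOTLB invalidation descriptor and wait for its completion.
 * Without caching mode there is nothing to do for a non-present-entry
 * flush, so 1 is returned without touching the queue.
 */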
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                   unsigned int size_order, u64 type,
                   int non_present_entry_flush)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (non_present_entry_flush) {
                if (!cap_caching_mode(iommu->cap))
                        return 1;
                else
                        did = 0;
        }

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        return qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;

        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 cmd, sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        cmd = iommu->gcmd | DMA_GCMD_QIE;
        iommu->gcmd |= DMA_GCMD_QIE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
        if (!qi->desc) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason < 0x20 +
                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return intr_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* unmask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
        unsigned long flag;
        struct intel_iommu *iommu = get_irq_data(irq);

        /* mask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

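/*
 * Decode and log a single fault record that has already been read out
 * of the fault recording registers.
 */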
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                printk(KERN_ERR
                       "DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
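/*
 * Fault event interrupt handler: walk the primary fault recording
 * registers, log and clear each pending fault, then clear any
 * remaining fault status bits.
 */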
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
                       fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

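/*
 * Allocate an irq/vector for the fault-reporting MSI of @iommu and
 * install dmar_fault() as its handler.
 */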
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
                return -EINVAL;
        }

        set_irq_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                set_irq_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_set_interrupt(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable fault "
                               "interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no pending
         * invalidation requests now, it's safe to re-enable queued
         * invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}