/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "
/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
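
/*
 * A DMAR device scope entry names a start bus plus a path of (device,
 * function) hops.  The helper below walks that path hop by hop, following
 * each bridge's subordinate bus, and returns the pci_dev at the end of the
 * path (with a reference held via pci_get_slot()).
 */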
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in DMAR table, just
		 * ignore it
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* print the name before dropping our reference */
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
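
/*
 * Parse all device scope entries in [start, end) in two passes: the first
 * pass only counts the ENDPOINT/BRIDGE entries so a right-sized pci_dev
 * pointer array can be allocated, the second pass resolves each entry to a
 * device via dmar_parse_one_dev_scope().
 */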
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
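
/*
 * Decide whether @dev is covered by an ATSR unit, i.e. whether ATS may be
 * used for it: walk up from the device to its PCIe root port and report a
 * match if that root port is listed in the unit's device scope (or if the
 * unit is INCLUDE_ALL for the device's segment).
 */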
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			/* We don't do anything with RHSA (yet?) */
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
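
/*
 * Find the DRHD unit responsible for @dev: either an INCLUDE_ALL unit on
 * the device's segment, or a unit whose device scope explicitly lists the
 * device (or one of its parent bridges).
 */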
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
#ifdef CONFIG_DMAR
				dmar_disabled = 1;
#endif
				return 0;
			}
			break;
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;
}
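
/*
 * Early boot-time detection.  This only probes for the DMAR table and
 * sanity-checks it; the table is unmapped again at the end because the
 * early ACPI mapping is temporary (see the "fixed map" comment in
 * parse_dmar_table(), which re-detects and re-maps the table later).
 */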
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
			iommu_detected = 1;
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
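
/*
 * Check the fault status register for errors affecting the descriptor at
 * @index: an Invalidation Queue Error (IQE) on our own descriptor, an
 * Invalidation Time-out Error (ITE) that aborts pending wait descriptors,
 * or an Invalidation Completion Error (ICE), which is only acknowledged
 * here.
 */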
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
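
/*
 * Each submission consumes two queue slots: the caller's descriptor plus a
 * trailing invalidation-wait descriptor whose status write (to
 * qi->desc_status[wait_index]) lets the submitter poll for completion.
 * This appears to be why the code below waits for free_cnt >= 3 before
 * claiming a pair: one slot is always kept spare.
 */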
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
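
/*
 * For the device-IOTLB flush below, a non-zero @mask requests a
 * (1 << mask)-page invalidation; per the VT-d spec the size is encoded by
 * setting the low-order address bits, which is what the addr |= ...
 * arithmetic does.  @addr must therefore be size-aligned (BUG_ON check).
 */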
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* zero the status array so every slot starts out QI_FREE */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;

	return dmar->flags & 0x1;
}