/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
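/*
 * Rough shape of the ACPI DMAR table the parsers below walk (field
 * names follow the ACPICA structures used in this file; the layout is
 * shown for illustration only, see the VT-d specification for the
 * authoritative definition):
 *
 *	struct acpi_table_dmar {
 *		struct acpi_table_header header;
 *		u8 width;	(host address width - 1)
 *		u8 flags;	(bit 0: interrupt remapping supported)
 *		...
 *	};
 *	followed by a packed sequence of variable-length sub-tables
 *	(DRHD, RMRR, ATSR, RHSA), each beginning with:
 *	struct acpi_dmar_header {
 *		u16 type;
 *		u16 length;	(used to step to the next entry)
 *	};
 */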
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "
/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* print the name before dropping our reference */
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
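/*
 * Note that dmar_parse_dev_scope() above makes two passes over the
 * scope entries: the first pass only counts ENDPOINT/BRIDGE entries so
 * the pci_dev array can be sized with a single kcalloc(), the second
 * pass resolves each counted entry through dmar_parse_one_dev_scope().
 */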
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX
		       "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			/* We don't do anything with RHSA (yet?) */
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}
static int bios_warned;

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     drhd->address,
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *)dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
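/*
 * Queue geometry, as implemented by qi_submit_sync() below (the
 * "keep one slot spare" rationale is an assumption, not a spec quote):
 * each submission occupies two consecutive slots of the QI_LENGTH
 * ring, the caller's descriptor plus a wait descriptor whose status
 * write-back signals completion:
 *
 *	ring:   ... | desc | wait | desc | wait | ...
 *
 * Hence free_cnt is decremented by 2 per submission, and the spin
 * below waits for free_cnt >= 3, presumably so at least one free slot
 * always separates the tail from the head.
 */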
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled, to prevent the interrupt
		 * context from queueing another cmd while one is already
		 * submitted and waiting for completion on this cpu. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
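/*
 * Illustrative usage of qi_submit_sync() (a sketch; qi_global_iec()
 * below is a real caller): fill in a descriptor and submit it, and the
 * call returns once hardware has executed it.
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */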
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
1061 * Enable queued invalidation.
1063 static void __dmar_enable_qi(struct intel_iommu *iommu)
1065 u32 sts;
1066 unsigned long flags;
1067 struct q_inval *qi = iommu->qi;
1069 qi->free_head = qi->free_tail = 0;
1070 qi->free_cnt = QI_LENGTH;
1072 spin_lock_irqsave(&iommu->register_lock, flags);
1074 /* write zero to the tail reg */
1075 writel(0, iommu->reg + DMAR_IQT_REG);
1077 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1079 iommu->gcmd |= DMA_GCMD_QIE;
1080 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1082 /* Make sure hardware complete it */
1083 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1085 spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
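/*
 * Illustrative caller pattern for the setup above (a sketch, not a
 * quote of in-tree code): IOMMU initialization would enable QI once
 * per unit and might then flush stale state, e.g.
 *
 *	if (dmar_enable_qi(iommu))
 *		printk(KERN_ERR "IOMMU: failed to enable queued invalidation\n");
 *	else
 *		qi_global_iec(iommu);
 */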
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
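/*
 * For reference, each primary fault record read above is 16 bytes
 * (PRIMARY_FAULT_REG_LEN); the layout below is derived from the
 * register offsets used in dmar_fault(), see the VT-d specification
 * for the authoritative definition:
 *
 *	bytes  0..7	faulting page address (dma_frcd_page_addr())
 *	bytes  8..11	source id of the request (dma_frcd_source_id())
 *	bytes 12..15	fault reason, request type and the F bit;
 *			writing DMA_FRCD_F back clears the record
 */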
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}