x86, AMD IOMMU: add pre-allocation of protection domains
arch/x86/kernel/amd_iommu.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

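/*
 * Worked example (4K pages): for addr = 0x1800 and size = 0x1000 the
 * in-page offset is 0x800, so round_up(0x800 + 0x1000, 0x1000) is
 * 0x2000 and to_pages() returns 2 -- the range straddles two pages
 * even though size itself is only one page.
 */
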
static DEFINE_RWLOCK(amd_iommu_devtable_lock);

struct command {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & IOMMU_CAP_NPCACHE;
}

static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = (iommu->cmd_buf + tail);
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

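/*
 * A COMPLETION_WAIT command causes the IOMMU to write a value to a
 * given system address once every command queued before it has been
 * executed. The function below queues such a command and then spins
 * until the write arrives, which guarantees that all previously
 * queued invalidations have taken effect in hardware.
 */
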
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret;
	struct command cmd;
	volatile u64 ready = 0;
	unsigned long ready_phys = virt_to_phys(&ready);

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
	cmd.data[1] = HIGH_U32(ready_phys);
	cmd.data[2] = 1; /* value written to 'ready' */
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	ret = iommu_queue_command(iommu, &cmd);

	if (ret)
		return ret;

	while (!ready)
		cpu_relax();

	return 0;
}

static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct command cmd;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	iommu->need_sync = 1;

	return iommu_queue_command(iommu, &cmd);
}

static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct command cmd;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = LOW_U32(address);
	cmd.data[3] = HIGH_U32(address);
	if (s)
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde)
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	iommu->need_sync = 1;

	return iommu_queue_command(iommu, &cmd);
}

static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
			     u64 address, size_t size)
{
	int i;
	unsigned pages = to_pages(address, size);

	address &= PAGE_MASK;

	for (i = 0; i < pages; ++i) {
		iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
		address += PAGE_SIZE;
	}

	return 0;
}

static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}

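/*
 * Illustration of the walk above (assuming 4K pages and 512-entry
 * tables, which is what the 512GB L3 limit implies): a bus address
 * splits into bits 38-30 for the L2 index, bits 29-21 for the L1
 * index, bits 20-12 for the L0 index and bits 11-0 for the offset
 * within the mapped page.
 */
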
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

static unsigned long dma_mask_to_pages(unsigned long mask)
{
	return (mask >> PAGE_SHIFT) +
		(PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
}

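/*
 * Address allocation is next-fit over the aperture bitmap: the search
 * starts at dom->next_bit and, if nothing is found there, is retried
 * once from the beginning of the bitmap. bad_dma_address is returned
 * when the aperture is exhausted.
 */
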
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages)
{
	unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
	unsigned long address;
	unsigned long size = dom->aperture_size >> PAGE_SHIFT;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	limit = limit < size ? limit : size;

	if (dom->next_bit >= limit)
		dom->next_bit = 0;

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
				   0, boundary_size, 0);
	if (address == -1)
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
					   0, boundary_size, 0);

	if (likely(address != -1)) {
		set_bit_string(dom->bitmap, address, pages);
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);
}

static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	set_bit_string(dom->bitmap, start_page, pages);
}

static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}

static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);

	kfree(dom->bitmap);

	kfree(dom);
}

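/*
 * The allocator below creates a dma_ops domain with a 2^order byte
 * aperture, so order 25..30 yields 32 MB (1 << 25) up to 1 GB
 * (1 << 30). All L1 page tables covering the aperture are allocated
 * up front, which is why dma_ops_domain_map() never has to allocate
 * memory in the mapping fast path.
 */
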
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = to_pages(iommu->exclusion_start,
				     iommu->exclusion_length);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
				     GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;

	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & 0x07) << 9;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = pte_root;
	amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
	amd_iommu_dev_table[devid].data[2] = domain->id;

	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}

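/*
 * Resolves the IOMMU and protection domain in charge of a device.
 * Returns 0 when the device is not translated at all (callers then
 * fall back to a 1:1 mapping); a device seen for the first time is
 * attached to its IOMMU's default dma_ops domain.
 */
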
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);

	pcidev = to_pci_dev(dev);
	_bdf = (pcidev->bus->number << 8) | pcidev->devfn;

	if (_bdf >= amd_iommu_last_bdf) {
		*iommu = NULL;
		*domain = NULL;
		*bdf = 0xffff;
		return 0;
	}

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	dma_dom = (*iommu)->default_dom;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
		       "device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

	return 1;
}

static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

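/*
 * Maps a contiguous physical range of 'size' bytes at page
 * granularity and returns the corresponding DMA address. The in-page
 * offset of paddr is added back to the result, so unaligned buffers
 * keep their alignment relative to the page.
 */
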
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	int i;

	pages = to_pages(paddr, size);
	paddr &= PAGE_MASK;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

out:
	return address;
}

static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = to_pages(dma_addr, size);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);
}

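/*
 * The functions below are this driver's dma_ops entry points. They
 * share a common pattern: look up the device's IOMMU and domain, do
 * the actual work under the domain lock, invalidate IO/TLB entries
 * where required and finish with a completion-wait if any command
 * was queued.
 */
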
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
	if (addr == bad_dma_address)
		goto out;

	if (iommu_has_npcache(iommu))
		iommu_flush_pages(iommu, domain->id, addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	iommu_flush_pages(iommu, domain->id, dma_addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length = s->length;
	}

	return nelems;
}

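/*
 * map_sg maps each scatterlist element with its own call to
 * __map_single(); elements are not merged. On failure everything
 * mapped so far is unmapped again and 0 is returned.
 */
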
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
		if (iommu_has_npcache(iommu))
			iommu_flush_pages(iommu, domain->id, s->dma_address,
					  s->dma_length);
	}

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}

static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		iommu_flush_pages(iommu, domain->id, s->dma_address,
				  s->dma_length);
		s->dma_address = s->dma_length = 0;
	}

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

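/*
 * Coherent allocations are backed by ordinary pages which are zeroed
 * and mapped DMA_BIDIRECTIONAL into the device's domain. For devices
 * not behind an IOMMU the physical address is handed back directly.
 */
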
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;

	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	memset(virt_addr, 0, size);
	paddr = virt_to_phys(virt_addr);

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL);

	if (*dma_addr == bad_dma_address) {
		free_pages((unsigned long)virt_addr, get_order(size));
		virt_addr = NULL;
		goto out;
	}

	if (iommu_has_npcache(iommu))
		iommu_flush_pages(iommu, domain->id, *dma_addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;
}

static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
	iommu_flush_pages(iommu, domain->id, dma_addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * If the driver core would inform the DMA layer when a driver grabs a
 * device we wouldn't need to preallocate the protection domains
 * anymore. For now we have to.
 */
void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = (dev->bus->number << 8) | dev->devfn;
		if (devid >= amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu, order);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		set_device_domain(iommu, &dma_dom->domain, devid);
		printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
		       dma_dom->domain.id);
		print_devid(devid, 1);
	}
}