/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"
/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                      mapping         iommu_                 page
 *    | da      pa      va        (d)-(p)-(v)     function               type
 *  ---------------------------------------------------------------------------
 *  1 | c       c       c         1 - 1 - 1       _kmap() / _kunmap()     s
 *  2 | c       c,a     c         1 - 1 - 1       _kmalloc()/ _kfree()    s
 *  3 | c       d       c         1 - n - 1       _vmap() / _vunmap()     s
 *  4 | c       d,a     c         1 - n - 1       _vmalloc()/ _vfree()    n*
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */
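
/*
 * Editor's usage sketch (not part of the original driver; a hedged
 * illustration only): given a valid 'struct iommu *obj' obtained elsewhere
 * through the plat/iommu.h API, patterns 2 and 4 from the table above could
 * be exercised roughly like this. 'obj' and the sizes are assumptions.
 *
 *      u32 da;
 *
 *      da = iommu_kmalloc(obj, 0, SZ_64K, 0);   -- pattern 2: 1-1-1, kmalloc'ed buffer
 *      if (IS_ERR_VALUE(da))
 *              goto fail;
 *      ...
 *      iommu_kfree(obj, da);
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);    -- pattern 4: 1-n-1, vmalloc'ed buffer
 *      if (IS_ERR_VALUE(da))
 *              goto fail;
 *      ...
 *      iommu_vfree(obj, da);
 */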
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg_dma_len(sg);

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
        int i;
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        nr_entries = 0;
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
                }
        }
        BUG_ON(bytes);

        return nr_entries;
}
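
/*
 * Editor's worked example (not in the original source): for
 * bytes = SZ_16M + SZ_1M + SZ_64K + SZ_4K = 0x1111000, the loop above yields
 * one 16MB, one 1MB, one 64KB and one 4KB entry, i.e. nr_entries = 4,
 * instead of the 0x1111 (4369) entries a plain 4KB-per-entry split would need.
 */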
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
                nr_entries = sgtable_nents(bytes);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}
/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1);
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}
/**
 * find_iovm_area  -  find iovma which includes @da
 * @da:         iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                /*
                 * Reserve the first page for NULL
                 */
                start = PAGE_SIZE;
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end >= start)
                        break;

                if (start + bytes < tmp->da_start)
                        goto found;

                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start > prev_end) && (ULONG_MAX - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
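
/*
 * Editor's note (a hedged reading of the code above, not in the original
 * source): with IOVMF_DA_ANON | IOVMF_LINEAR and, say, bytes = SZ_2M,
 * iopgsz_max() picks SZ_1M as the alignment, so the anonymous search starts
 * at the first 1MB-aligned address after page 0 and every candidate hole is
 * rounded up to a 1MB boundary before it is tested against existing iovmas.
 */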
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}
/**
 * da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given
 * device virtual address.
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
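
/*
 * Editor's usage sketch (hedged, not in the original source): a client that
 * only kept the device address around could recover the kernel-side pointer
 * for a mapping created earlier by iommu_vmalloc() or iommu_kmalloc();
 * 'obj' and 'da' are assumed to come from such an earlier call.
 *
 *      void *va = da_to_va(obj, da);
 *
 *      if (va)
 *              memset(va, 0, SZ_4K);
 */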
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va;

        va = phys_to_virt(pa);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = iopgsz_max(len);

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is continuous (linear).
                 */
                pa += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}
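
/*
 * Editor's worked example (hedged, not in the original source): for a linear
 * region of len = 0x111000 bytes, the loop above emits a 1MB, a 64KB and a
 * 4KB scatterlist entry in that order, matching what sgtable_nents() counted
 * when the table was sized for the IOVMF_LINEAR | IOVMF_DA_ANON case.
 */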
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!obj || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                int pgsz;
                size_t bytes;
                struct iotlb_entry e;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                if (pgsz < 0) {
                        /* not an iommu page size: unwind and bail out */
                        err = -EINVAL;
                        goto err_out;
                }
                flags |= pgsz;

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, da);

                BUG_ON(!iopgsz_ok(bytes));

                da += bytes;
        }
        return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;

        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        while (total > 0) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, start);
                if (bytes == 0)
                        bytes = PAGE_SIZE;
                else
                        dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}
static u32 map_iommu_region(struct iommu *obj, u32 da,
              const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
         const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
}
/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
                 u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
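
/*
 * Editor's usage sketch (hedged, not in the original source): assuming the
 * caller already built an 'sgt' whose entries are iommu page sizes
 * (4KB/64KB/1MB/16MB), a discontiguous MMIO-style mapping could look like:
 *
 *      u32 da;
 *
 *      da = iommu_vmap(obj, 0, sgt, IOVMF_MMIO);   -- da = 0 requests IOVMF_DA_ANON
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...
 *      sgt = iommu_vunmap(obj, da);                -- the caller's sgt is handed back to free
 */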
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
         * Just return 'sgt' to the caller to free.
         */
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
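
/*
 * Editor's usage sketch (hedged, not in the original source): a 1-n-1
 * allocation where the device sees a contiguous 1MB region backed by
 * scattered vmalloc pages; 'obj' is assumed valid.
 *
 *      u32 da;
 *      void *va;
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      va = da_to_va(obj, da);         -- cpu-side view of the same buffer
 *      ...
 *      iommu_vfree(obj, da);
 */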
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                          size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}
/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @pa:         contiguous physical memory
 * @bytes:      length of the region to map, in bytes
 * @flags:      iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
                 u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
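
/*
 * Editor's usage sketch (hedged, not in the original source): mapping an
 * already physically contiguous region (a physical address 'pa' of some
 * device memory, an assumption for illustration) so the iommu sees it at
 * 'da' while the mpu accesses it through the ioremap()'ed 'va':
 *
 *      u32 da;
 *
 *      da = iommu_kmap(obj, 0, pa, SZ_64K, 0);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...
 *      iommu_kunmap(obj, da);          -- also iounmap()s the mpu-side mapping
 */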
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      bytes for allocation
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
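
/*
 * Editor's usage sketch (hedged, not in the original source): a small
 * physically contiguous DMA-able buffer mapped 1-1-1; 'obj' is assumed valid.
 *
 *      u32 da;
 *
 *      da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...
 *      iommu_kfree(obj, da);           -- unmaps and kfree()s the buffer
 */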
/**
 * iommu_kfree  -  release memory allocated by 'iommu_kmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");