/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
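
/*
 * Note on how this bitmap is consumed (a sketch, not code from this file):
 * the generic IOMMU core splits an iommu_map() request into chunks matching
 * pgsize_bitmap before calling back into this driver.  Mapping 0x110000
 * bytes at a 1MB-aligned device address, for example, arrives as one 1MB
 * section plus one 64KB large page, i.e. two separate omap_iommu_map()
 * calls.
 */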
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}
/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void iopte_free(u32 *iopte)
{
	/* Note: freed ioptes must be clean, ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* the page table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
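
/*
 * Why 16 identical entries (a note, not from the original source): a 16MB
 * supersection is described by replicating one descriptor across 16
 * consecutive 1MB L1 slots, because da[23:20] still index the L1 table.
 * The same replication applies to 64KB large pages in the L2 table (see
 * iopte_alloc_large() below), where da[15:12] index the L2 table.
 */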
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
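
/*
 * Example (a sketch): how a client such as omap3isp reaches this driver
 * through the generic IOMMU API once bus_set_iommu() has run; error
 * handling omitted.
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_attach_device(domain, dev);	leads to omap_iommu_attach_dev()
 *	iommu_map(domain, da, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 *	iommu_unmap(domain, da, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */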
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");