/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
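/*
 * Illustrative use of the iterator above (the same pattern appears in
 * flush_iotlb_page() below; 'i' and 'cr' are ordinary locals supplied
 * by the caller):
 *
 *	int i;
 *	struct cr_regs cr;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		... inspect cr.cam / cr.ram here ...
 *	}
 */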
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
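/*
 * These four sizes correspond to the ARM short-descriptor formats
 * handled by the pagetable code below: small page (4K), large page
 * (64K), section (1M) and supersection (16M).
 */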
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

struct iotlb_lock {
	short base;
	short vict;
};
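/*
 * Worked example (illustrative value): a raw MMU_LOCK register value of
 * 0xcb0 decodes as
 *
 *	MMU_LOCK_BASE(0xcb0) == (0xcb0 & 0x7c00) >> 10 == 3
 *	MMU_LOCK_VICT(0xcb0) == (0xcb0 & 0x01f0) >> 4  == 11
 *
 * i.e. TLB entries 0..2 are preserved and entry 11 is the next victim
 * slot for a TLB load.
 */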
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	return arch_iommu->cr_valid(cr);
}
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	return arch_iommu->alloc_cr(obj, e);
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	/* flush the entry, then load it into the selected victim slot */
	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
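/*
 * The lock register thus partitions the TLB: entries below l.base are
 * preserved and never evicted, while the victim pointer walks
 * l.base..nr_tlb_entries-1 and wraps back to l.base. A preserved entry
 * (e->prsvd) grows the protected region; a normal load only advances
 * the victim.
 */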
#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}
/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
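/*
 * Stride math above, worked through (assuming a 32-byte L1 cache line,
 * which is typical for these parts): L1_CACHE_BYTES / sizeof(u32) == 8,
 * so each MCR cleans one cache line covering eight 32-bit descriptors
 * and the loop steps eight entries at a time until it passes 'last'.
 */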
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
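/*
 * Worked example (illustrative addresses): a 16MB supersection mapped
 * at da == 0x48000000 lands at pgd index 0x480 (da >> IOPGD_SHIFT) and
 * is replicated into the 16 consecutive slots 0x480..0x48f, as the ARM
 * short-descriptor format requires for supersections.
 */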
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
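/*
 * Usage sketch (illustrative, values assumed): storing one 4K mapping at
 * device address 0x40000000 amounts to
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x40000000, pa, MMU_CAM_PGSZ_4K | prot);
 *	omap_iopgtable_store_entry(obj, &e);
 *
 * which is exactly the path omap_iommu_map() takes further below.
 */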
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
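/*
 * Example of the bookkeeping above (illustrative): clearing a da that
 * was mapped as a 64K large page rewinds to the first of its 16
 * identical ptes, zeroes all 16 and returns 16 * IOPTE_SIZE == SZ_64K,
 * telling the caller how much address space was torn down.
 */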
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);

err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 *	OMAP Device MMU (IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
			    u32 flags)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= 1;
	/* FIXME: add OMAP1 support */
	e->pgsz		= flags & MMU_CAM_PGSZ_MASK;
	e->endian	= flags & MMU_RAM_ENDIAN_MASK;
	e->elsz		= flags & MMU_RAM_ELSZ_MASK;
	e->mixed	= flags & MMU_RAM_MIXED_MASK;

	return iopgsz_to_bytes(e->pgsz);
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
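/*
 * Usage sketch from the generic IOMMU API side (illustrative; assumes a
 * device already attached to the domain via the iommu core):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, 0x40000000, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * The core then calls omap_iommu_map() above with bytes == SZ_4K.
 */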
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}
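/*
 * The translate helper used above masks the descriptor and ORs in the
 * offset bits of da, e.g. for a small page (illustrative numbers):
 *
 *	omap_iommu_translate(pte, da, IOPTE_MASK)
 *		== (pte & 0xfffff000) | (da & 0x00000fff)
 */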
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	/* no capabilities are advertised for now */
	return 0;
}
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
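/*
 * Note on pgsize_bitmap (generic core behaviour, summarized): the iommu
 * core splits an iommu_map() request into chunks whose sizes it finds in
 * this bitmap, so e.g. a 64K-aligned 128K request reaches omap_iommu_map()
 * as two 64K calls; sizes outside the bitmap never reach this driver.
 */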
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");