/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

#define SMMU_TLB_CONFIG				0x14
#define SMMU_TLB_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_TLB_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG				0x18
#define SMMU_PTC_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_PTC_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_TLB_HIT_COUNT		0x1f0
#define SMMU_STATS_TLB_MISS_COUNT		0x1f4
#define SMMU_STATS_PTC_HIT_COUNT		0x1f8
#define SMMU_STATS_PTC_MISS_COUNT		0x1fc

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

/* AHB Arbiter Registers */
#define AHB_XBAR_CTRL				0xe0
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE	1
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT	17

#define SMMU_NUM_ASIDS				4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
/* Renamed from the inconsistent SMMU_TLB_FLUSH_ASID_MATCH_disable */
#define SMMU_TLB_FLUSH_ASID_MATCH__DISABLE		\
	(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
		SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
	(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
		SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
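
/*
 * Worked example (illustrative, made-up address): SMMU_TLB_FLUSH_VA()
 * packs the match type and the shifted virtual address into a single
 * SMMU_TLB_FLUSH value.  For iova == 0x12345678,
 * SMMU_TLB_FLUSH_VA(iova, GROUP) is
 * ((0x12345678 & 0xffffc000) >> 12) | 3 == 0x12347, i.e. a
 * VA_MATCH_GROUP flush of the 16KB group containing that address.
 */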

#define SMMU_PAGE_SHIFT	12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
/* Fixed: the parameter was named "addr" while the body used "pdn" */
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
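
/*
 * Address decomposition example (illustrative address): for iova
 * 0x12345678, SMMU_ADDR_TO_PDN() gives 0x48 (bits [31:22], the index
 * into the page directory), SMMU_ADDR_TO_PFN() gives 0x12345
 * (bits [31:12]), and the index within the page table is
 * 0x12345 % 1024 == 0x345.
 */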

#define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE	(1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT	(1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N	(_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)	(((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)	(((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)	\
		((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
		(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
		pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))

#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client)	[HWGRP_##client] = SMMU_##client##_ASID

static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x)	(smmu_hwgrp_asid_reg[x])
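
/*
 * HWGRP_INIT() relies on token pasting: HWGRP_INIT(DC), for instance,
 * expands to "[HWGRP_DC] = SMMU_DC_ASID", so the table above maps each
 * hardware-group ID (from <mach/smmu.h>) to its ASID register offset.
 */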

/*
 * Per client for address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per address space
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs, *regs_ahbarb;
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	struct device	*dev;
	int		num_as;
	struct smmu_as	*as;		/* Run-time allocated array */
	struct page	*avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long	translation_enable_0;
	unsigned long	translation_enable_1;
	unsigned long	translation_enable_2;
	unsigned long	asid_security;
};

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 * SMMU/AHB register accessors
 */
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
	return readl(smmu->regs + offs);
}
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	writel(val, smmu->regs + offs);
}

static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
{
	return readl(smmu->regs_ahbarb + offs);
}
static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	writel(val, smmu->regs_ahbarb + offs);
}

#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_ + (size_t)(size));		\
	} while (0)
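
/*
 * The page tables live in normal cacheable memory while the SMMU walks
 * them by physical address, so every PTE/PDE update must be cleaned out
 * of both the inner (per-CPU) data cache and the outer L2 before the
 * hardware is told to re-fetch it; FLUSH_CPU_DCACHE() does both.
 */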

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c)	(u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	/* Undo the enables done so far before reporting the conflict */
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	int val;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	val = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return val;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__DISABLE;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}

static void smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);

	smmu_flush_regs(smmu, 1);

	val = ahb_read(smmu, AHB_XBAR_CTRL);
	val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
		AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
	ahb_write(smmu, val, AHB_XBAR_CTRL);
}

static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}
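
/*
 * Note the ordering above: the PTC (page-table cache) entry is
 * invalidated before the TLB entry, so a TLB refill racing with the
 * flush cannot be served from a stale cached PTE.
 */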

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}

static void free_pdir(struct smmu_as *as)
{
	unsigned long addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}

/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page,
				 sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 unsigned long addr, unsigned long pfn)
{
}
#endif

/*
 * Caller must lock/unlock as
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir;
	int pdn;
	u32 val;
	struct smmu_device *smmu = as->smmu;

	if (as->pdir_page)
		return 0;

	as->pte_count = devm_kzalloc(smmu->dev,
		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
	if (!as->pte_count) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device PTE counters\n");
		return -ENOMEM;
	}
	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pdir_page) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device page directory\n");
		devm_kfree(smmu->dev, as->pte_count);
		as->pte_count = NULL;
		return -ENOMEM;
	}
	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	return 0;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}
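
/*
 * as->pte_count[pdn] tracks the number of valid PTEs in each page
 * table: mapping bumps it when a vacant slot becomes used, unmapping
 * drops it, and the PTBL page itself is freed (see __smmu_iommu_unmap())
 * once its count reaches zero.
 */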

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}
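
/*
 * Since pgsize_bitmap advertises only 4KB pages, the IOMMU core splits
 * larger requests and calls unmap one page at a time; returning
 * SMMU_PAGE_SIZE reports that exactly one page was unmapped.
 */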

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	/* Fixed: "c" is not a valid client after the loop; use "dev" */
	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(c->dev));
			/* Fixed: free the client only after its last use */
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *tmp = &smmu->as[i];

		spin_lock_irqsave(&tmp->lock, flags);
		if (!tmp->pdir_page) {
			as = tmp;
			goto found;
		}
		spin_unlock_irqrestore(&tmp->lock, flags);
	}
	dev_err(smmu->dev, "no free AS\n");
	return -ENODEV;

found:
	if (alloc_pdir(as) < 0)
		goto err_alloc_pdir;

	spin_lock(&smmu->lock);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock(&smmu->lock);

	spin_unlock_irqrestore(&as->lock, flags);
	domain->priv = as;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
	return 0;

err_alloc_pdir:
	spin_unlock_irqrestore(&as->lock, flags);
	return -ENODEV;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	if (!list_empty(&as->client)) {
		struct smmu_client *c, *tmp;

		/* detach frees each client, so use the _safe iterator */
		list_for_each_entry_safe(c, tmp, &as->client, list)
			smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&smmu->lock, flags);
	smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return 0;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct resource *regs, *regs2, *window;
	struct device *dev = &pdev->dev;
	int i, err = 0;

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!regs || !regs2 || !window) {
		dev_err(dev, "No SMMU resources\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	smmu->dev = dev;
	smmu->num_as = SMMU_NUM_ASIDS;
	smmu->iovmm_base = (unsigned long)window->start;
	smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
	smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
	smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
					 resource_size(regs2));
	if (!smmu->regs || !smmu->regs_ahbarb) {
		dev_err(dev, "failed to remap SMMU registers\n");
		err = -ENXIO;
		goto fail;
	}

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	smmu->as = devm_kzalloc(dev,
			sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
	if (!smmu->as) {
		dev_err(dev, "failed to allocate smmu_as\n");
		err = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		spin_lock_init(&as->client_lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	smmu_setup_regs(smmu);
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page) {
		err = -ENOMEM;
		goto fail;
	}

	smmu_handle = smmu;
	return 0;

fail:
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	if (smmu->as) {
		for (i = 0; i < smmu->num_as; i++) {
			if (smmu->as[i].pdir_page) {
				ClearPageReserved(smmu->as[i].pdir_page);
				__free_page(smmu->as[i].pdir_page);
			}
		}
		devm_kfree(dev, smmu->as);
	}
	devm_kfree(dev, smmu);
	return err;
}
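
/*
 * Note on cleanup: the devm_* allocations above are released
 * automatically when probe fails, so the explicit devm_kfree()/
 * devm_iounmap() calls in the failure path only release them early.
 */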

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	struct device *dev = smmu->dev;
	int i;

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	platform_set_drvdata(pdev, NULL);

	if (smmu->as) {
		for (i = 0; i < smmu->num_as; i++)
			free_pdir(&smmu->as[i]);
		devm_kfree(dev, smmu->as);
	}

	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);

	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	devm_kfree(dev, smmu);
	smmu_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
	},
};

/* Fixed: an initcall must be __init, not __devinit */
static int __init tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_LICENSE("GPL v2");