/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
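
/*
 * Worked example for the macros above (illustrative only): with a 4K
 * granule, pg_shift = 12 and bits_per_level = 9, so a 4-level walk gives
 * ARM_LPAE_LVL_SHIFT values of 39/30/21/12 for levels 0-3 and
 * ARM_LPAE_BLOCK_SIZE values of 512G/1G/2M/4K respectively, matching the
 * AArch64 VMSA translation layout.
 */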
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}
static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
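
/*
 * Illustrative note on the two helpers above: with the 64K granule
 * (pg_shift >= 16), PA bits 51:48 of a 52-bit output address are carried
 * in PTE bits 15:12. For example, paddr_to_iopte() folds a paddr of
 * 0x000f_ffff_ffff_0000 down so the 0xf lands in bits 15:12, and
 * iopte_to_paddr() rotates it back up before masking.
 */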
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}
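
/*
 * Note on the DMA handling above: unless IO_PGTABLE_QUIRK_NO_DMA declares
 * the table walker cache-coherent, table memory is mapped DMA_TO_DEVICE
 * so that CPU-side PTE updates can be pushed to a non-coherent walker via
 * dma_sync_single_for_device() (see __arm_lpae_sync_pte() below). The
 * identity check against virt_to_phys() is what lets the rest of this
 * file treat DMA addresses and physical addresses interchangeably.
 */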
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
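
/*
 * Note on the race resolution above: a concurrent mapper may install its
 * own table at *ptep between our allocation and the cmpxchg. The loser
 * frees its table and retries against the winner's entry, while
 * ARM_LPAE_PTE_SW_SYNC records that the visible PTE has already been made
 * observable to the hardware walker, so later mappers can skip the sync.
 */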
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
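
/*
 * Illustrative walk (assuming a 4K granule, 48-bit IAS and a 4-level
 * table): mapping a 2M-aligned region of size SZ_2M recurses from level 0
 * down to level 2, where size == ARM_LPAE_BLOCK_SIZE(2, data) and a block
 * entry is written; a SZ_4K mapping descends one level further and
 * installs a page entry at level 3.
 */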
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
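
/*
 * For example (stage 1): IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields
 * nG + AP_UNPRIV + ATTRINDX = IDX_CACHE, i.e. a writable, unprivileged,
 * write-back cacheable entry, while IOMMU_READ alone additionally sets
 * ARM_LPAE_PTE_AP_RDONLY.
 */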
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}
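
/*
 * Note: arm_lpae_unmap() returns the number of bytes actually unmapped
 * (0 on failure); TLB maintenance is driven through the io_pgtable_tlb_*()
 * hooks invoked during the walk rather than by the caller directly.
 */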
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
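
/*
 * Worked example (illustrative): on a host with PAGE_SIZE == SZ_4K and a
 * caller-supplied pgsize_bitmap of (SZ_4K | SZ_64K | SZ_2M), the 4K
 * granule is chosen to match the CPU and the bitmap is restricted to
 * (SZ_4K | SZ_2M). Feeding that into arm_lpae_alloc_pgtable() below with
 * ias = 48 then gives pg_shift = 12, bits_per_level = 9, va_bits = 36,
 * levels = 4 and a single-granule pgd (pgd_bits = 9, pgd_size = 4K).
 */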
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
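
/*
 * Concatenation example (illustrative): with ias = 40 and a 4K granule,
 * va_bits = 28 would need 4 levels but leaves only two level-0 pgd
 * entries. Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the pgd is widened to
 * two concatenated level-1 tables (pgd_size = 8K) and the stage-2 walk
 * starts at level 1 instead, saving one level of lookup.
 */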
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
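
/*
 * Typical usage (a hedged sketch; the exact cfg fields a driver fills in
 * depend on that driver, and my_tlb_ops/cookie here are hypothetical):
 * an IOMMU driver reaches the formats above via alloc_io_pgtable_ops(),
 * e.g.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *
 * after which ops->map(), ops->unmap() and ops->iova_to_phys() drive the
 * functions in this file, exactly as the selftests below demonstrate.
 */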
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif