/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
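
/*
 * For example, with a 4K granule and a 48-bit IAS: pg_shift is 12,
 * bits_per_level is 9 and levels is 4, so the walk starts at level 0
 * and ARM_LPAE_LVL_SHIFT yields 39, 30, 21 and 12 for levels 0-3,
 * matching the VMSAv8-64 4K translation regime.
 */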
#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
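
/*
 * ARM_LPAE_PGD_IDX widens the index at the start level when the pgd spans
 * more than one granule (stage-2 pgd concatenation), so the extra top bits
 * of the address are consumed by the first lookup.
 */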
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
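
/*
 * With a 4K granule this yields the familiar block/page sizes: 1GB at
 * level 1 (1 << (3 + 3 * 9)), 2MB at level 2 and 4KB at level 3.
 */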
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
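
/*
 * These accessors extract the output address from bits [47:pg_shift] of a
 * PTE; e.g. with 4K pages, iopte_to_pfn(pfn_to_iopte(pfn, d), d) == pfn
 * for any pfn below 1 << (48 - 12).
 */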
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
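
/*
 * The table walker may not snoop the CPU caches, so each PTE update is
 * pushed out to the device with the streaming DMA API before a walk can
 * observe it.
 */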
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}
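
/*
 * Recursively descend until the requested size matches the block/page size
 * at the current level; e.g. a 2MB mapping with a 4K granule walks from the
 * start level down to level 2, where a block entry is installed, allocating
 * intermediate tables on the way down as needed.
 */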
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
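
/*
 * A minimal caller-side sketch (assuming an io_pgtable_cfg "cfg" already
 * populated by an IOMMU driver; iova, paddr and cookie are illustrative):
 *
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops && !ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ))
 *		... program cfg.arm_lpae_s1_cfg.{ttbr,tcr,mair} into HW ...
 */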
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
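
/*
 * Split a block mapping in order to unmap a sub-region of it: build a
 * next-level table that re-maps everything in the block except the part
 * being unmapped, then swap it in for the old block entry.
 */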
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
	iova &= ~(blk_size - 1);
	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
	return size;
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
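
/*
 * Restrict the supported page sizes to those of a single granule; e.g. a
 * pgsize_bitmap of (SZ_4K | SZ_64K | SZ_2M) on a kernel with 4K pages
 * selects the 4K granule below and is restricted to SZ_4K | SZ_2M.
 */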
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
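	/*
	 * e.g. a 4K granule with a 39-bit IAS gives pg_shift = 12,
	 * bits_per_level = 9 and va_bits = 27, hence a 3-level walk
	 * with a 512-entry (4KB) pgd.
	 */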
	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif