/* drivers/iommu/io-pgtable-arm-v7s.c */

/*
 * CPU-agnostic ARM page table allocator.
 *
 * ARMv7 Short-descriptor format, supporting
 * - Basic memory attributes
 * - Simplified access permissions (AP[2:1] model)
 * - Backwards-compatible TEX remap
 * - Large pages/supersections (if indicated by the caller)
 *
 * Not supporting:
 * - Legacy access permissions (AP[2:0] model)
 *
 * Almost certainly never supporting:
 * - PXN
 * - Domains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014-2015 ARM Limited
 * Copyright (c) 2014-2015 MediaTek Inc.
 */

#define pr_fmt(fmt)	"arm-v7s io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_v7s_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
 * and 12 bits in a page. With some carefully-chosen coefficients we can
 * hide the ugly inconsistencies behind these macros and at least let the
 * rest of the code pretend to be somewhat sane.
 */
#define ARM_V7S_ADDR_BITS		32
#define _ARM_V7S_LVL_BITS(lvl)		(16 - (lvl) * 4)
#define ARM_V7S_LVL_SHIFT(lvl)		(ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
#define ARM_V7S_TABLE_SHIFT		10

#define ARM_V7S_PTES_PER_LVL(lvl)	(1 << _ARM_V7S_LVL_BITS(lvl))
#define ARM_V7S_TABLE_SIZE(lvl)						\
	(ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))

#define ARM_V7S_BLOCK_SIZE(lvl)		(1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl)		((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK		((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl)		(ARM_V7S_PTES_PER_LVL(lvl) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl)	({				\
	int _l = lvl;							\
	((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
})

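/*
 * Plugging in the two levels makes the geometry concrete (these values
 * follow directly from the macros above):
 *
 *   _ARM_V7S_LVL_BITS(1) = 12  ->  4096 level-1 PTEs, 16KB table
 *   _ARM_V7S_LVL_BITS(2) =  8  ->   256 level-2 PTEs,  1KB table
 *   ARM_V7S_LVL_SHIFT(1) = 20  ->  1MB sections at level 1
 *   ARM_V7S_LVL_SHIFT(2) = 12  ->  4KB pages at level 2
 */
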
/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT to TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16

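/*
 * Concretely: a 64KB large page is 16 identical 4KB page entries in a
 * level-2 table, and a 16MB supersection is 16 identical 1MB section
 * entries in the level-1 table - hence the SZ_4K/SZ_64K/SZ_1M/SZ_16M
 * page-size bitmap advertised in arm_v7s_alloc_pgtable() below.
 */
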
/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

#define ARM_V7S_PTE_IS_VALID(pte)	(((pte) & 0x3) != 0)
#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \
	((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))

/* Page table bits */
#define ARM_V7S_ATTR_XN(lvl)		BIT(4 * (2 - (lvl)))
#define ARM_V7S_ATTR_B			BIT(2)
#define ARM_V7S_ATTR_C			BIT(3)
#define ARM_V7S_ATTR_NS_TABLE		BIT(3)
#define ARM_V7S_ATTR_NS_SECTION		BIT(19)

#define ARM_V7S_CONT_SECTION		BIT(18)
#define ARM_V7S_CONT_PAGE_XN_SHIFT	15

/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)

#define ARM_V7S_ATTR_MASK		0xff
#define ARM_V7S_ATTR_AP0		BIT(0)
#define ARM_V7S_ATTR_AP1		BIT(1)
#define ARM_V7S_ATTR_AP2		BIT(5)
#define ARM_V7S_ATTR_S			BIT(6)
#define ARM_V7S_ATTR_NG			BIT(7)
#define ARM_V7S_TEX_SHIFT		2
#define ARM_V7S_TEX_MASK		0x7
#define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)

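/*
 * Sanity-check the shift: ARM_V7S_ATTR_SHIFT(1) = 10 and
 * ARM_V7S_ATTR_SHIFT(2) = 4, which lines the 8-bit attribute block up
 * with bits [17:10] of a section entry and bits [11:4] of a page entry,
 * exactly as described above.
 */
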
#define ARM_V7S_ATTR_MTK_4GB		BIT(9) /* MTK extend it for 4GB mode */

/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
#define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)

/* Simplified access permissions */
#define ARM_V7S_PTE_AF			ARM_V7S_ATTR_AP0
#define ARM_V7S_PTE_AP_UNPRIV		ARM_V7S_ATTR_AP1
#define ARM_V7S_PTE_AP_RDONLY		ARM_V7S_ATTR_AP2

/* Register bits */
#define ARM_V7S_RGN_NC			0
#define ARM_V7S_RGN_WBWA		1
#define ARM_V7S_RGN_WT			2
#define ARM_V7S_RGN_WB			3

#define ARM_V7S_PRRR_TYPE_DEVICE	1
#define ARM_V7S_PRRR_TYPE_NORMAL	2
#define ARM_V7S_PRRR_TR(n, type)	(((type) & 0x3) << ((n) * 2))
#define ARM_V7S_PRRR_DS0		BIT(16)
#define ARM_V7S_PRRR_DS1		BIT(17)
#define ARM_V7S_PRRR_NS0		BIT(18)
#define ARM_V7S_PRRR_NS1		BIT(19)
#define ARM_V7S_PRRR_NOS(n)		BIT((n) + 24)

#define ARM_V7S_NMRR_IR(n, attr)	(((attr) & 0x3) << ((n) * 2))
#define ARM_V7S_NMRR_OR(n, attr)	(((attr) & 0x3) << ((n) * 2 + 16))

#define ARM_V7S_TTBR_S			BIT(1)
#define ARM_V7S_TTBR_NOS		BIT(5)
#define ARM_V7S_TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)
#define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))

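/*
 * The IRGN encoding is awkward because, per the VMSAv7 TTBR layout, the
 * two IRGN bits are split across the register: IRGN[0] lives at bit 6
 * and IRGN[1] at bit 0, which is what the shift-and-swap above
 * implements (ORGN sits contiguously at bits [4:3]).
 */
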
#define ARM_V7S_TCR_PD1			BIT(5)

typedef u32 arm_v7s_iopte;

static bool selftest_running;

struct arm_v7s_io_pgtable {
	struct io_pgtable	iop;

	arm_v7s_iopte		*pgd;
	struct kmem_cache	*l2_tables;
	spinlock_t		split_lock;
};

static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
{
	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
		pte &= ARM_V7S_TABLE_MASK;
	else
		pte &= ARM_V7S_LVL_MASK(lvl);
	return phys_to_virt(pte);
}

static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
	phys = virt_to_phys(table);
	if (phys != (arm_v7s_iopte)phys)
		/* Doesn't fit in PTE */
		goto out_free;
	if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}

static void __arm_v7s_free_table(void *table, int lvl,
				 struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
				 DMA_TO_DEVICE);
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
}

static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
			       struct io_pgtable_cfg *cfg)
{
	if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
		return;

	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
				   num_entries * sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
			      int num_entries, struct io_pgtable_cfg *cfg)
{
	int i;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte;

	__arm_v7s_pte_sync(ptep, num_entries, cfg);
}

static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
		pte |= ARM_V7S_ATTR_MTK_4GB;

	return pte;
}

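/*
 * Worked example: a normal, non-cacheable IOMMU_READ | IOMMU_WRITE
 * mapping at level 2 (with no quirks set) builds up as nG|S (0xc0),
 * plus TEX(1) (0xc4), plus AF and AP_UNPRIV (0xc7), then shifts by
 * ARM_V7S_ATTR_SHIFT(2) = 4 and gains the page type bit, giving 0xc72
 * before the output address is ORed in by arm_v7s_init_pte().
 */
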
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
	int prot = IOMMU_READ;
	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
		prot |= IOMMU_WRITE;
	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
		prot |= IOMMU_PRIV;
	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
		prot |= IOMMU_MMIO;
	else if (pte & ARM_V7S_ATTR_C)
		prot |= IOMMU_CACHE;
	if (pte & ARM_V7S_ATTR_XN(lvl))
		prot |= IOMMU_NOEXEC;

	return prot;
}

static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}

static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}

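/*
 * The XOR-then-OR dance in the two helpers above relocates the fields
 * that large pages lay out differently: since xn and tex were masked
 * out of pte itself, XORing them back clears those bits in place, and
 * XORing the type code flips a small page (0b10) to a large page
 * (0b01) or back. For instance, the XN bit moves from bit 0 of a small
 * page up to bit 15 of a large page (ARM_V7S_CONT_PAGE_XN_SHIFT).
 */
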
static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
		return pte & ARM_V7S_CONT_SECTION;
	else if (lvl == 2)
		return !(pte & ARM_V7S_PTE_TYPE_PAGE);
	return false;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
			      size_t, int, arm_v7s_iopte *);

static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
			if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr & ARM_V7S_LVL_MASK(lvl);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}

static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
					   arm_v7s_iopte *ptep,
					   arm_v7s_iopte curr,
					   struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte old, new;

	new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_V7S_ATTR_NS_TABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg_relaxed(ptep, curr, new);
	__arm_v7s_pte_sync(ptep, 1, cfg);

	return old;
}

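/*
 * Returning the old PTE lets callers detect a lost race: in
 * __arm_v7s_map() below, if another thread installed a table first,
 * the cmpxchg fails, the freshly-allocated table is freed, and the
 * winner's entry is used instead.
 */
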
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
		if (!cptep)
			return -ENOMEM;

		pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_v7s_free_table(cptep, lvl + 1, data);
	} else {
		/* We've no easy way of knowing if it's synced yet, so... */
		__arm_v7s_pte_sync(ptep, 1, cfg);
	}

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
		cptep = iopte_deref(pte, lvl);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

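/*
 * To see how num_entries drives the recursion: mapping 64KB starts at
 * level 1, where 0x10000 >> 20 == 0, so we descend; at level 2,
 * 0x10000 >> 12 == 16, and arm_v7s_init_pte() writes 16 entries which
 * arm_v7s_pte_to_cont() has marked as one contiguous large page.
 * A 1MB mapping instead resolves at level 1 (0x100000 >> 20 == 1) as a
 * single section entry.
 */
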
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	int ret;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
		return -ERANGE;

	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
		io_pgtable_tlb_add_flush(iop, iova, size,
					 ARM_V7S_BLOCK_SIZE(2), false);
		io_pgtable_tlb_sync(iop);
	} else {
		wmb();
	}

	return ret;
}

static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
	int i;

	for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
		arm_v7s_iopte pte = data->pgd[i];

		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
			__arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
	}
	__arm_v7s_free_table(data->pgd, 1, data);
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
}

static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
					unsigned long iova, int idx, int lvl,
					arm_v7s_iopte *ptep)
{
	struct io_pgtable *iop = &data->iop;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Check that we didn't lose a race to get the lock */
	pte = *ptep;
	if (!arm_v7s_pte_is_cont(pte, lvl))
		return pte;

	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(pte, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
		ptep[i] = pte + i * size;

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);

	size *= ARM_V7S_CONT_PAGES;
	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
	io_pgtable_tlb_sync(iop);
	return pte;
}

static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				      unsigned long iova, size_t size,
				      arm_v7s_iopte blk_pte,
				      arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *tablep;
	int i, unmap_idx, num_entries, num_ptes;

	tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
	if (!tablep)
		return 0; /* Bytes unmapped */

	num_ptes = ARM_V7S_PTES_PER_LVL(2);
	num_entries = size >> ARM_V7S_LVL_SHIFT(2);
	unmap_idx = ARM_V7S_LVL_IDX(iova, 2);

	pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, 2);

	for (i = 0; i < num_ptes; i += num_entries, pte += size) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
	}

	pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_v7s_free_table(tablep, 2, data);

		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
			return 0;

		tablep = iopte_deref(pte, 1);
		return __arm_v7s_unmap(data, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	io_pgtable_tlb_sync(&data->iop);
	return size;
}

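/*
 * For example, unmapping one 4KB page from a 1MB section lands here:
 * the section PTE is replaced by a level-2 table whose 256 entries
 * replicate the section's attributes, with only the entry covering the
 * unmapped IOVA left invalid. Unmapping 64KB from a 16MB supersection
 * works the same way, just with contiguous large-page entries.
 */
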
static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			      unsigned long iova, size_t size, int lvl,
			      arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable *iop = &data->iop;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl);
	ptep += idx;
	do {
		pte[i] = READ_ONCE(ptep[i]);
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
			return 0;
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 *
	 * For splitting, we can't rewrite 16 PTEs atomically, and since we
	 * can't necessarily assume TEX remap we don't have a software bit to
	 * mark live entries being split. In practice (i.e. DMA API code), we
	 * will never be splitting large pages anyway, so just wrap this edge
	 * case in a lock for the sake of correctness and be done with it.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
		unsigned long flags;

		spin_lock_irqsave(&data->split_lock, flags);
		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
		spin_unlock_irqrestore(&data->split_lock, flags);
	}

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
					ARM_V7S_BLOCK_SIZE(lvl + 1), false);
				io_pgtable_tlb_sync(iop);
				ptep = iopte_deref(pte[i], lvl);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
				/*
				 * Order the PTE update against queueing the IOVA, to
				 * guarantee that a flush callback from a different CPU
				 * has observed it before the TLBIALL can be issued.
				 */
				smp_wmb();
			} else {
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
							 blk_size, true);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl);
	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			    size_t size)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);

	if (WARN_ON(upper_32_bits(iova)))
		return 0;

	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}

static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
		pte = READ_ONCE(*ptep);
		ptep = iopte_deref(pte, lvl);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

	mask = ARM_V7S_LVL_MASK(lvl);
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return (pte & mask) | (iova & ~mask);
}

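/*
 * The mask multiplication is a neat trick: multiplying by 16 shifts the
 * mask left by 4 bits, so e.g. the level-2 mask 0xfffff000 (4KB pages)
 * becomes 0xffff0000 (64KB large pages), widening the IOVA offset that
 * passes through into the returned physical address accordingly.
 */
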
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;

	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
		return NULL;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NO_PERMS |
			    IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			    IO_PGTABLE_QUIRK_ARM_MTK_4GB |
			    IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	spin_lock_init(&data->split_lock);
	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2),
					    ARM_V7S_TABLE_SIZE(2),
					    SLAB_CACHE_DMA, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_v7s_map,
		.unmap		= arm_v7s_unmap,
		.iova_to_phys	= arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, disable TTBR1 */
	cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
				   ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
				   ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
				   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
	cfg->arm_v7s_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc	= arm_v7s_alloc_pgtable,
	.free	= arm_v7s_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

#define __FAIL(ops)	({				\
		WARN(1, "selftest: test failed\n");	\
		selftest_running = false;		\
		-EFAULT;				\
})

static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;

	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);

	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;
		if (ops->map(ops, iova, iova, size, IOMMU_READ |
						    IOMMU_WRITE |
						    IOMMU_NOEXEC |
						    IOMMU_CACHE))
			return __FAIL(ops);

		/* Overlapping mappings */
		if (!ops->map(ops, iova, iova + size, size,
			      IOMMU_READ | IOMMU_NOEXEC))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		if (ops->unmap(ops, iova_start + size, size) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
#endif