/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add, and contains a list of SYSMMU controllers defined by
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an exynos specific generalization of struct iommu_domain.
 * It contains a list of SYSMMU controllers from all master devices, which
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all the data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list nodes
 * to connect it to all other structures, internal state and parameters read
 * from device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;	/* SYSMMU controller device */
	struct device *master;	/* master device (owner) */
	void __iomem *sfrbase;	/* our registers */
	struct clk *clk;	/* SYSMMU's clock */
	struct clk *clk_master;	/* master's device clock */
	int activations;	/* number of calls to sysmmu_enable */
	spinlock_t lock;	/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node; /* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;	/* assigned page table structure */
	unsigned int version;	/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/*
	 * Return true if the System MMU was not active previously and it
	 * needs to be initialized.
	 */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

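/*
 * Low-level register helpers: sysmmu_block() asks the SYSMMU to stall
 * translation and spins (bounded) until REG_MMU_STATUS reports the blocked
 * state, so the TLB can be flushed safely; sysmmu_unblock() re-enables
 * translation. If blocking times out, sysmmu_block() unblocks and fails.
 */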
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

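/*
 * Fault interrupt handler. The hardware keeps translation blocked while a
 * fault interrupt is pending, so the handler decodes the fault type and
 * address, reports it via report_iommu_fault(), clears the interrupt and
 * only then unblocks the SYSMMU. Any fault that is not recovered by a
 * registered fault handler is fatal (BUG_ON below).
 */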
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

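/*
 * Program REG_MMU_CFG based on the version read back from the hardware:
 * v3.2 and later enable the first-level page table (FLPD) cache, v3.3
 * additionally sets CFG_ACGEN and clears CFG_LRU, while v3.2 selects
 * CFG_SYSSEL instead.
 */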
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

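/*
 * FLPD cache maintenance: flush a cached first-level entry by writing its
 * IOVA to REG_MMU_FLUSH_ENTRY. The raw helper only issues the flush on
 * System MMU v3.3; the wrapper below also handles the master clock, the
 * state lock and the "is this SYSMMU active" check.
 */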
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

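/*
 * Probe one SYSMMU instance: map its registers, install the fault IRQ
 * handler and obtain the "sysmmu" clock plus the optional "master" clock.
 * The controller itself is only enabled later, when its master device is
 * attached to an IOMMU domain.
 */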
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};

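/*
 * Page table descriptors are written by the CPU but read by the SYSMMU's
 * page table walker, so every updated range is flushed from the CPU data
 * caches (inner and outer) before the hardware may walk it.
 */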
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

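/*
 * Allocate an unmanaged domain: a 16KiB first-level page table (4096
 * entries, one 1MiB section each) plus a counter of free second-level
 * entries per section. Every first-level slot initially points to the
 * shared zero_lv2_table (ZERO_LV2LINK); see the v3.3 FLPD cache comments
 * further down.
 */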
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(domain->pgtable + i)));

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

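/*
 * Attach a master device: enable every SYSMMU listed in the device's
 * exynos_iommu_owner with this domain's page table and add it to the
 * domain's client list. __sysmmu_enable() returning 1 means the SYSMMU was
 * already running with the same page table ("again" in the message below).
 */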
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

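/*
 * Find, or allocate on demand, the second-level page table entry for @iova.
 * A first-level slot that still points at zero_lv2_table is replaced with a
 * newly allocated lv2 table, which on v3.3 also requires the FLPD cache
 * flush explained in the comment inside.
 */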
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

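/*
 * Write second-level entries: a single 4KiB small-page descriptor, or
 * SPAGES_PER_LPAGE (16) identical large-page descriptors for a 64KiB
 * mapping. *pgcnt counts the remaining unused entries of the lv2 table.
 */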
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

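/*
 * Unmap one page/section at @l_iova and return the size actually unmapped.
 * Section entries are not cleared to zero but rewritten as ZERO_LV2LINK
 * (part of the v3.3 workaround), and the TLBs of all attached SYSMMUs are
 * invalidated once the page table has been updated.
 */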
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

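/* Group handling: each master behind a SYSMMU gets its own IOMMU group. */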
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

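/*
 * of_xlate callback, invoked for each SYSMMU phandle referenced by a master
 * device: allocate the exynos_iommu_owner on first use and link the
 * referenced SYSMMU controller into the owner's list.
 */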
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

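/*
 * One-time initialization: create the kmem cache for lv2 tables, register
 * the platform driver, allocate the shared zero lv2 table and install
 * exynos_iommu_ops on the platform bus. Called lazily from
 * exynos_iommu_of_setup() when the first SYSMMU node is parsed.
 */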
static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);