dw_dmac: use __func__ constant in the debug prints
[linux-2.6/libata-dev.git] / drivers / iommu / exynos-iommu.c
blob9a114b9ff1704f60278a5c94dd6b55a411adfa4d
1 /* linux/drivers/iommu/exynos_iommu.c
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
11 #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
12 #define DEBUG
13 #endif
15 #include <linux/io.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/clk.h>
21 #include <linux/err.h>
22 #include <linux/mm.h>
23 #include <linux/iommu.h>
24 #include <linux/errno.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/export.h>
29 #include <asm/cacheflush.h>
30 #include <asm/pgtable.h>
32 #include <mach/sysmmu.h>
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

/* Mapping granularities: 1MB section, 64KB large page, 4KB small page */
#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

/*
 * Lv1 descriptor type is encoded in bits [1:0]:
 * 0 or 3 = fault, 1 = points to an lv2 page table, 2 = 1MB section.
 */
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

/* Lv2 descriptor type: 0 = fault, 1 = 64KB large page, bit1 set = 4KB page */
#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/* Physical base extraction from an entry / offset extraction from an iova */
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

/* Index of the entry covering @iova within an lv1 / lv2 table */
#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

/* 4096 lv1 entries (16KB table), 256 lv2 entries (1KB table) */
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

/* Physical address of the lv2 table that an lv1 page-table entry points to */
#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

/* Compose page-table entries from a physical address (type bits [1:0]) */
#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

/* Values written to REG_MMU_CTRL */
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

/* System MMU special function register offsets */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

/* Fault address registers, indexed via fault_reg_offset[] */
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

/* Prefetch buffer start/end registers (System MMU v3.x only) */
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058
/* Return the lv1 (section) entry in @pgtable that covers @iova. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return &pgtable[lv1ent_offset(iova)];
}
/*
 * Return the lv2 (page) entry for @iova inside the lv2 table that the lv1
 * entry @sent points to. @sent must be a valid page-table-type lv1 entry.
 */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	unsigned long *lv2table = (unsigned long *)__va(lv2table_base(sent));

	return &lv2table[lv2ent_offset(iova)];
}
/*
 * Fault/interrupt types reported by the System MMU. The enumerator order
 * matches the bit position in REG_INT_STATUS and indexes both
 * fault_reg_offset[] and sysmmu_fault_name[].
 */
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,	/* software-only: no matching HW status bit */
	SYSMMU_FAULTS_NUM
};
/*
 * Per-device System MMU fault callback.
 * @itype: type of fault.
 * @pgtable_base: the physical address of page table base. This is 0 if @itype
 *                is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 *
 * Return 0 if the fault is handled (the interrupt is then cleared and
 * translation resumed), non-zero otherwise.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr);
/*
 * SFR offset that holds the faulting address for each fault type,
 * indexed by enum exynos_sysmmu_inttype. The SYSMMU_FAULT_UNKNOWN slot
 * is intentionally left zero-initialized (never read for that type).
 */
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};
/* Human-readable fault names, indexed by enum exynos_sysmmu_inttype */
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
/* Per-IOMMU-domain state: one two-level page table shared by all clients */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
/* Per-System-MMU-device driver state */
struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu; /* System MMU's device descriptor */
	struct device *dev; /* Owner of system MMU */
	char *dbgname; /* name used in debug prints, from platform data */
	int nsfrs; /* number of SFR register banks (HW instances) */
	void __iomem **sfrbases; /* ioremapped base of each SFR bank */
	struct clk *clk[2]; /* up to two gate clocks, from platform data */
	int activations; /* enable refcount; HW active while > 0 */
	rwlock_t lock; /* protects the fields below and HW access */
	struct iommu_domain *domain; /* domain attached via the IOMMU API */
	sysmmu_fault_handler_t fault_handler; /* fallback fault callback */
	unsigned long pgtable; /* physical address of the active lv1 table */
};
183 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
185 /* return true if the System MMU was not active previously
186 and it needs to be initialized */
187 return ++data->activations == 1;
190 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
192 /* return true if the System MMU is needed to be disabled */
193 BUG_ON(data->activations < 1);
194 return --data->activations == 0;
197 static bool is_sysmmu_active(struct sysmmu_drvdata *data)
199 return data->activations > 0;
202 static void sysmmu_unblock(void __iomem *sfrbase)
204 __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
207 static bool sysmmu_block(void __iomem *sfrbase)
209 int i = 120;
211 __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
212 while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
213 --i;
215 if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
216 sysmmu_unblock(sfrbase);
217 return false;
220 return true;
223 static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
225 __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
228 static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
229 unsigned long iova)
231 __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
234 static void __sysmmu_set_ptbase(void __iomem *sfrbase,
235 unsigned long pgd)
237 __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
238 __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
240 __sysmmu_tlb_invalidate(sfrbase);
243 static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
244 unsigned long size, int idx)
246 __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
247 __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
250 void exynos_sysmmu_set_prefbuf(struct device *dev,
251 unsigned long base0, unsigned long size0,
252 unsigned long base1, unsigned long size1)
254 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
255 unsigned long flags;
256 int i;
258 BUG_ON((base0 + size0) <= base0);
259 BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
261 read_lock_irqsave(&data->lock, flags);
262 if (!is_sysmmu_active(data))
263 goto finish;
265 for (i = 0; i < data->nsfrs; i++) {
266 if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
267 if (!sysmmu_block(data->sfrbases[i]))
268 continue;
270 if (size1 == 0) {
271 if (size0 <= SZ_128K) {
272 base1 = base0;
273 size1 = size0;
274 } else {
275 size1 = size0 -
276 ALIGN(size0 / 2, SZ_64K);
277 size0 = size0 - size1;
278 base1 = base0 + size0;
282 __sysmmu_set_prefbuf(
283 data->sfrbases[i], base0, size0, 0);
284 __sysmmu_set_prefbuf(
285 data->sfrbases[i], base1, size1, 1);
287 sysmmu_unblock(data->sfrbases[i]);
290 finish:
291 read_unlock_irqrestore(&data->lock, flags);
294 static void __set_fault_handler(struct sysmmu_drvdata *data,
295 sysmmu_fault_handler_t handler)
297 unsigned long flags;
299 write_lock_irqsave(&data->lock, flags);
300 data->fault_handler = handler;
301 write_unlock_irqrestore(&data->lock, flags);
304 void exynos_sysmmu_set_fault_handler(struct device *dev,
305 sysmmu_fault_handler_t handler)
307 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
309 __set_fault_handler(data, handler);
312 static int default_fault_handler(enum exynos_sysmmu_inttype itype,
313 unsigned long pgtable_base, unsigned long fault_addr)
315 unsigned long *ent;
317 if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
318 itype = SYSMMU_FAULT_UNKNOWN;
320 pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
321 sysmmu_fault_name[itype], fault_addr, pgtable_base);
323 ent = section_entry(__va(pgtable_base), fault_addr);
324 pr_err("\tLv1 entry: 0x%lx\n", *ent);
326 if (lv1ent_page(ent)) {
327 ent = page_entry(ent, fault_addr);
328 pr_err("\t Lv2 entry: 0x%lx\n", *ent);
331 pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
333 BUG();
335 return 0;
338 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
340 /* SYSMMU is in blocked when interrupt occurred. */
341 struct sysmmu_drvdata *data = dev_id;
342 struct resource *irqres;
343 struct platform_device *pdev;
344 enum exynos_sysmmu_inttype itype;
345 unsigned long addr = -1;
347 int i, ret = -ENOSYS;
349 read_lock(&data->lock);
351 WARN_ON(!is_sysmmu_active(data));
353 pdev = to_platform_device(data->sysmmu);
354 for (i = 0; i < (pdev->num_resources / 2); i++) {
355 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
356 if (irqres && ((int)irqres->start == irq))
357 break;
360 if (i == pdev->num_resources) {
361 itype = SYSMMU_FAULT_UNKNOWN;
362 } else {
363 itype = (enum exynos_sysmmu_inttype)
364 __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
365 if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
366 itype = SYSMMU_FAULT_UNKNOWN;
367 else
368 addr = __raw_readl(
369 data->sfrbases[i] + fault_reg_offset[itype]);
372 if (data->domain)
373 ret = report_iommu_fault(data->domain, data->dev,
374 addr, itype);
376 if ((ret == -ENOSYS) && data->fault_handler) {
377 unsigned long base = data->pgtable;
378 if (itype != SYSMMU_FAULT_UNKNOWN)
379 base = __raw_readl(
380 data->sfrbases[i] + REG_PT_BASE_ADDR);
381 ret = data->fault_handler(itype, base, addr);
384 if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
385 __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
386 else
387 dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
388 data->dbgname, sysmmu_fault_name[itype]);
390 if (itype != SYSMMU_FAULT_UNKNOWN)
391 sysmmu_unblock(data->sfrbases[i]);
393 read_unlock(&data->lock);
395 return IRQ_HANDLED;
398 static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
400 unsigned long flags;
401 bool disabled = false;
402 int i;
404 write_lock_irqsave(&data->lock, flags);
406 if (!set_sysmmu_inactive(data))
407 goto finish;
409 for (i = 0; i < data->nsfrs; i++)
410 __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
412 if (data->clk[1])
413 clk_disable(data->clk[1]);
414 if (data->clk[0])
415 clk_disable(data->clk[0]);
417 disabled = true;
418 data->pgtable = 0;
419 data->domain = NULL;
420 finish:
421 write_unlock_irqrestore(&data->lock, flags);
423 if (disabled)
424 dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
425 else
426 dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
427 data->dbgname, data->activations);
429 return disabled;
432 /* __exynos_sysmmu_enable: Enables System MMU
434 * returns -error if an error occurred and System MMU is not enabled,
435 * 0 if the System MMU has been just enabled and 1 if System MMU was already
436 * enabled before.
438 static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
439 unsigned long pgtable, struct iommu_domain *domain)
441 int i, ret = 0;
442 unsigned long flags;
444 write_lock_irqsave(&data->lock, flags);
446 if (!set_sysmmu_active(data)) {
447 if (WARN_ON(pgtable != data->pgtable)) {
448 ret = -EBUSY;
449 set_sysmmu_inactive(data);
450 } else {
451 ret = 1;
454 dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
455 goto finish;
458 if (data->clk[0])
459 clk_enable(data->clk[0]);
460 if (data->clk[1])
461 clk_enable(data->clk[1]);
463 data->pgtable = pgtable;
465 for (i = 0; i < data->nsfrs; i++) {
466 __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
468 if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
469 /* System MMU version is 3.x */
470 __raw_writel((1 << 12) | (2 << 28),
471 data->sfrbases[i] + REG_MMU_CFG);
472 __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
473 __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
476 __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
479 data->domain = domain;
481 dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
482 finish:
483 write_unlock_irqrestore(&data->lock, flags);
485 return ret;
488 int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
490 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
491 int ret;
493 BUG_ON(!memblock_is_memory(pgtable));
495 ret = pm_runtime_get_sync(data->sysmmu);
496 if (ret < 0) {
497 dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
498 return ret;
501 ret = __exynos_sysmmu_enable(data, pgtable, NULL);
502 if (WARN_ON(ret < 0)) {
503 pm_runtime_put(data->sysmmu);
504 dev_err(data->sysmmu,
505 "(%s) Already enabled with page table %#lx\n",
506 data->dbgname, data->pgtable);
507 } else {
508 data->dev = dev;
511 return ret;
514 bool exynos_sysmmu_disable(struct device *dev)
516 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
517 bool disabled;
519 disabled = __exynos_sysmmu_disable(data);
520 pm_runtime_put(data->sysmmu);
522 return disabled;
525 static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
527 unsigned long flags;
528 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
530 read_lock_irqsave(&data->lock, flags);
532 if (is_sysmmu_active(data)) {
533 int i;
534 for (i = 0; i < data->nsfrs; i++) {
535 if (sysmmu_block(data->sfrbases[i])) {
536 __sysmmu_tlb_invalidate_entry(
537 data->sfrbases[i], iova);
538 sysmmu_unblock(data->sfrbases[i]);
541 } else {
542 dev_dbg(data->sysmmu,
543 "(%s) Disabled. Skipping invalidating TLB.\n",
544 data->dbgname);
547 read_unlock_irqrestore(&data->lock, flags);
550 void exynos_sysmmu_tlb_invalidate(struct device *dev)
552 unsigned long flags;
553 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
555 read_lock_irqsave(&data->lock, flags);
557 if (is_sysmmu_active(data)) {
558 int i;
559 for (i = 0; i < data->nsfrs; i++) {
560 if (sysmmu_block(data->sfrbases[i])) {
561 __sysmmu_tlb_invalidate(data->sfrbases[i]);
562 sysmmu_unblock(data->sfrbases[i]);
565 } else {
566 dev_dbg(data->sysmmu,
567 "(%s) Disabled. Skipping invalidating TLB.\n",
568 data->dbgname);
571 read_unlock_irqrestore(&data->lock, flags);
574 static int exynos_sysmmu_probe(struct platform_device *pdev)
576 int i, ret;
577 struct device *dev;
578 struct sysmmu_drvdata *data;
580 dev = &pdev->dev;
582 data = kzalloc(sizeof(*data), GFP_KERNEL);
583 if (!data) {
584 dev_dbg(dev, "Not enough memory\n");
585 ret = -ENOMEM;
586 goto err_alloc;
589 ret = dev_set_drvdata(dev, data);
590 if (ret) {
591 dev_dbg(dev, "Unabled to initialize driver data\n");
592 goto err_init;
595 data->nsfrs = pdev->num_resources / 2;
596 data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
597 GFP_KERNEL);
598 if (data->sfrbases == NULL) {
599 dev_dbg(dev, "Not enough memory\n");
600 ret = -ENOMEM;
601 goto err_init;
604 for (i = 0; i < data->nsfrs; i++) {
605 struct resource *res;
606 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
607 if (!res) {
608 dev_dbg(dev, "Unable to find IOMEM region\n");
609 ret = -ENOENT;
610 goto err_res;
613 data->sfrbases[i] = ioremap(res->start, resource_size(res));
614 if (!data->sfrbases[i]) {
615 dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
616 res->start);
617 ret = -ENOENT;
618 goto err_res;
622 for (i = 0; i < data->nsfrs; i++) {
623 ret = platform_get_irq(pdev, i);
624 if (ret <= 0) {
625 dev_dbg(dev, "Unable to find IRQ resource\n");
626 goto err_irq;
629 ret = request_irq(ret, exynos_sysmmu_irq, 0,
630 dev_name(dev), data);
631 if (ret) {
632 dev_dbg(dev, "Unabled to register interrupt handler\n");
633 goto err_irq;
637 if (dev_get_platdata(dev)) {
638 char *deli, *beg;
639 struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
641 beg = platdata->clockname;
643 for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
644 /* NOTHING */;
646 if (*deli == '\0')
647 deli = NULL;
648 else
649 *deli = '\0';
651 data->clk[0] = clk_get(dev, beg);
652 if (IS_ERR(data->clk[0])) {
653 data->clk[0] = NULL;
654 dev_dbg(dev, "No clock descriptor registered\n");
657 if (data->clk[0] && deli) {
658 *deli = ',';
659 data->clk[1] = clk_get(dev, deli + 1);
660 if (IS_ERR(data->clk[1]))
661 data->clk[1] = NULL;
664 data->dbgname = platdata->dbgname;
667 data->sysmmu = dev;
668 rwlock_init(&data->lock);
669 INIT_LIST_HEAD(&data->node);
671 __set_fault_handler(data, &default_fault_handler);
673 if (dev->parent)
674 pm_runtime_enable(dev);
676 dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
677 return 0;
678 err_irq:
679 while (i-- > 0) {
680 int irq;
682 irq = platform_get_irq(pdev, i);
683 free_irq(irq, data);
685 err_res:
686 while (data->nsfrs-- > 0)
687 iounmap(data->sfrbases[data->nsfrs]);
688 kfree(data->sfrbases);
689 err_init:
690 kfree(data);
691 err_alloc:
692 dev_err(dev, "Failed to initialize\n");
693 return ret;
/* Platform driver glue: one probe per "exynos-sysmmu" device node */
static struct platform_driver exynos_sysmmu_driver = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.owner = THIS_MODULE,
		.name = "exynos-sysmmu",
	}
};
/*
 * Clean page-table updates out of the CPU caches (L1 and outer cache)
 * so the System MMU's table walker observes them in memory.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}
711 static int exynos_iommu_domain_init(struct iommu_domain *domain)
713 struct exynos_iommu_domain *priv;
715 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
716 if (!priv)
717 return -ENOMEM;
719 priv->pgtable = (unsigned long *)__get_free_pages(
720 GFP_KERNEL | __GFP_ZERO, 2);
721 if (!priv->pgtable)
722 goto err_pgtable;
724 priv->lv2entcnt = (short *)__get_free_pages(
725 GFP_KERNEL | __GFP_ZERO, 1);
726 if (!priv->lv2entcnt)
727 goto err_counter;
729 pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
731 spin_lock_init(&priv->lock);
732 spin_lock_init(&priv->pgtablelock);
733 INIT_LIST_HEAD(&priv->clients);
735 domain->priv = priv;
736 return 0;
738 err_counter:
739 free_pages((unsigned long)priv->pgtable, 2);
740 err_pgtable:
741 kfree(priv);
742 return -ENOMEM;
745 static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
747 struct exynos_iommu_domain *priv = domain->priv;
748 struct sysmmu_drvdata *data;
749 unsigned long flags;
750 int i;
752 WARN_ON(!list_empty(&priv->clients));
754 spin_lock_irqsave(&priv->lock, flags);
756 list_for_each_entry(data, &priv->clients, node) {
757 while (!exynos_sysmmu_disable(data->dev))
758 ; /* until System MMU is actually disabled */
761 spin_unlock_irqrestore(&priv->lock, flags);
763 for (i = 0; i < NUM_LV1ENTRIES; i++)
764 if (lv1ent_page(priv->pgtable + i))
765 kfree(__va(lv2table_base(priv->pgtable + i)));
767 free_pages((unsigned long)priv->pgtable, 2);
768 free_pages((unsigned long)priv->lv2entcnt, 1);
769 kfree(domain->priv);
770 domain->priv = NULL;
773 static int exynos_iommu_attach_device(struct iommu_domain *domain,
774 struct device *dev)
776 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
777 struct exynos_iommu_domain *priv = domain->priv;
778 unsigned long flags;
779 int ret;
781 ret = pm_runtime_get_sync(data->sysmmu);
782 if (ret < 0)
783 return ret;
785 ret = 0;
787 spin_lock_irqsave(&priv->lock, flags);
789 ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
791 if (ret == 0) {
792 /* 'data->node' must not be appeared in priv->clients */
793 BUG_ON(!list_empty(&data->node));
794 data->dev = dev;
795 list_add_tail(&data->node, &priv->clients);
798 spin_unlock_irqrestore(&priv->lock, flags);
800 if (ret < 0) {
801 dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
802 __func__, __pa(priv->pgtable));
803 pm_runtime_put(data->sysmmu);
804 } else if (ret > 0) {
805 dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
806 __func__, __pa(priv->pgtable));
807 } else {
808 dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
809 __func__, __pa(priv->pgtable));
812 return ret;
815 static void exynos_iommu_detach_device(struct iommu_domain *domain,
816 struct device *dev)
818 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
819 struct exynos_iommu_domain *priv = domain->priv;
820 struct list_head *pos;
821 unsigned long flags;
822 bool found = false;
824 spin_lock_irqsave(&priv->lock, flags);
826 list_for_each(pos, &priv->clients) {
827 if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
828 found = true;
829 break;
833 if (!found)
834 goto finish;
836 if (__exynos_sysmmu_disable(data)) {
837 dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
838 __func__, __pa(priv->pgtable));
839 list_del(&data->node);
840 INIT_LIST_HEAD(&data->node);
842 } else {
843 dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
844 __func__, __pa(priv->pgtable));
847 finish:
848 spin_unlock_irqrestore(&priv->lock, flags);
850 if (found)
851 pm_runtime_put(data->sysmmu);
854 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
855 short *pgcounter)
857 if (lv1ent_fault(sent)) {
858 unsigned long *pent;
860 pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
861 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
862 if (!pent)
863 return NULL;
865 *sent = mk_lv1ent_page(__pa(pent));
866 *pgcounter = NUM_LV2ENTRIES;
867 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
868 pgtable_flush(sent, sent + 1);
871 return page_entry(sent, iova);
874 static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
876 if (lv1ent_section(sent))
877 return -EADDRINUSE;
879 if (lv1ent_page(sent)) {
880 if (*pgcnt != NUM_LV2ENTRIES)
881 return -EADDRINUSE;
883 kfree(page_entry(sent, 0));
885 *pgcnt = 0;
888 *sent = mk_lv1ent_sect(paddr);
890 pgtable_flush(sent, sent + 1);
892 return 0;
895 static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
896 short *pgcnt)
898 if (size == SPAGE_SIZE) {
899 if (!lv2ent_fault(pent))
900 return -EADDRINUSE;
902 *pent = mk_lv2ent_spage(paddr);
903 pgtable_flush(pent, pent + 1);
904 *pgcnt -= 1;
905 } else { /* size == LPAGE_SIZE */
906 int i;
907 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
908 if (!lv2ent_fault(pent)) {
909 memset(pent, 0, sizeof(*pent) * i);
910 return -EADDRINUSE;
913 *pent = mk_lv2ent_lpage(paddr);
915 pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
916 *pgcnt -= SPAGES_PER_LPAGE;
919 return 0;
922 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
923 phys_addr_t paddr, size_t size, int prot)
925 struct exynos_iommu_domain *priv = domain->priv;
926 unsigned long *entry;
927 unsigned long flags;
928 int ret = -ENOMEM;
930 BUG_ON(priv->pgtable == NULL);
932 spin_lock_irqsave(&priv->pgtablelock, flags);
934 entry = section_entry(priv->pgtable, iova);
936 if (size == SECT_SIZE) {
937 ret = lv1set_section(entry, paddr,
938 &priv->lv2entcnt[lv1ent_offset(iova)]);
939 } else {
940 unsigned long *pent;
942 pent = alloc_lv2entry(entry, iova,
943 &priv->lv2entcnt[lv1ent_offset(iova)]);
945 if (!pent)
946 ret = -ENOMEM;
947 else
948 ret = lv2set_page(pent, paddr, size,
949 &priv->lv2entcnt[lv1ent_offset(iova)]);
952 if (ret) {
953 pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
954 __func__, iova, size);
957 spin_unlock_irqrestore(&priv->pgtablelock, flags);
959 return ret;
962 static size_t exynos_iommu_unmap(struct iommu_domain *domain,
963 unsigned long iova, size_t size)
965 struct exynos_iommu_domain *priv = domain->priv;
966 struct sysmmu_drvdata *data;
967 unsigned long flags;
968 unsigned long *ent;
970 BUG_ON(priv->pgtable == NULL);
972 spin_lock_irqsave(&priv->pgtablelock, flags);
974 ent = section_entry(priv->pgtable, iova);
976 if (lv1ent_section(ent)) {
977 BUG_ON(size < SECT_SIZE);
979 *ent = 0;
980 pgtable_flush(ent, ent + 1);
981 size = SECT_SIZE;
982 goto done;
985 if (unlikely(lv1ent_fault(ent))) {
986 if (size > SECT_SIZE)
987 size = SECT_SIZE;
988 goto done;
991 /* lv1ent_page(sent) == true here */
993 ent = page_entry(ent, iova);
995 if (unlikely(lv2ent_fault(ent))) {
996 size = SPAGE_SIZE;
997 goto done;
1000 if (lv2ent_small(ent)) {
1001 *ent = 0;
1002 size = SPAGE_SIZE;
1003 priv->lv2entcnt[lv1ent_offset(iova)] += 1;
1004 goto done;
1007 /* lv1ent_large(ent) == true here */
1008 BUG_ON(size < LPAGE_SIZE);
1010 memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1012 size = LPAGE_SIZE;
1013 priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1014 done:
1015 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1017 spin_lock_irqsave(&priv->lock, flags);
1018 list_for_each_entry(data, &priv->clients, node)
1019 sysmmu_tlb_invalidate_entry(data->dev, iova);
1020 spin_unlock_irqrestore(&priv->lock, flags);
1023 return size;
1026 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
1027 unsigned long iova)
1029 struct exynos_iommu_domain *priv = domain->priv;
1030 unsigned long *entry;
1031 unsigned long flags;
1032 phys_addr_t phys = 0;
1034 spin_lock_irqsave(&priv->pgtablelock, flags);
1036 entry = section_entry(priv->pgtable, iova);
1038 if (lv1ent_section(entry)) {
1039 phys = section_phys(entry) + section_offs(iova);
1040 } else if (lv1ent_page(entry)) {
1041 entry = page_entry(entry, iova);
1043 if (lv2ent_large(entry))
1044 phys = lpage_phys(entry) + lpage_offs(iova);
1045 else if (lv2ent_small(entry))
1046 phys = spage_phys(entry) + spage_offs(iova);
1049 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1051 return phys;
/* IOMMU API operations; supported page sizes are 1MB, 64KB and 4KB */
static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
1065 static int __init exynos_iommu_init(void)
1067 int ret;
1069 ret = platform_driver_register(&exynos_sysmmu_driver);
1071 if (ret == 0)
1072 bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1074 return ret;
1076 subsys_initcall(exynos_iommu_init);