drivers/iommu/tegra-smmu.c

/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

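/*
 * Per-instance SMMU state. One of these is created by tegra_smmu_probe()
 * and owned by the memory controller (MC) driver that calls it.
 */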
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

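/*
 * An address space: one page directory plus bookkeeping. "count" tracks
 * how many PTEs are live in each page table so empty tables can be freed,
 * and "pts" shadows the page-table pages by page-directory index.
 */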
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

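/* Register offsets and bit fields, relative to the MC register space. */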
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)

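/*
 * IOVAs are 32 bits wide and split 10/10/12: bits [31:22] index the page
 * directory (SMMU_NUM_PDE entries), bits [21:12] index a page table
 * (SMMU_NUM_PTE entries) and bits [11:0] are the offset into a 4 KiB page.
 */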
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	/*
	 * Mask off the attribute bits and widen before shifting: a plain
	 * u32 shift would truncate page-table addresses above 4 GiB.
	 */
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

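/*
 * Translations are cached in the TLB and page-table entries in the page
 * table cache (PTC); both must be flushed whenever PDEs or PTEs change.
 */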
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	/* read back an arbitrary register to flush posted writes */
	smmu_readl(smmu, SMMU_CONFIG);
}

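/* ASIDs are handed out from a bitmap allocator serialized by smmu->lock. */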
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

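/*
 * Allocating a domain means allocating an address space: a zeroed page
 * directory page from the DMA zone plus the shadow "count" and "pts"
 * arrays. The ASID and hardware setup are deferred until first attach.
 */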
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

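/*
 * Enabling translation for a swgroup is two-step: set the per-client
 * enable bit for each memory client belonging to the swgroup, then point
 * the swgroup's ASID register at the domain's ASID. Disabling reverses
 * the order.
 */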
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

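/*
 * A device can reference this SMMU several times through its "iommus"
 * phandles, one per swgroup; attach prepares the address space once per
 * match and enables translation for each named swgroup.
 */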
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(as->smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

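/*
 * map/unmap operate on a single 4 KiB page at a time (pgsize_bitmap is
 * SZ_4K below); the IOMMU core splits larger requests into per-page calls.
 */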
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		index++;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = generic_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

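/* debugfs: dump the enable state and ASID of every swgroup and client. */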
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

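/*
 * Called by the Tegra memory controller driver; the SMMU shares the MC's
 * register space (smmu->regs = mc->regs), so there is no separate
 * platform device for it.
 */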
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}