Revert "staging: tidspbridge: remove dw_dmmu_base from cfg_hostres struct"
drivers/staging/tidspbridge/core/dsp-mmu.c
/*
 * dsp-mmu.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP iommu.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io_sm.h>
#include <dspbridge/dspdeh.h>
#include "_tiomap.h"

#include <dspbridge/dsp-mmu.h>

#define MMU_CNTL_TWL_EN		(1 << 2)

static struct tasklet_struct mmu_tasklet;
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
	void *dummy_addr;
	u32 fa, tmp;
	struct iotlb_entry e;
	struct iommu *mmu = dev_context->dsp_mmu;

	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
	if (!dummy_addr)
		return;

	/*
	 * Before acking the MMU fault, let's make sure MMU can only
	 * access entry #0. Then add a new entry so that the DSP OS
	 * can continue in order to dump the stack.
	 */
	tmp = iommu_read_reg(mmu, MMU_CNTL);
	tmp &= ~MMU_CNTL_TWL_EN;
	iommu_write_reg(mmu, tmp, MMU_CNTL);

	/* Back the faulting page with the dummy page so the DSP can run on */
	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
	e.da = fa & PAGE_MASK;
	e.pa = virt_to_phys(dummy_addr);
	e.valid = 1;
	e.prsvd = 1;
	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
	e.endian = MMU_RAM_ENDIAN_LITTLE;
	e.elsz = MMU_RAM_ELSZ_32;
	e.mixed = 0;

	load_iotlb_entry(mmu, &e);

	dsp_clk_enable(DSP_CLK_GPT8);

	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);

	/* Clear MMU interrupt */
	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);

	dump_dsp_stack(dev_context);
	dsp_clk_disable(DSP_CLK_GPT8);

	iopgtable_clear_entry(mmu, fa);
	free_page((unsigned long)dummy_addr);
}
#endif
static void fault_tasklet(unsigned long data)
{
	struct iommu *mmu = (struct iommu *)data;
	struct bridge_dev_context *dev_ctx;
	struct deh_mgr *dm;
	u32 fa;

	dev_get_deh_mgr(dev_get_first(), &dm);
	dev_get_bridge_context(dev_get_first(), &dev_ctx);

	if (!dm || !dev_ctx)
		return;

	fa = iommu_read_reg(mmu, MMU_FAULT_AD);

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
	print_dsp_trace_buffer(dev_ctx);
	dump_dl_modules(dev_ctx);
	mmu_fault_print_stack(dev_ctx);
#endif

	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
}
/*
 *  ======== mmu_fault_callback ========
 *      ISR to be triggered by a DSP MMU fault interrupt.
 */
static int mmu_fault_callback(struct iommu *mmu)
{
	if (!mmu)
		return -EPERM;

	/* Mask further MMU interrupts; the tasklet handles this fault */
	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
	tasklet_schedule(&mmu_tasklet);

	return 0;
}
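
/*
 * Note on the fault path: the iommu layer invokes mmu_fault_callback()
 * in interrupt context, so the callback only masks further MMU
 * interrupts and defers the heavy work (register reads, trace/stack
 * dumps, user notification) to fault_tasklet(). The callback is wired
 * up via mmu->isr in dsp_mmu_init() below.
 */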
/**
 * dsp_mmu_init() - initialize the dsp_mmu module and return a handle
 *
 * This function initializes the dsp mmu module and returns a struct
 * iommu handle to use for dsp maps.
 */
struct iommu *dsp_mmu_init(void)
{
	struct iommu *mmu;

	mmu = iommu_get("iva2");

	if (!IS_ERR(mmu)) {
		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
		mmu->isr = mmu_fault_callback;
	}

	return mmu;
}
/**
 * dsp_mmu_exit() - destroy the dsp mmu module
 * @mmu:	Pointer to iommu handle.
 *
 * This function destroys the dsp mmu module.
 */
void dsp_mmu_exit(struct iommu *mmu)
{
	if (mmu)
		iommu_put(mmu);
	tasklet_kill(&mmu_tasklet);
}
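
/*
 * A minimal usage sketch for the pair above (error handling elided;
 * assumes the caller keeps the handle in its bridge_dev_context, as
 * mmu_fault_print_stack() expects via dev_context->dsp_mmu):
 *
 *	struct iommu *mmu = dsp_mmu_init();
 *
 *	if (IS_ERR(mmu))
 *		return PTR_ERR(mmu);
 *	dev_context->dsp_mmu = mmu;
 *	...
 *	dsp_mmu_exit(dev_context->dsp_mmu);
 *	dev_context->dsp_mmu = NULL;
 */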
/**
 * user_va2_pa() - get physical address from userspace address.
 * @mm:		mm_struct pointer of the process.
 * @address:	Virtual user space address.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				pte_unmap(ptep);
				if (pte_present(pte))
					return pte & PAGE_MASK;
			}
		}
	}

	return 0;
}
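
/*
 * Note: user_va2_pa() walks the process page tables by hand, so callers
 * must hold mm->mmap_sem (user_to_dsp_map() takes it around the
 * get_io_pages() call below), and the result is only stable while the
 * page stays resident.
 */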
/**
 * get_io_pages() - pin and get pages of an io user's buffer.
 * @mm:		mm_struct pointer of the process.
 * @uva:	Virtual user space address.
 * @pages:	Number of pages to be pinned.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 */
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
			struct page **usr_pgs)
{
	u32 pa;
	int i;
	struct page *pg;

	for (i = 0; i < pages; i++) {
		pa = user_va2_pa(mm, uva);

		if (!pfn_valid(__phys_to_pfn(pa)))
			break;

		pg = phys_to_page(pa);
		usr_pgs[i] = pg;
		get_page(pg);
		/* advance to the next page of the buffer */
		uva += PAGE_SIZE;
	}

	return i;
}
/**
 * user_to_dsp_map() - map a user buffer to a dsp virtual address
 * @mmu:	Pointer to iommu handle.
 * @uva:	Virtual user space address.
 * @da:		DSP address.
 * @size:	Buffer size to map.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 *
 * This function maps a user space buffer into the DSP virtual address
 * space.
 */
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
		    struct page **usr_pgs)
{
	int res, w = 0;
	unsigned pages;
	int i;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct sg_table *sgt;
	struct scatterlist *sg;

	if (!size || !usr_pgs)
		return -EINVAL;

	pages = size / PG_SIZE4K;

	/* The buffer may span several VMAs; make sure they are contiguous */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uva);
	while (vma && (uva + size > vma->vm_end))
		vma = find_vma(mm, vma->vm_end + 1);

	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, uva, size);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}

	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		w = 1;

	if (vma->vm_flags & VM_IO)
		i = get_io_pages(mm, uva, pages, usr_pgs);
	else
		i = get_user_pages(current, mm, uva, pages, w, 1,
				   usr_pgs, NULL);
	up_read(&mm->mmap_sem);

	if (i < 0)
		return i;

	if (i < pages) {
		res = -EFAULT;
		goto err_pages;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		res = -ENOMEM;
		goto err_pages;
	}

	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
	if (res < 0)
		goto err_sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);

	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
	if (!IS_ERR_VALUE(da))
		return da;
	res = (int)da;

	sg_free_table(sgt);
err_sg:
	kfree(sgt);
	i = pages;
err_pages:
	while (i--)
		put_page(usr_pgs[i]);

	return res;
}
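
/*
 * Example: mapping a page-aligned user buffer (a sketch only; uva, da
 * and size come from the caller, and da is assumed to be a free DSP
 * virtual address chosen by the bridge's address allocator):
 *
 *	struct page **pgs = kcalloc(size / PG_SIZE4K, sizeof(*pgs),
 *				    GFP_KERNEL);
 *	u32 mapped;
 *
 *	if (!pgs)
 *		return -ENOMEM;
 *	mapped = user_to_dsp_map(dev_context->dsp_mmu, uva, da, size, pgs);
 *	if (IS_ERR_VALUE(mapped))
 *		return (int)mapped;
 */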
/**
 * user_to_dsp_unmap() - unmap a DSP virtual buffer.
 * @mmu:	Pointer to iommu handle.
 * @da:		DSP address.
 *
 * This function unmaps a user space buffer from the DSP virtual address
 * space and releases the pages pinned by user_to_dsp_map().
 */
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
	unsigned i;
	struct sg_table *sgt;
	struct scatterlist *sg;

	sgt = iommu_vunmap(mmu, da);
	if (!sgt)
		return -EFAULT;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		put_page(sg_page(sg));
	sg_free_table(sgt);
	kfree(sgt);

	return 0;
}
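
/*
 * And the matching teardown for the mapping sketch above (da must be
 * the exact address returned by user_to_dsp_map(), since iommu_vunmap()
 * looks the mapping up by its DSP virtual address):
 *
 *	int status = user_to_dsp_unmap(dev_context->dsp_mmu, da);
 *
 *	if (status)
 *		pr_err("%s: unmap of da 0x%x failed (%d)\n",
 *		       __func__, da, status);
 *	kfree(pgs);
 */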