Revert "staging: tidspbridge - move all iommu related code to a new file"
drivers/staging/tidspbridge/core/tiomap3430.c
/*
 * tiomap.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor Manager Driver for TI OMAP3430 EVM.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <plat/dsp.h>

#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>

/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>

/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
#include "_deh.h"

/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4		/* GPP byte offset */
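
/*
 * Start-up handshake (see bridge_brd_start() and wait_for_start() below):
 * the GPP writes 0xffffffff at this offset, releases the DSP from reset and
 * polls until the DSP boot code clears the word; a second write of
 * 0xCAFECAFE then tells the DSP that the OPP table update is complete.
 */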

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT 10000

#define MMU_SECTION_ADDR_MASK    0xFFF00000
#define MMU_SSECTION_ADDR_MASK   0xFF000000
#define MMU_LARGE_PAGE_MASK      0xFFFF0000
#define MMU_SMALL_PAGE_MASK      0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
#define PHYS_TO_PAGE(phys)	pfn_to_page((phys) >> PAGE_SHIFT)
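
/*
 * Illustrative note (not in the original sources): the four MMU masks above
 * select the base address of the corresponding MMU mapping size --
 * 0xFFF00000 keeps the top 12 bits for a 1 MB section, 0xFF000000 the top
 * 8 bits for a 16 MB supersection, 0xFFFF0000 a 64 KB large page and
 * 0xFFFFF000 a 4 KB small page.  For example,
 * 0x80341234 & MMU_SECTION_ADDR_MASK == 0x80300000, the 1 MB section that
 * contains the address.  PHYS_TO_PAGE() simply converts a physical address
 * to its struct page via the page frame number.
 */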

/*
 * This is a totally ugly layer violation, but needed until
 * omap_ctrl_set_dsp_boot*() are provided.
 */
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)

#define OMAP343X_CTRL_REGADDR(reg) \
	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))

/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff,
			   u32 dsp_addr, u32 ul_num_bytes,
			   u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff,
			    u32 dsp_addr, u32 ul_num_bytes,
			    u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_dev_create(struct bridge_dev_context
			     **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);

/*
 * This Bridge driver's function interface table.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
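
/*
 * Note (added for clarity, not part of the original file): the initializers
 * above are positional, so their order must match the member order of
 * struct bridge_drv_interface in <dspbridge/dspdefs.h>.  A minimal sketch of
 * how the DSP API layer is expected to dispatch through the table once
 * bridge_drv_entry() has returned it -- the member names are illustrative
 * assumptions, not taken from this file:
 *
 *	struct bridge_drv_interface *intf_fxns;
 *
 *	bridge_drv_entry(&intf_fxns, "UMA");
 *	status = (*intf_fxns->pfn_dev_create)(&dev_ctxt, hdev_obj, host_res);
 *	if (!status)
 *		status = (*intf_fxns->pfn_brd_start)(dev_ctxt, dsp_entry_addr);
 */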

/*
 *  ======== bridge_drv_entry ========
 *  purpose:
 *      Bridge Driver entry point.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	DBC_REQUIRE(driver_file_name != NULL);

	io_sm_init();		/* Initialization of io_sm module */

	if (strcmp(driver_file_name, "UMA") == 0)
		*drv_intf = &drv_interface_fxns;
	else
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
}

/*
 *  ======== bridge_brd_monitor ========
 *  purpose:
 *      This bridge_brd_monitor puts DSP into a Loadable state.
 *      i.e Application can load and start the device.
 *
 *  Preconditions:
 *      Device in 'OFF' state.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
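		/*
		 * Editorial note: with the usual OMAP3 power-state encoding
		 * (OFF = 0x0, RETENTION = 0x1, INACTIVE = 0x2, ON = 0x3),
		 * bit 1 being clear means the IVA2 domain is in OFF or
		 * RETENTION, so it must be forced ON before its clocks are
		 * enabled below.
		 */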
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2 to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					      OMAP2_PM_PWSTST) &
					      OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}

	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->dw_brd_state = BRD_IDLE;

	return 0;
}

/*
 *  ======== bridge_brd_read ========
 *  purpose:
 *      Reads buffers for DSP memory.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;

	if (dsp_addr < dev_context->dw_dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* change here to account for the 3 bands of the DSP internal memory */
	if ((dsp_addr - dev_context->dw_dsp_start_add) <
	    dev_context->dw_internal_size) {
		offset = dsp_addr - dev_context->dw_dsp_start_add;
	} else {
		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
					   ul_num_bytes, mem_type);
		return status;
	}
	/* copy the data from DSP memory */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
	return status;
}

/*
 *  ======== bridge_brd_set_state ========
 *  purpose:
 *      This routine updates the Board status.
 */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	dev_context->dw_brd_state = brd_state;
	return status;
}

/*
 *  ======== bridge_brd_start ========
 *  purpose:
 *      Initializes DSP MMU and Starts DSP.
 *
 *  Preconditions:
 *      a) DSP domain is 'ACTIVE'.
 *      b) DSP_RST1 is asserted.
 *      c) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct iommu *mmu = NULL;
	struct shm_segs *sm_sg;
	int l4_i = 0, tlb_i = 0;
	u32 sg0_da = 0, sg1_da = 0;
	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	DBC_ASSERT(ul_shm_base_virt != 0);
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;

	DBC_ASSERT(ul_shm_base != 0);
	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTADDR));
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTMOD));
		}
	}
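
	/*
	 * Editorial note: the two writes above program the IVA2 boot
	 * interface in the OMAP3 system control module -- the boot address
	 * (1 KB aligned, hence OMAP3_IVA2_BOOTADDR_MASK) and the boot mode.
	 * With dsp_debug set, the DSP is parked in an idle/self loop instead
	 * of jumping straight to the image entry point, which leaves room to
	 * attach a debugger before releasing it.
	 */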

	if (!status) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		mmu = dev_context->dsp_mmu;
		if (mmu)
			iommu_put(mmu);
		mmu = iommu_get("iva2");
		if (IS_ERR(mmu)) {
			dev_err(bridge, "iommu_get failed!\n");
			dev_context->dsp_mmu = NULL;
			status = (int)mmu;
		}
	}
	if (!status) {
		dev_context->dsp_mmu = mmu;
		mmu->isr = mmu_fault_isr;
		sm_sg = &dev_context->sh_s;
		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
		if (IS_ERR_VALUE(sg0_da)) {
			status = (int)sg0_da;
			sg0_da = 0;
		}
	}
	if (!status) {
		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
		if (IS_ERR_VALUE(sg1_da)) {
			status = (int)sg1_da;
			sg1_da = 0;
		}
	}
	if (!status) {
		u32 da;
		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
			if (!tlb[tlb_i].ul_gpp_pa)
				continue;

			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);

			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
				tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
			if (IS_ERR_VALUE(da)) {
				status = (int)da;
				break;
			}
		}
	}
	if (!status) {
		u32 da;
		l4_i = 0;
		while (l4_peripheral_table[l4_i].phys_addr) {
			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
				dsp_virt_addr, l4_peripheral_table[l4_i].
				phys_addr, PAGE_SIZE,
				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
			if (IS_ERR_VALUE(da)) {
				status = (int)da;
				break;
			}
			l4_i++;
		}
	}
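
	/*
	 * Editorial note: tlb_i and l4_i are deliberately left at the point
	 * where a mapping failed; the error path at the end of this function
	 * walks them backwards so that only the entries that were actually
	 * mapped above get unmapped again.
	 */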

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_BIOSGPTIMER,
				     &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);

		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}

		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}

		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/* Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x\n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr,
					       sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp");
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}
	}

	if (!status) {
		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;

		/*PM_IVA2GRPSEL_PER = 0xC0;*/
		temp = readl(resources->dw_per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->dw_per_pm_base + 0xA8);

		/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->dw_per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->dw_per_pm_base + 0xA4);
		/*CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->dw_per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->dw_per_base + 0x44);

		/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		/* Start wdt */
		dsp_wdt_sm_set((void *)ul_shm_base);
		dsp_wdt_enable(true);

		status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->dw_brd_state = BRD_RUNNING;
			return 0;
		} else {
			dev_context->dw_brd_state = BRD_UNKNOWN;
		}
	}

	while (tlb_i--) {
		if (!tlb[tlb_i].ul_gpp_pa)
			continue;
		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
	}
	while (l4_i--)
		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
	if (sg0_da)
		iommu_kunmap(mmu, sg0_da);
	if (sg1_da)
		iommu_kunmap(mmu, sg1_da);
	return status;
}

/*
 *  ======== bridge_brd_stop ========
 *  purpose:
 *      Puts DSP in self loop.
 *
 *  Preconditions :
 *      a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dsp_pwr_state;
	int i;
	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	if (dev_context->dw_brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks.. This is to ensure that there are no
	 * pending L3 or other transactons from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					       OMAP2_PM_PWSTST) &
					       OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dw_dsp_ext_base_addr)
		dev_context->dw_dsp_ext_base_addr = 0;

	dev_context->dw_brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* Reset DSP */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
			OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);

	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox);
		dev_context->mbox = NULL;
	}
	if (dev_context->dsp_mmu) {
		pr_err("Proc stop mmu if statement\n");
		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
			if (!tlb[i].ul_gpp_pa)
				continue;
			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
		}
		i = 0;
		while (l4_peripheral_table[i].phys_addr) {
			iommu_kunmap(dev_context->dsp_mmu,
				     l4_peripheral_table[i].dsp_virt_addr);
			i++;
		}
		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
		iommu_put(dev_context->dsp_mmu);
		dev_context->dsp_mmu = NULL;
	}
	/* Reset IVA IOMMU */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
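
/*
 * Editorial note on the teardown order above: the DSP is first asked to go
 * idle through the mailbox (MBX_PM_DSPIDLE) and the IVA2 domain is driven
 * towards OFF, then RST1 is re-asserted, the mailbox is released, every
 * iommu_kmap() mapping created in bridge_brd_start() is undone, RST2 is
 * asserted to reset the DSP MMU itself, and finally the peripheral and IVA2
 * clocks are dropped.  Releasing resources in roughly the reverse order of
 * bridge_brd_start() keeps start/stop cycles balanced.
 */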

/*
 *  ======== bridge_brd_status ========
 *      Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	*board_state = dev_context->dw_brd_state;
	return 0;
}

/*
 *  ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	if (dsp_addr < dev_context->dw_dsp_start_add) {
		status = -EPERM;
		return status;
	}
	if ((dsp_addr - dev_context->dw_dsp_start_add) <
	    dev_context->dw_internal_size) {
		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					ul_num_bytes, mem_type);
	} else {
		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
					    ul_num_bytes, mem_type, false);
	}

	return status;
}

/*
 *  ======== bridge_dev_create ========
 *      Creates a driver object. Puts DSP in self loop.
 */
static int bridge_dev_create(struct bridge_dev_context
			     **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge
	 * driver state, which becomes the context for later calls into
	 * this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->dw_self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->dw_internal_size = OMAP_DSP_SIZE;
	/*  Clear dev context MMU table entries.
	 *  These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
		    dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
	}
	dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
					(config_param->dw_mem_base[3]),
					config_param->dw_mem_length[3]);
	if (!dev_context->dw_dsp_base_addr)
		status = -EPERM;

	if (!status) {
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
		dev_context->hdev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->dw_brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		kfree(dev_context);
	}
func_end:
	return status;
}

/*
 *  ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
					(struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		status = 0;
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}

/*
 *  ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->dw_mem_base[1]) &&
			    (host_res->dw_mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->dw_mem_base[1],
						  host_res->dw_mem_phys[1],
						  shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->dw_mem_base[1] = 0;
		host_res->dw_mem_phys[1] = 0;

		if (host_res->dw_mem_base[0])
			iounmap((void *)host_res->dw_mem_base[0]);
		if (host_res->dw_mem_base[2])
			iounmap((void *)host_res->dw_mem_base[2]);
		if (host_res->dw_mem_base[3])
			iounmap((void *)host_res->dw_mem_base[3]);
		if (host_res->dw_mem_base[4])
			iounmap((void *)host_res->dw_mem_base[4]);
		if (host_res->dw_dmmu_base)
			iounmap(host_res->dw_dmmu_base);
		if (host_res->dw_per_base)
			iounmap(host_res->dw_per_base);
		if (host_res->dw_per_pm_base)
			iounmap((void *)host_res->dw_per_pm_base);
		if (host_res->dw_core_pm_base)
			iounmap((void *)host_res->dw_core_pm_base);
		if (host_res->dw_sys_ctrl_base)
			iounmap(host_res->dw_sys_ctrl_base);

		host_res->dw_mem_base[0] = (u32) NULL;
		host_res->dw_mem_base[2] = (u32) NULL;
		host_res->dw_mem_base[3] = (u32) NULL;
		host_res->dw_mem_base[4] = (u32) NULL;
		host_res->dw_dmmu_base = NULL;
		host_res->dw_sys_ctrl_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree(drv_datap);
	dev_set_drvdata(bridge, NULL);
	kfree((void *)dev_ctxt);
	return status;
}

static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	u32 src_addr = dsp_src_addr;
	u32 dest_addr = dsp_dest_addr;
	u32 copy_bytes = 0;
	u32 total_bytes = ul_num_bytes;
	u8 host_buf[BUFFERSIZE];
	struct bridge_dev_context *dev_context = dev_ctxt;

	while (total_bytes > 0 && !status) {
		copy_bytes =
		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
					   copy_bytes, mem_type);
		if (!status) {
			if (dest_addr < (dev_context->dw_dsp_start_add +
					 dev_context->dw_internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, host_buf,
							dest_addr, copy_bytes,
							mem_type);
			} else {
				/* Write to External memory */
				status =
				    write_ext_dsp_data(dev_ctxt, host_buf,
						       dest_addr, copy_bytes,
						       mem_type, false);
			}
		}
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
	}
	return status;
}
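
/*
 * Editorial note: bridge_brd_mem_copy() above moves data entirely on the GPP
 * side -- each iteration reads up to BUFFERSIZE (1 KB) from external DSP
 * memory into an on-stack bounce buffer and writes it back out to internal
 * or external DSP memory depending on the destination address, so
 * arbitrarily large copies never need more than 1 KB of kernel stack.
 */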

/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 ul_remain_bytes = 0;
	u32 ul_bytes = 0;

	ul_remain_bytes = ul_num_bytes;
	while (ul_remain_bytes > 0 && !status) {
		ul_bytes =
		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
		if (dsp_addr < (dev_context->dw_dsp_start_add +
				dev_context->dw_internal_size)) {
			status =
			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					   ul_bytes, mem_type);
		} else {
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, ul_bytes,
						    mem_type, true);
		}
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
	}
	return status;
}

/*
 *  ======== user_va2_pa ========
 *  Purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to physical address
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				if (pte_present(pte))
					return pte & PAGE_MASK;
			}
		}
	}

	return 0;
}
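
/*
 * Editorial note: user_va2_pa() only resolves mappings that are present as
 * ordinary 4 KB PTEs; anything that fails the pgd/pmd checks (typically
 * including section-mapped regions on a two-level ARM page table) or is not
 * present makes it return 0, and get_io_pages() below treats the resulting
 * invalid pfn as the point to stop pinning.
 */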

/**
 * get_io_pages() - pin and get pages of io user's buffer.
 * @mm:		mm_struct pointer of the process.
 * @uva:	Virtual user space address.
 * @pages:	Number of pages to be pinned.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 *
 */
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
			struct page **usr_pgs)
{
	u32 pa;
	int i;
	struct page *pg;

	for (i = 0; i < pages; i++) {
		pa = user_va2_pa(mm, uva);

		if (!pfn_valid(__phys_to_pfn(pa)))
			break;

		pg = PHYS_TO_PAGE(pa);
		usr_pgs[i] = pg;
		get_page(pg);
		/* advance to the next page of the buffer */
		uva += PAGE_SIZE;
	}
	return i;
}

/**
 * user_to_dsp_map() - maps user to dsp virtual address
 * @mmu:	Pointer to iommu handle.
 * @uva:	Virtual user space address.
 * @da:		DSP address
 * @size:	Buffer size to map.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 *
 * This function maps a user space buffer into DSP virtual address.
 *
 */
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
		    struct page **usr_pgs)
{
	int res, w = 0;
	unsigned pages;
	int i;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct sg_table *sgt;
	struct scatterlist *sg;

	if (!size || !usr_pgs)
		return -EINVAL;

	pages = size / PG_SIZE4K;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uva);
	while (vma && (uva + size > vma->vm_end))
		vma = find_vma(mm, vma->vm_end + 1);

	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, uva, size);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		w = 1;

	if (vma->vm_flags & VM_IO)
		i = get_io_pages(mm, uva, pages, usr_pgs);
	else
		i = get_user_pages(current, mm, uva, pages, w, 1,
				   usr_pgs, NULL);
	up_read(&mm->mmap_sem);

	if (i < 0)
		return i;

	if (i < pages) {
		res = -EFAULT;
		goto err_pages;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		res = -ENOMEM;
		goto err_pages;
	}

	res = sg_alloc_table(sgt, pages, GFP_KERNEL);

	if (res < 0)
		goto err_sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);

	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);

	if (!IS_ERR_VALUE(da))
		return da;
	res = (int)da;

	sg_free_table(sgt);
err_sg:
	kfree(sgt);
	i = pages;
err_pages:
	while (i--)
		put_page(usr_pgs[i]);
	return res;
}
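
/*
 * Editorial sketch (not from the original file) of how the map/unmap pair is
 * meant to be used by the processor layer; the caller and its local names
 * are illustrative assumptions:
 *
 *	struct page **pages = kcalloc(size / PG_SIZE4K, sizeof(*pages),
 *				      GFP_KERNEL);
 *	u32 da = user_to_dsp_map(dev_ctxt->dsp_mmu, (u32)user_buf,
 *				 requested_da, size, pages);
 *	if (!IS_ERR_VALUE(da)) {
 *		... let the DSP work on the buffer at 'da' ...
 *		user_to_dsp_unmap(dev_ctxt->dsp_mmu, da);
 *	}
 *	kfree(pages);
 */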

/**
 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
 * @mmu:	Pointer to iommu handle.
 * @da:		DSP address
 *
 * This function unmaps a user space buffer from the DSP virtual address
 * it was mapped to by user_to_dsp_map().
 *
 */
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
	unsigned i;
	struct sg_table *sgt;
	struct scatterlist *sg;

	sgt = iommu_vunmap(mmu, da);
	if (!sgt)
		return -EFAULT;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		put_page(sg_page(sg));
	sg_free_table(sgt);
	kfree(sgt);

	return 0;
}

/*
 *  ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;

	/* Wait for response from board */
	while (__raw_readw(dw_sync_addr) && --timeout)
		udelay(10);

	/* If timed out: return false */
	if (!timeout) {
		pr_err("%s: Timed out waiting DSP to Start\n", __func__);
		return false;
	}
	return true;
}