/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor Manager Driver for TI OMAP3430 EVM.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mmzone.h>
#include <plat/control.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>

/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>

/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4		/* GPP byte offset */

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT 10000

#define MMU_SECTION_ADDR_MASK    0xFFF00000
#define MMU_SSECTION_ADDR_MASK   0xFF000000
#define MMU_LARGE_PAGE_MASK      0xFFFF0000
#define MMU_SMALL_PAGE_MASK      0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
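
/*
 * The masks above mirror the ARM-style page sizes handled by the IVA2 MMU:
 * MMU_SECTION_ADDR_MASK keeps the upper 12 address bits (1 MB sections),
 * MMU_SSECTION_ADDR_MASK the upper 8 (16 MB supersections),
 * MMU_LARGE_PAGE_MASK 64 KB large pages and MMU_SMALL_PAGE_MASK 4 KB small
 * pages; pte_set()/pte_update() below pick among exactly these four sizes.
 */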
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff,
			   u32 dsp_addr, u32 ul_num_bytes,
			   u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff,
			    u32 dsp_addr, u32 ul_num_bytes,
			    u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
static u32 user_va2_pa(struct mm_struct *mm, u32 address);
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs);
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs);

bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
/* ----------------------------------- Globals */

/* Attributes of L2 page tables for DSP MMU */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};

/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
/*
 *  This Bridge driver's function interface table.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
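
/*
 * Note: the initializers above are positional, so the order of entries must
 * match the member order of struct bridge_drv_interface (declared in
 * dspbridge/dspdefs.h) exactly; adding or reordering members there requires
 * updating this table in step.
 */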
static inline void flush_all(struct bridge_dev_context *dev_context)
{
	/* A hibernating DSP must be woken before its MMU registers
	 * can be touched */
	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->dw_brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);

	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
}
static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		 "Backtrace:\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}
/*
 *  ======== bridge_drv_entry ========
 *  purpose:
 *      Bridge Driver entry point.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	DBC_REQUIRE(driver_file_name != NULL);

	io_sm_init();		/* Initialization of io_sm module */

	if (strcmp(driver_file_name, "UMA") == 0)
		*drv_intf = &drv_interface_fxns;
	else
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
}
/*
 *  ======== bridge_brd_monitor ========
 *  purpose:
 *      This bridge_brd_monitor puts the DSP into a loadable state,
 *      i.e. the application can load and start the device.
 *
 *  Preconditions:
 *      Device in 'OFF' state.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct dspbridge_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2 to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					      OMAP2_PM_PWSTST) &
					OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->dw_brd_state = BRD_IDLE;

	return 0;
}
/*
 *  ======== bridge_brd_read ========
 *  purpose:
 *      Reads buffers for DSP memory.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;

	if (dsp_addr < dev_context->dw_dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* change here to account for the 3 bands of the DSP internal memory */
	if ((dsp_addr - dev_context->dw_dsp_start_add) <
	    dev_context->dw_internal_size) {
		offset = dsp_addr - dev_context->dw_dsp_start_add;
	} else {
		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
					   ul_num_bytes, mem_type);
		return status;
	}
	/* copy the data from DSP memory */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
	return status;
}
/*
 *  ======== bridge_brd_set_state ========
 *  purpose:
 *      This routine updates the Board status.
 */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	dev_context->dw_brd_state = brd_state;
	return status;
}
/*
 *  ======== bridge_brd_start ========
 *  purpose:
 *      Initializes DSP MMU and Starts DSP.
 *
 *  Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  b) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	struct dspbridge_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	DBC_ASSERT(ul_shm_base_virt != 0);
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;

	DBC_ASSERT(ul_shm_base != 0);
	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTADDR));
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTMOD));
		}
	}
	if (!status) {
		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
		 * IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disable the DSP MMU */
		hw_mmu_disable(resources->dw_dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dw_dmmu_base);

		/* Only make TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
					&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->ul_gpp_pa || !e->ul_dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->ul_gpp_pa,
				e->ul_dsp_va,
				e->ul_size);

			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
				       e->ul_gpp_pa,
				       e->ul_dsp_va,
				       e->ul_size,
				       itmp_entry_ndx,
				       &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dw_dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dw_dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dw_dmmu_base);

		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_BIOSGPTIMER,
				     &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->hdev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}
	}

	if (!status) {
		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->hdev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/*Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp");
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}
	}
	if (!status) {
		dev_context->mbox->rxq->callback =
					(int (*)(void *))io_mbox_msg;

/*PM_IVA2GRPSEL_PER = 0xC0;*/
		temp = readl(resources->dw_per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->dw_per_pm_base + 0xA8);

/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->dw_per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->dw_per_pm_base + 0xA4);
/*CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->dw_per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->dw_per_base + 0x44);

/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
				       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dw_dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		/* Start wdt */
		dsp_wdt_sm_set((void *)ul_shm_base);
		dsp_wdt_enable(true);

		status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->dw_brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->dw_brd_state = BRD_UNKNOWN;
		}
	}
	return status;
}
/*
 *  ======== bridge_brd_stop ========
 *  purpose:
 *      Puts DSP in self loop.
 *
 *  Preconditions:
 *  a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	int clk_status;
	struct dspbridge_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	if (dev_context->dw_brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks.. This is to ensure that there are no
	 * pending L3 or other transactions from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					       OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}

	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dw_dsp_ext_base_addr)
		dev_context->dw_dsp_ext_base_addr = 0;

	dev_context->dw_brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks*/
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
			OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	clk_status = dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
/*
 *  ======== bridge_brd_status ========
 *      Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	*board_state = dev_context->dw_brd_state;
	return 0;
}
/*
 *  ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	if (dsp_addr < dev_context->dw_dsp_start_add) {
		status = -EPERM;
		return status;
	}
	if ((dsp_addr - dev_context->dw_dsp_start_add) <
	    dev_context->dw_internal_size) {
		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					ul_num_bytes, mem_type);
	} else {
		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
					    ul_num_bytes, mem_type, false);
	}

	return status;
}
/*
 *  ======== bridge_dev_create ========
 *      Creates a driver object. Puts DSP in self loop.
 */
static int bridge_dev_create(struct bridge_dev_context **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge
	 * driver state, which becomes the context for later calls into this
	 * driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->dw_self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->dw_internal_size = OMAP_DSP_SIZE;
	/*  Clear dev context MMU table entries.
	 *  These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
		    dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
	}
	dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS(
				(void *)(config_param->dw_mem_base[3]),
				config_param->dw_mem_length[3]);
	if (!dev_context->dw_dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		/* Assuming that we use only DSP's memory map
		 * until 0x4000:0000, we would need only 1024
		 * L1 entries i.e L1 size = 4K */
		pt_attrs->l1_size = 0x1000;
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align' boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					    sizeof(struct page_info),
					    GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
	}
	if (!status) {
		dev_context->hdev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->dw_brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}
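
/*
 * Sizing example for the tables set up above (illustrative figures, not
 * from the original source): the L1 table is fixed at 4 KB, i.e. 1024
 * section entries covering the DSP map up to 0x4000:0000, while the L2
 * count is (DMMPOOLSIZE >> 20) + 6 coarse tables of HW_MMU_COARSE_PAGE_SIZE
 * bytes each; a hypothetical 96 MB DMM pool would therefore need 102 L2
 * tables.
 */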
/*
 *  ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
					(struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		status = 0;
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}
/*
 *  ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);
	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->dw_mem_base[1]) &&
			    (host_res->dw_mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->dw_mem_base[1],
						  host_res->dw_mem_phys[1],
						  shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->dw_mem_base[1] = 0;
		host_res->dw_mem_phys[1] = 0;

		if (host_res->dw_mem_base[0])
			iounmap((void *)host_res->dw_mem_base[0]);
		if (host_res->dw_mem_base[2])
			iounmap((void *)host_res->dw_mem_base[2]);
		if (host_res->dw_mem_base[3])
			iounmap((void *)host_res->dw_mem_base[3]);
		if (host_res->dw_mem_base[4])
			iounmap((void *)host_res->dw_mem_base[4]);
		if (host_res->dw_dmmu_base)
			iounmap(host_res->dw_dmmu_base);
		if (host_res->dw_per_base)
			iounmap(host_res->dw_per_base);
		if (host_res->dw_per_pm_base)
			iounmap((void *)host_res->dw_per_pm_base);
		if (host_res->dw_core_pm_base)
			iounmap((void *)host_res->dw_core_pm_base);
		if (host_res->dw_sys_ctrl_base)
			iounmap(host_res->dw_sys_ctrl_base);

		host_res->dw_mem_base[0] = (u32) NULL;
		host_res->dw_mem_base[2] = (u32) NULL;
		host_res->dw_mem_base[3] = (u32) NULL;
		host_res->dw_mem_base[4] = (u32) NULL;
		host_res->dw_dmmu_base = NULL;
		host_res->dw_sys_ctrl_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	dev_set_drvdata(bridge, NULL);
	return status;
}
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	u32 src_addr = dsp_src_addr;
	u32 dest_addr = dsp_dest_addr;
	u32 copy_bytes = 0;
	u32 total_bytes = ul_num_bytes;
	u8 host_buf[BUFFERSIZE];
	struct bridge_dev_context *dev_context = dev_ctxt;

	while (total_bytes > 0 && !status) {
		copy_bytes =
		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
					   copy_bytes, mem_type);
		if (!status) {
			if (dest_addr < (dev_context->dw_dsp_start_add +
					 dev_context->dw_internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, host_buf,
							dest_addr, copy_bytes,
							mem_type);
			} else {
				/* Write to External memory */
				status =
				    write_ext_dsp_data(dev_ctxt, host_buf,
						       dest_addr, copy_bytes,
						       mem_type, false);
			}
		}
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
	}
	return status;
}
/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 ul_remain_bytes = 0;
	u32 ul_bytes = 0;

	ul_remain_bytes = ul_num_bytes;
	while (ul_remain_bytes > 0 && !status) {
		ul_bytes =
		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
		if (dsp_addr < (dev_context->dw_dsp_start_add +
				dev_context->dw_internal_size)) {
			status =
			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					   ul_bytes, mem_type);
		} else {
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, ul_bytes,
						    mem_type, true);
		}
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
	}
	return status;
}
/*
 *  ======== bridge_brd_mem_map ========
 *      This function maps MPU buffer to the DSP address space. It performs
 *  linear to physical address translation if required. It translates each
 *  page since linear addresses can be physically non-contiguous
 *  All address & size arguments are assumed to be page aligned (in proc.c)
 *
 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages in case it failed in the middle
		 * of mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead of from pte_update to avoid
	 * unnecessary repetition while mapping non-contiguous physical
	 * regions of a virtual region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
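
/*
 * Page reference handling above: for ordinary user memory,
 * get_user_pages() takes one reference per 4 KB page, which
 * bridge_brd_mem_un_map() releases again; for VM_IO areas the physical
 * address is resolved manually through user_va2_pa(), since such pages may
 * not be backed by a struct page at all.
 */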
/*
 *  ======== bridge_brd_mem_un_map ========
 *      Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 *      PTEs of a mapped memory block are contiguous in any page table
 *      So, instead of looking up the PTE address for every 4K block,
 *      we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
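
/*
 * Worked example for the L2 bookkeeping above (illustrative values): if
 * va_curr lands on the 200th 4 KB entry of a coarse page, the byte offset
 * pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1) is 800, so pte_count =
 * (1024 - 800) / 4 = 56 entries remain on that page, and rem_bytes shrinks
 * by 56 * PG_SIZE4K once they are cleared (or by less if fewer bytes were
 * requested).
 */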
/*
 *  ======== user_va2_pa ========
 *  purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to physical address
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				if (pte_present(pte))
					return pte & PAGE_MASK;
			}
		}
	}

	return 0;
}
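
/*
 * The walk above is the standard two-level lookup used on ARM:
 * pgd_offset() picks the first-level entry for the address, pmd_offset()
 * folds onto it on this two-level architecture, and pte_offset_map()
 * yields the second-level entry whose frame address (pte & PAGE_MASK) is
 * the physical page base.
 */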
/*
 *  ======== pte_update ========
 *  purpose:
 *      This function calculates the optimum page-aligned addresses and sizes
 *      Caller must pass page-aligned values
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	u32 i;
	u32 all_bits;
	u32 pa_curr = pa;
	u32 va_curr = va;
	u32 num_bytes = size;
	struct bridge_dev_context *dev_context = dev_ctxt;
	int status = 0;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	while (num_bytes && !status) {
		/* To find the max. page size with which both PA & VA are
		 * aligned */
		all_bits = pa_curr | va_curr;

		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    pte_set(dev_context->pt_attrs, pa_curr,
					    va_curr, page_size[i], map_attrs);
				pa_curr += page_size[i];
				va_curr += page_size[i];
				num_bytes -= page_size[i];
				/* Don't try smaller sizes. Hopefully we have
				 * reached an address aligned to a bigger page
				 * size */
				break;
			}
		}
	}

	return status;
}
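
/*
 * Example of the size selection above (illustrative addresses, not from
 * the original source): mapping pa = va = 0x90000000 with size = 0x110000
 * first emits one 1 MB section (both addresses are 1 MB aligned and at
 * least 1 MB remains), then one 64 KB large page for the final 0x10000
 * bytes; the 4 KB fallback is never reached because alignment is preserved
 * at every step.
 */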
/*
 *  ======== pte_set ========
 *  purpose:
 *      This function calculates PTE address (MPU virtual) to be updated
 *      It also manages the L2 page tables
 */
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;

	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else
			return -EPERM;

		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num = (l2_base_pa -
			    pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}

	return status;
}
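
/*
 * Accounting note for the code above: each L2 coarse table holds 256
 * second-level entries (HW_MMU_COARSE_PAGE_SIZE / sizeof(u32)), and a
 * 64 KB large page occupies 16 replicated 4 KB slots, hence num_entries
 * grows by 16 in that case; the count dropping back to zero is what allows
 * bridge_brd_mem_un_map() to clear the L1 entry pointing at the table.
 */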
/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp;

	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteration to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);

			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;

		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		temp = 0;
		while (temp++ < num_of4k_pages) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead of from pte_update to avoid
	 * unnecessary repetition while mapping non-contiguous physical
	 * regions of a virtual region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
 *  ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;

	/* Wait for response from board */
	while (__raw_readw(dw_sync_addr) && --timeout)
		udelay(10);

	/* If timed out: return false */
	if (!timeout) {
		pr_err("%s: Timed out waiting DSP to Start\n", __func__);
		return false;
	}
	return true;
}