 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor interface at the driver level.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ------------------------------------ Host OS */
21 #include <linux/dma-mapping.h>
22 #include <linux/scatterlist.h>
23 #include <dspbridge/host_os.h>
25 /* ----------------------------------- DSP/BIOS Bridge */
26 #include <dspbridge/dbdefs.h>
28 /* ----------------------------------- Trace & Debug */
29 #include <dspbridge/dbc.h>
31 /* ----------------------------------- OS Adaptation Layer */
32 #include <dspbridge/list.h>
33 #include <dspbridge/ntfy.h>
34 #include <dspbridge/sync.h>
35 /* ----------------------------------- Bridge Driver */
36 #include <dspbridge/dspdefs.h>
37 #include <dspbridge/dspdeh.h>
38 /* ----------------------------------- Platform Manager */
39 #include <dspbridge/cod.h>
40 #include <dspbridge/dev.h>
41 #include <dspbridge/procpriv.h>
42 #include <dspbridge/dmm.h>
44 /* ----------------------------------- Resource Manager */
45 #include <dspbridge/mgr.h>
46 #include <dspbridge/node.h>
47 #include <dspbridge/nldr.h>
48 #include <dspbridge/rmm.h>
50 /* ----------------------------------- Others */
51 #include <dspbridge/dbdcd.h>
52 #include <dspbridge/msg.h>
53 #include <dspbridge/dspioctl.h>
54 #include <dspbridge/drv.h>
57 /* ----------------------------------- This */
58 #include <dspbridge/proc.h>
59 #include <dspbridge/pwr.h>
61 #include <dspbridge/resourcecleanup.h>
62 /* ----------------------------------- Defines, Data Structures, Typedefs */
63 #define MAXCMDLINELEN 255
64 #define PROC_ENVPROCID "PROC_ID=%d"
65 #define MAXPROCIDLEN (8 + 5)
66 #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
67 #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */
68 #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
70 #define DSP_CACHE_LINE 128
72 #define BUFMODE_MASK (3 << 14)
74 /* Buffer modes from DSP perspective */
75 #define RBUF 0x4000 /* Input buffer */
76 #define WBUF 0x8000 /* Output Buffer */
78 extern struct device
*bridge
;
80 /* ----------------------------------- Globals */
82 /* The proc_object structure. */
84 struct list_head link
; /* Link to next proc_object */
85 struct dev_object
*hdev_obj
; /* Device this PROC represents */
86 u32 process
; /* Process owning this Processor */
87 struct mgr_object
*hmgr_obj
; /* Manager Object Handle */
88 u32 attach_count
; /* Processor attach count */
89 u32 processor_id
; /* Processor number */
90 u32 utimeout
; /* Time out count */
91 enum dsp_procstate proc_state
; /* Processor state */
92 u32 ul_unit
; /* DDSP unit number */
93 bool is_already_attached
; /*
94 * True if the Device below has
97 struct ntfy_object
*ntfy_obj
; /* Manages notifications */
98 /* Bridge Context Handle */
99 struct bridge_dev_context
*hbridge_context
;
100 /* Function interface to Bridge driver */
101 struct bridge_drv_interface
*intf_fxns
;
103 struct list_head proc_list
;
108 DEFINE_MUTEX(proc_lock
); /* For critical sections */
110 /* ----------------------------------- Function Prototypes */
111 static int proc_monitor(struct proc_object
*proc_obj
);
112 static s32
get_envp_count(char **envp
);
113 static char **prepend_envp(char **new_envp
, char **envp
, s32 envp_elems
,
114 s32 cnew_envp
, char *sz_var
);
116 /* remember mapping information */
117 static struct dmm_map_object
*add_mapping_info(struct process_context
*pr_ctxt
,
118 u32 mpu_addr
, u32 dsp_addr
, u32 size
)
120 struct dmm_map_object
*map_obj
;
122 u32 num_usr_pgs
= size
/ PG_SIZE4K
;
124 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
128 map_obj
= kzalloc(sizeof(struct dmm_map_object
), GFP_KERNEL
);
130 pr_err("%s: kzalloc failed\n", __func__
);
133 INIT_LIST_HEAD(&map_obj
->link
);
135 map_obj
->pages
= kcalloc(num_usr_pgs
, sizeof(struct page
*),
137 if (!map_obj
->pages
) {
138 pr_err("%s: kzalloc failed\n", __func__
);
143 map_obj
->mpu_addr
= mpu_addr
;
144 map_obj
->dsp_addr
= dsp_addr
;
145 map_obj
->size
= size
;
146 map_obj
->num_usr_pgs
= num_usr_pgs
;
148 spin_lock(&pr_ctxt
->dmm_map_lock
);
149 list_add(&map_obj
->link
, &pr_ctxt
->dmm_map_list
);
150 spin_unlock(&pr_ctxt
->dmm_map_lock
);
155 static int match_exact_map_obj(struct dmm_map_object
*map_obj
,
156 u32 dsp_addr
, u32 size
)
158 if (map_obj
->dsp_addr
== dsp_addr
&& map_obj
->size
!= size
)
159 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
160 __func__
, dsp_addr
, map_obj
->size
, size
);
162 return map_obj
->dsp_addr
== dsp_addr
&&
163 map_obj
->size
== size
;
166 static void remove_mapping_information(struct process_context
*pr_ctxt
,
167 u32 dsp_addr
, u32 size
)
169 struct dmm_map_object
*map_obj
;
171 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__
,
174 spin_lock(&pr_ctxt
->dmm_map_lock
);
175 list_for_each_entry(map_obj
, &pr_ctxt
->dmm_map_list
, link
) {
176 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
182 if (match_exact_map_obj(map_obj
, dsp_addr
, size
)) {
183 pr_debug("%s: match, deleting map info\n", __func__
);
184 list_del(&map_obj
->link
);
185 kfree(map_obj
->dma_info
.sg
);
186 kfree(map_obj
->pages
);
190 pr_debug("%s: candidate didn't match\n", __func__
);
193 pr_err("%s: failed to find given map info\n", __func__
);
195 spin_unlock(&pr_ctxt
->dmm_map_lock
);
198 static int match_containing_map_obj(struct dmm_map_object
*map_obj
,
199 u32 mpu_addr
, u32 size
)
201 u32 map_obj_end
= map_obj
->mpu_addr
+ map_obj
->size
;
203 return mpu_addr
>= map_obj
->mpu_addr
&&
204 mpu_addr
+ size
<= map_obj_end
;
207 static struct dmm_map_object
*find_containing_mapping(
208 struct process_context
*pr_ctxt
,
209 u32 mpu_addr
, u32 size
)
211 struct dmm_map_object
*map_obj
;
212 pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__
,
215 spin_lock(&pr_ctxt
->dmm_map_lock
);
216 list_for_each_entry(map_obj
, &pr_ctxt
->dmm_map_list
, link
) {
217 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
222 if (match_containing_map_obj(map_obj
, mpu_addr
, size
)) {
223 pr_debug("%s: match!\n", __func__
);
227 pr_debug("%s: no match!\n", __func__
);
232 spin_unlock(&pr_ctxt
->dmm_map_lock
);
236 static int find_first_page_in_cache(struct dmm_map_object
*map_obj
,
237 unsigned long mpu_addr
)
239 u32 mapped_base_page
= map_obj
->mpu_addr
>> PAGE_SHIFT
;
240 u32 requested_base_page
= mpu_addr
>> PAGE_SHIFT
;
241 int pg_index
= requested_base_page
- mapped_base_page
;
243 if (pg_index
< 0 || pg_index
>= map_obj
->num_usr_pgs
) {
244 pr_err("%s: failed (got %d)\n", __func__
, pg_index
);
248 pr_debug("%s: first page is %d\n", __func__
, pg_index
);
252 static inline struct page
*get_mapping_page(struct dmm_map_object
*map_obj
,
255 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__
,
256 pg_i
, map_obj
->num_usr_pgs
);
258 if (pg_i
< 0 || pg_i
>= map_obj
->num_usr_pgs
) {
259 pr_err("%s: requested pg_i %d is out of mapped range\n",
264 return map_obj
->pages
[pg_i
];
268 * ======== proc_attach ========
270 * Prepare for communication with a particular DSP processor, and return
271 * a handle to the processor object.
274 proc_attach(u32 processor_id
,
275 const struct dsp_processorattrin
*attr_in
,
276 void **ph_processor
, struct process_context
*pr_ctxt
)
279 struct dev_object
*hdev_obj
;
280 struct proc_object
*p_proc_object
= NULL
;
281 struct mgr_object
*hmgr_obj
= NULL
;
282 struct drv_object
*hdrv_obj
= NULL
;
283 struct drv_data
*drv_datap
= dev_get_drvdata(bridge
);
286 DBC_REQUIRE(refs
> 0);
287 DBC_REQUIRE(ph_processor
!= NULL
);
289 if (pr_ctxt
->hprocessor
) {
290 *ph_processor
= pr_ctxt
->hprocessor
;
294 /* Get the Driver and Manager Object Handles */
295 if (!drv_datap
|| !drv_datap
->drv_object
|| !drv_datap
->mgr_object
) {
297 pr_err("%s: Failed to get object handles\n", __func__
);
299 hdrv_obj
= drv_datap
->drv_object
;
300 hmgr_obj
= drv_datap
->mgr_object
;
304 /* Get the Device Object */
305 status
= drv_get_dev_object(processor_id
, hdrv_obj
, &hdev_obj
);
308 status
= dev_get_dev_type(hdev_obj
, &dev_type
);
313 /* If we made it this far, create the Proceesor object: */
314 p_proc_object
= kzalloc(sizeof(struct proc_object
), GFP_KERNEL
);
315 /* Fill out the Processor Object: */
316 if (p_proc_object
== NULL
) {
320 p_proc_object
->hdev_obj
= hdev_obj
;
321 p_proc_object
->hmgr_obj
= hmgr_obj
;
322 p_proc_object
->processor_id
= dev_type
;
323 /* Store TGID instead of process handle */
324 p_proc_object
->process
= current
->tgid
;
326 INIT_LIST_HEAD(&p_proc_object
->proc_list
);
329 p_proc_object
->utimeout
= attr_in
->utimeout
;
331 p_proc_object
->utimeout
= PROC_DFLT_TIMEOUT
;
333 status
= dev_get_intf_fxns(hdev_obj
, &p_proc_object
->intf_fxns
);
335 status
= dev_get_bridge_context(hdev_obj
,
336 &p_proc_object
->hbridge_context
);
338 kfree(p_proc_object
);
340 kfree(p_proc_object
);
345 /* Create the Notification Object */
346 /* This is created with no event mask, no notify mask
347 * and no valid handle to the notification. They all get
348 * filled up when proc_register_notify is called */
349 p_proc_object
->ntfy_obj
= kmalloc(sizeof(struct ntfy_object
),
351 if (p_proc_object
->ntfy_obj
)
352 ntfy_init(p_proc_object
->ntfy_obj
);
357 /* Insert the Processor Object into the DEV List.
358 * Return handle to this Processor Object:
359 * Find out if the Device is already attached to a
360 * Processor. If so, return AlreadyAttached status */
361 lst_init_elem(&p_proc_object
->link
);
362 status
= dev_insert_proc_object(p_proc_object
->hdev_obj
,
365 is_already_attached
);
367 if (p_proc_object
->is_already_attached
)
370 if (p_proc_object
->ntfy_obj
) {
371 ntfy_delete(p_proc_object
->ntfy_obj
);
372 kfree(p_proc_object
->ntfy_obj
);
375 kfree(p_proc_object
);
378 *ph_processor
= (void *)p_proc_object
;
379 pr_ctxt
->hprocessor
= *ph_processor
;
380 (void)proc_notify_clients(p_proc_object
,
381 DSP_PROCESSORATTACH
);
384 /* Don't leak memory if status is failed */
385 kfree(p_proc_object
);
388 DBC_ENSURE((status
== -EPERM
&& *ph_processor
== NULL
) ||
389 (!status
&& p_proc_object
) ||
390 (status
== 0 && p_proc_object
));
395 static int get_exec_file(struct cfg_devnode
*dev_node_obj
,
396 struct dev_object
*hdev_obj
,
397 u32 size
, char *exec_file
)
401 struct drv_data
*drv_datap
= dev_get_drvdata(bridge
);
403 dev_get_dev_type(hdev_obj
, (u8
*) &dev_type
);
408 if (dev_type
== DSP_UNIT
) {
409 if (!drv_datap
|| !drv_datap
->base_img
)
412 if (strlen(drv_datap
->base_img
) > size
)
415 strcpy(exec_file
, drv_datap
->base_img
);
416 } else if (dev_type
== IVA_UNIT
&& iva_img
) {
417 len
= strlen(iva_img
);
418 strncpy(exec_file
, iva_img
, len
+ 1);
427 * ======== proc_auto_start ======== =
429 * A Particular device gets loaded with the default image
430 * if the AutoStart flag is set.
432 * hdev_obj: Handle to the Device
434 * 0: On Successful Loading
435 * -EPERM General Failure
440 int proc_auto_start(struct cfg_devnode
*dev_node_obj
,
441 struct dev_object
*hdev_obj
)
444 struct proc_object
*p_proc_object
;
445 char sz_exec_file
[MAXCMDLINELEN
];
447 struct mgr_object
*hmgr_obj
= NULL
;
448 struct drv_data
*drv_datap
= dev_get_drvdata(bridge
);
451 DBC_REQUIRE(refs
> 0);
452 DBC_REQUIRE(dev_node_obj
!= NULL
);
453 DBC_REQUIRE(hdev_obj
!= NULL
);
455 /* Create a Dummy PROC Object */
456 if (!drv_datap
|| !drv_datap
->mgr_object
) {
458 pr_err("%s: Failed to retrieve the object handle\n", __func__
);
461 hmgr_obj
= drv_datap
->mgr_object
;
464 p_proc_object
= kzalloc(sizeof(struct proc_object
), GFP_KERNEL
);
465 if (p_proc_object
== NULL
) {
469 p_proc_object
->hdev_obj
= hdev_obj
;
470 p_proc_object
->hmgr_obj
= hmgr_obj
;
471 status
= dev_get_intf_fxns(hdev_obj
, &p_proc_object
->intf_fxns
);
473 status
= dev_get_bridge_context(hdev_obj
,
474 &p_proc_object
->hbridge_context
);
478 /* Stop the Device, put it into standby mode */
479 status
= proc_stop(p_proc_object
);
484 /* Get the default executable for this board... */
485 dev_get_dev_type(hdev_obj
, (u8
*) &dev_type
);
486 p_proc_object
->processor_id
= dev_type
;
487 status
= get_exec_file(dev_node_obj
, hdev_obj
, sizeof(sz_exec_file
),
490 argv
[0] = sz_exec_file
;
492 /* ...and try to load it: */
493 status
= proc_load(p_proc_object
, 1, (const char **)argv
, NULL
);
495 status
= proc_start(p_proc_object
);
497 kfree(p_proc_object
->psz_last_coff
);
498 p_proc_object
->psz_last_coff
= NULL
;
500 kfree(p_proc_object
);
506 * ======== proc_ctrl ========
508 * Pass control information to the GPP device driver managing the
511 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
512 * application developer's API.
513 * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
514 * Operation. arg can be null.
516 int proc_ctrl(void *hprocessor
, u32 dw_cmd
, struct dsp_cbdata
* arg
)
519 struct proc_object
*p_proc_object
= hprocessor
;
522 DBC_REQUIRE(refs
> 0);
525 /* intercept PWR deep sleep command */
526 if (dw_cmd
== BRDIOCTL_DEEPSLEEP
) {
527 timeout
= arg
->cb_data
;
528 status
= pwr_sleep_dsp(PWR_DEEPSLEEP
, timeout
);
530 /* intercept PWR emergency sleep command */
531 else if (dw_cmd
== BRDIOCTL_EMERGENCYSLEEP
) {
532 timeout
= arg
->cb_data
;
533 status
= pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP
, timeout
);
534 } else if (dw_cmd
== PWR_DEEPSLEEP
) {
535 /* timeout = arg->cb_data; */
536 status
= pwr_sleep_dsp(PWR_DEEPSLEEP
, timeout
);
538 /* intercept PWR wake commands */
539 else if (dw_cmd
== BRDIOCTL_WAKEUP
) {
540 timeout
= arg
->cb_data
;
541 status
= pwr_wake_dsp(timeout
);
542 } else if (dw_cmd
== PWR_WAKEUP
) {
543 /* timeout = arg->cb_data; */
544 status
= pwr_wake_dsp(timeout
);
546 if (!((*p_proc_object
->intf_fxns
->pfn_dev_cntrl
)
547 (p_proc_object
->hbridge_context
, dw_cmd
,
561 * ======== proc_detach ========
563 * Destroys the Processor Object. Removes the notification from the Dev
566 int proc_detach(struct process_context
*pr_ctxt
)
569 struct proc_object
*p_proc_object
= NULL
;
571 DBC_REQUIRE(refs
> 0);
573 p_proc_object
= (struct proc_object
*)pr_ctxt
->hprocessor
;
576 /* Notify the Client */
577 ntfy_notify(p_proc_object
->ntfy_obj
, DSP_PROCESSORDETACH
);
578 /* Remove the notification memory */
579 if (p_proc_object
->ntfy_obj
) {
580 ntfy_delete(p_proc_object
->ntfy_obj
);
581 kfree(p_proc_object
->ntfy_obj
);
584 kfree(p_proc_object
->psz_last_coff
);
585 p_proc_object
->psz_last_coff
= NULL
;
586 /* Remove the Proc from the DEV List */
587 (void)dev_remove_proc_object(p_proc_object
->hdev_obj
,
588 (u32
) p_proc_object
);
589 /* Free the Processor Object */
590 kfree(p_proc_object
);
591 pr_ctxt
->hprocessor
= NULL
;
600 * ======== proc_enum_nodes ========
602 * Enumerate and get configuration information about nodes allocated
603 * on a DSP processor.
605 int proc_enum_nodes(void *hprocessor
, void **node_tab
,
606 u32 node_tab_size
, u32
*pu_num_nodes
,
610 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
611 struct node_mgr
*hnode_mgr
= NULL
;
613 DBC_REQUIRE(refs
> 0);
614 DBC_REQUIRE(node_tab
!= NULL
|| node_tab_size
== 0);
615 DBC_REQUIRE(pu_num_nodes
!= NULL
);
616 DBC_REQUIRE(pu_allocated
!= NULL
);
619 if (!(dev_get_node_manager(p_proc_object
->hdev_obj
,
622 status
= node_enum_nodes(hnode_mgr
, node_tab
,
635 /* Cache operation against kernel address instead of users */
636 static int build_dma_sg(struct dmm_map_object
*map_obj
, unsigned long start
,
637 ssize_t len
, int pg_i
)
640 unsigned long offset
;
643 struct scatterlist
*sg
= map_obj
->dma_info
.sg
;
646 page
= get_mapping_page(map_obj
, pg_i
);
648 pr_err("%s: no page for %08lx\n", __func__
, start
);
651 } else if (IS_ERR(page
)) {
652 pr_err("%s: err page for %08lx(%lu)\n", __func__
, start
,
658 offset
= start
& ~PAGE_MASK
;
659 rest
= min_t(ssize_t
, PAGE_SIZE
- offset
, len
);
661 sg_set_page(&sg
[i
], page
, rest
, offset
);
668 if (i
!= map_obj
->dma_info
.num_pages
) {
669 pr_err("%s: bad number of sg iterations\n", __func__
);
678 static int memory_regain_ownership(struct dmm_map_object
*map_obj
,
679 unsigned long start
, ssize_t len
, enum dma_data_direction dir
)
682 unsigned long first_data_page
= start
>> PAGE_SHIFT
;
683 unsigned long last_data_page
= ((u32
)(start
+ len
- 1) >> PAGE_SHIFT
);
684 /* calculating the number of pages this area spans */
685 unsigned long num_pages
= last_data_page
- first_data_page
+ 1;
686 struct bridge_dma_map_info
*dma_info
= &map_obj
->dma_info
;
691 if (dma_info
->dir
!= dir
|| dma_info
->num_pages
!= num_pages
) {
692 pr_err("%s: dma info doesn't match given params\n", __func__
);
696 dma_unmap_sg(bridge
, dma_info
->sg
, num_pages
, dma_info
->dir
);
698 pr_debug("%s: dma_map_sg unmapped\n", __func__
);
702 map_obj
->dma_info
.sg
= NULL
;
708 /* Cache operation against kernel address instead of users */
709 static int memory_give_ownership(struct dmm_map_object
*map_obj
,
710 unsigned long start
, ssize_t len
, enum dma_data_direction dir
)
712 int pg_i
, ret
, sg_num
;
713 struct scatterlist
*sg
;
714 unsigned long first_data_page
= start
>> PAGE_SHIFT
;
715 unsigned long last_data_page
= ((u32
)(start
+ len
- 1) >> PAGE_SHIFT
);
716 /* calculating the number of pages this area spans */
717 unsigned long num_pages
= last_data_page
- first_data_page
+ 1;
719 pg_i
= find_first_page_in_cache(map_obj
, start
);
721 pr_err("%s: failed to find first page in cache\n", __func__
);
726 sg
= kcalloc(num_pages
, sizeof(*sg
), GFP_KERNEL
);
728 pr_err("%s: kcalloc failed\n", __func__
);
733 sg_init_table(sg
, num_pages
);
735 /* cleanup a previous sg allocation */
736 /* this may happen if application doesn't signal for e/o DMA */
737 kfree(map_obj
->dma_info
.sg
);
739 map_obj
->dma_info
.sg
= sg
;
740 map_obj
->dma_info
.dir
= dir
;
741 map_obj
->dma_info
.num_pages
= num_pages
;
743 ret
= build_dma_sg(map_obj
, start
, len
, pg_i
);
747 sg_num
= dma_map_sg(bridge
, sg
, num_pages
, dir
);
749 pr_err("%s: dma_map_sg failed: %d\n", __func__
, sg_num
);
754 pr_debug("%s: dma_map_sg mapped %d elements\n", __func__
, sg_num
);
755 map_obj
->dma_info
.sg_num
= sg_num
;
761 map_obj
->dma_info
.sg
= NULL
;
766 int proc_begin_dma(void *hprocessor
, void *pmpu_addr
, u32 ul_size
,
767 enum dma_data_direction dir
)
769 /* Keep STATUS here for future additions to this function */
771 struct process_context
*pr_ctxt
= (struct process_context
*) hprocessor
;
772 struct dmm_map_object
*map_obj
;
774 DBC_REQUIRE(refs
> 0);
781 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__
,
785 /* find requested memory are in cached mapping information */
786 map_obj
= find_containing_mapping(pr_ctxt
, (u32
) pmpu_addr
, ul_size
);
788 pr_err("%s: find_containing_mapping failed\n", __func__
);
793 if (memory_give_ownership(map_obj
, (u32
) pmpu_addr
, ul_size
, dir
)) {
794 pr_err("%s: InValid address parameters %p %x\n",
795 __func__
, pmpu_addr
, ul_size
);
804 int proc_end_dma(void *hprocessor
, void *pmpu_addr
, u32 ul_size
,
805 enum dma_data_direction dir
)
807 /* Keep STATUS here for future additions to this function */
809 struct process_context
*pr_ctxt
= (struct process_context
*) hprocessor
;
810 struct dmm_map_object
*map_obj
;
812 DBC_REQUIRE(refs
> 0);
819 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__
,
823 /* find requested memory are in cached mapping information */
824 map_obj
= find_containing_mapping(pr_ctxt
, (u32
) pmpu_addr
, ul_size
);
826 pr_err("%s: find_containing_mapping failed\n", __func__
);
831 if (memory_regain_ownership(map_obj
, (u32
) pmpu_addr
, ul_size
, dir
)) {
832 pr_err("%s: InValid address parameters %p %x\n",
833 __func__
, pmpu_addr
, ul_size
);
843 * ======== proc_flush_memory ========
847 int proc_flush_memory(void *hprocessor
, void *pmpu_addr
,
848 u32 ul_size
, u32 ul_flags
)
850 enum dma_data_direction dir
= DMA_BIDIRECTIONAL
;
852 return proc_begin_dma(hprocessor
, pmpu_addr
, ul_size
, dir
);
856 * ======== proc_invalidate_memory ========
858 * Invalidates the memory specified
860 int proc_invalidate_memory(void *hprocessor
, void *pmpu_addr
, u32 size
)
862 enum dma_data_direction dir
= DMA_FROM_DEVICE
;
864 return proc_begin_dma(hprocessor
, pmpu_addr
, size
, dir
);
868 * ======== proc_get_resource_info ========
870 * Enumerate the resources currently available on a processor.
872 int proc_get_resource_info(void *hprocessor
, u32 resource_type
,
873 struct dsp_resourceinfo
*resource_info
,
874 u32 resource_info_size
)
877 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
878 struct node_mgr
*hnode_mgr
= NULL
;
879 struct nldr_object
*nldr_obj
= NULL
;
880 struct rmm_target_obj
*rmm
= NULL
;
881 struct io_mgr
*hio_mgr
= NULL
; /* IO manager handle */
883 DBC_REQUIRE(refs
> 0);
884 DBC_REQUIRE(resource_info
!= NULL
);
885 DBC_REQUIRE(resource_info_size
>= sizeof(struct dsp_resourceinfo
));
887 if (!p_proc_object
) {
891 switch (resource_type
) {
892 case DSP_RESOURCE_DYNDARAM
:
893 case DSP_RESOURCE_DYNSARAM
:
894 case DSP_RESOURCE_DYNEXTERNAL
:
895 case DSP_RESOURCE_DYNSRAM
:
896 status
= dev_get_node_manager(p_proc_object
->hdev_obj
,
903 status
= node_get_nldr_obj(hnode_mgr
, &nldr_obj
);
905 status
= nldr_get_rmm_manager(nldr_obj
, &rmm
);
908 (enum dsp_memtype
)resource_type
,
909 (struct dsp_memstat
*)
910 &(resource_info
->result
.
918 case DSP_RESOURCE_PROCLOAD
:
919 status
= dev_get_io_mgr(p_proc_object
->hdev_obj
, &hio_mgr
);
922 p_proc_object
->intf_fxns
->
923 pfn_io_get_proc_load(hio_mgr
,
924 (struct dsp_procloadstat
*)
925 &(resource_info
->result
.
939 * ======== proc_exit ========
941 * Decrement reference count, and free resources when reference count is
946 DBC_REQUIRE(refs
> 0);
950 DBC_ENSURE(refs
>= 0);
954 * ======== proc_get_dev_object ========
956 * Return the Dev Object handle for a given Processor.
959 int proc_get_dev_object(void *hprocessor
,
960 struct dev_object
**device_obj
)
963 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
965 DBC_REQUIRE(refs
> 0);
966 DBC_REQUIRE(device_obj
!= NULL
);
969 *device_obj
= p_proc_object
->hdev_obj
;
976 DBC_ENSURE((!status
&& *device_obj
!= NULL
) ||
977 (status
&& *device_obj
== NULL
));
983 * ======== proc_get_state ========
985 * Report the state of the specified DSP processor.
987 int proc_get_state(void *hprocessor
,
988 struct dsp_processorstate
*proc_state_obj
,
992 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
995 DBC_REQUIRE(refs
> 0);
996 DBC_REQUIRE(proc_state_obj
!= NULL
);
997 DBC_REQUIRE(state_info_size
>= sizeof(struct dsp_processorstate
));
1000 /* First, retrieve BRD state information */
1001 status
= (*p_proc_object
->intf_fxns
->pfn_brd_status
)
1002 (p_proc_object
->hbridge_context
, &brd_status
);
1004 switch (brd_status
) {
1006 proc_state_obj
->proc_state
= PROC_STOPPED
;
1008 case BRD_SLEEP_TRANSITION
:
1009 case BRD_DSP_HIBERNATION
:
1012 proc_state_obj
->proc_state
= PROC_RUNNING
;
1015 proc_state_obj
->proc_state
= PROC_LOADED
;
1018 proc_state_obj
->proc_state
= PROC_ERROR
;
1021 proc_state_obj
->proc_state
= 0xFF;
1029 dev_dbg(bridge
, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
1030 __func__
, status
, proc_state_obj
->proc_state
);
1035 * ======== proc_get_trace ========
1037 * Retrieve the current contents of the trace buffer, located on the
1038 * Processor. Predefined symbols for the trace buffer must have been
1039 * configured into the DSP executable.
1041 * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
1042 * trace buffer, only. Treat it as an undocumented feature.
1043 * This call is destructive, meaning the processor is placed in the monitor
1044 * state as a result of this function.
1046 int proc_get_trace(void *hprocessor
, u8
* pbuf
, u32 max_size
)
1054 * ======== proc_init ========
1056 * Initialize PROC's private state, keeping a reference count on each call
1058 bool proc_init(void)
1062 DBC_REQUIRE(refs
>= 0);
1067 DBC_ENSURE((ret
&& (refs
> 0)) || (!ret
&& (refs
>= 0)));
1073 * ======== proc_load ========
1075 * Reset a processor and load a new base program image.
1076 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1077 * application developer's API.
1079 int proc_load(void *hprocessor
, const s32 argc_index
,
1080 const char **user_args
, const char **user_envp
)
1083 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1084 struct io_mgr
*hio_mgr
; /* IO manager handle */
1085 struct msg_mgr
*hmsg_mgr
;
1086 struct cod_manager
*cod_mgr
; /* Code manager handle */
1087 char *pargv0
; /* temp argv[0] ptr */
1088 char **new_envp
; /* Updated envp[] array. */
1089 char sz_proc_id
[MAXPROCIDLEN
]; /* Size of "PROC_ID=<n>" */
1090 s32 envp_elems
; /* Num elements in envp[]. */
1091 s32 cnew_envp
; /* " " in new_envp[] */
1092 s32 nproc_id
= 0; /* Anticipate MP version. */
1093 struct dcd_manager
*hdcd_handle
;
1094 struct dmm_object
*dmm_mgr
;
1098 struct drv_data
*drv_datap
= dev_get_drvdata(bridge
);
1100 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1105 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1106 struct dspbridge_platform_data
*pdata
=
1107 omap_dspbridge_dev
->dev
.platform_data
;
1110 DBC_REQUIRE(refs
> 0);
1111 DBC_REQUIRE(argc_index
> 0);
1112 DBC_REQUIRE(user_args
!= NULL
);
1114 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1115 do_gettimeofday(&tv1
);
1117 if (!p_proc_object
) {
1121 dev_get_cod_mgr(p_proc_object
->hdev_obj
, &cod_mgr
);
1126 status
= proc_stop(hprocessor
);
1130 /* Place the board in the monitor state. */
1131 status
= proc_monitor(hprocessor
);
1135 /* Save ptr to original argv[0]. */
1136 pargv0
= (char *)user_args
[0];
1137 /*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
1138 envp_elems
= get_envp_count((char **)user_envp
);
1139 cnew_envp
= (envp_elems
? (envp_elems
+ 1) : (envp_elems
+ 2));
1140 new_envp
= kzalloc(cnew_envp
* sizeof(char **), GFP_KERNEL
);
1142 status
= snprintf(sz_proc_id
, MAXPROCIDLEN
, PROC_ENVPROCID
,
1145 dev_dbg(bridge
, "%s: Proc ID string overflow\n",
1150 prepend_envp(new_envp
, (char **)user_envp
,
1151 envp_elems
, cnew_envp
, sz_proc_id
);
1152 /* Get the DCD Handle */
1153 status
= mgr_get_dcd_handle(p_proc_object
->hmgr_obj
,
1154 (u32
*) &hdcd_handle
);
1156 /* Before proceeding with new load,
1157 * check if a previously registered COFF
1159 * If yes, unregister nodes in previously
1160 * registered COFF. If any error occurred,
1161 * set previously registered COFF to NULL. */
1162 if (p_proc_object
->psz_last_coff
!= NULL
) {
1164 dcd_auto_unregister(hdcd_handle
,
1167 /* Regardless of auto unregister status,
1168 * free previously allocated
1170 kfree(p_proc_object
->psz_last_coff
);
1171 p_proc_object
->psz_last_coff
= NULL
;
1174 /* On success, do cod_open_base() */
1175 status
= cod_open_base(cod_mgr
, (char *)user_args
[0],
1182 /* Auto-register data base */
1183 /* Get the DCD Handle */
1184 status
= mgr_get_dcd_handle(p_proc_object
->hmgr_obj
,
1185 (u32
*) &hdcd_handle
);
1187 /* Auto register nodes in specified COFF
1188 * file. If registration did not fail,
1189 * (status = 0 or -EACCES)
1190 * save the name of the COFF file for
1191 * de-registration in the future. */
1193 dcd_auto_register(hdcd_handle
,
1194 (char *)user_args
[0]);
1195 if (status
== -EACCES
)
1201 DBC_ASSERT(p_proc_object
->psz_last_coff
==
1203 /* Allocate memory for pszLastCoff */
1204 p_proc_object
->psz_last_coff
=
1205 kzalloc((strlen(user_args
[0]) +
1207 /* If memory allocated, save COFF file name */
1208 if (p_proc_object
->psz_last_coff
) {
1209 strncpy(p_proc_object
->psz_last_coff
,
1210 (char *)user_args
[0],
1211 (strlen((char *)user_args
[0]) +
1217 /* Update shared memory address and size */
1219 /* Create the message manager. This must be done
1220 * before calling the IOOnLoaded function. */
1221 dev_get_msg_mgr(p_proc_object
->hdev_obj
, &hmsg_mgr
);
1223 status
= msg_create(&hmsg_mgr
, p_proc_object
->hdev_obj
,
1224 (msg_onexit
) node_on_exit
);
1225 DBC_ASSERT(!status
);
1226 dev_set_msg_mgr(p_proc_object
->hdev_obj
, hmsg_mgr
);
1230 /* Set the Device object's message manager */
1231 status
= dev_get_io_mgr(p_proc_object
->hdev_obj
, &hio_mgr
);
1233 status
= (*p_proc_object
->intf_fxns
->pfn_io_on_loaded
)
1239 /* Now, attempt to load an exec: */
1241 /* Boost the OPP level to Maximum level supported by baseport */
1242 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1243 if (pdata
->cpu_set_freq
)
1244 (*pdata
->cpu_set_freq
) (pdata
->mpu_speed
[VDD1_OPP5
]);
1246 status
= cod_load_base(cod_mgr
, argc_index
, (char **)user_args
,
1248 p_proc_object
->hdev_obj
, NULL
);
1250 if (status
== -EBADF
) {
1251 dev_dbg(bridge
, "%s: Failure to Load the EXE\n",
1254 if (status
== -ESPIPE
) {
1255 pr_err("%s: Couldn't parse the file\n",
1259 /* Requesting the lowest opp supported */
1260 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1261 if (pdata
->cpu_set_freq
)
1262 (*pdata
->cpu_set_freq
) (pdata
->mpu_speed
[VDD1_OPP1
]);
1267 /* Update the Processor status to loaded */
1268 status
= (*p_proc_object
->intf_fxns
->pfn_brd_set_state
)
1269 (p_proc_object
->hbridge_context
, BRD_LOADED
);
1271 p_proc_object
->proc_state
= PROC_LOADED
;
1272 if (p_proc_object
->ntfy_obj
)
1273 proc_notify_clients(p_proc_object
,
1274 DSP_PROCESSORSTATECHANGE
);
1278 status
= proc_get_processor_id(hprocessor
, &proc_id
);
1279 if (proc_id
== DSP_UNIT
) {
1280 /* Use all available DSP address space after EXTMEM
1283 status
= cod_get_sym_value(cod_mgr
, EXTEND
,
1286 /* Reset DMM structs and add an initial free chunk */
1289 dev_get_dmm_mgr(p_proc_object
->hdev_obj
,
1292 /* Set dw_ext_end to DMM START u8
1295 (dw_ext_end
+ 1) * DSPWORDSIZE
;
1296 /* DMM memory is from EXT_END */
1297 status
= dmm_create_tables(dmm_mgr
,
1306 /* Restore the original argv[0] */
1308 user_args
[0] = pargv0
;
1310 if (!((*p_proc_object
->intf_fxns
->pfn_brd_status
)
1311 (p_proc_object
->hbridge_context
, &brd_state
))) {
1312 pr_info("%s: Processor Loaded %s\n", __func__
, pargv0
);
1313 kfree(drv_datap
->base_img
);
1314 drv_datap
->base_img
= kmalloc(strlen(pargv0
) + 1,
1316 if (drv_datap
->base_img
)
1317 strncpy(drv_datap
->base_img
, pargv0
,
1318 strlen(pargv0
) + 1);
1321 DBC_ASSERT(brd_state
== BRD_LOADED
);
1327 pr_err("%s: Processor failed to load\n", __func__
);
1328 proc_stop(p_proc_object
);
1331 && p_proc_object
->proc_state
== PROC_LOADED
)
1333 #ifdef OPT_LOAD_TIME_INSTRUMENTATION
1334 do_gettimeofday(&tv2
);
1335 if (tv2
.tv_usec
< tv1
.tv_usec
) {
1336 tv2
.tv_usec
+= 1000000;
1339 dev_dbg(bridge
, "%s: time to load %d sec and %d usec\n", __func__
,
1340 tv2
.tv_sec
- tv1
.tv_sec
, tv2
.tv_usec
- tv1
.tv_usec
);
1346 * ======== proc_map ========
1348 * Maps a MPU buffer to DSP address space.
1350 int proc_map(void *hprocessor
, void *pmpu_addr
, u32 ul_size
,
1351 void *req_addr
, void **pp_map_addr
, u32 ul_map_attr
,
1352 struct process_context
*pr_ctxt
)
1356 struct dmm_object
*dmm_mgr
;
1359 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1360 struct dmm_map_object
*map_obj
;
1362 #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1363 if ((ul_map_attr
& BUFMODE_MASK
) != RBUF
) {
1364 if (!IS_ALIGNED((u32
)pmpu_addr
, DSP_CACHE_LINE
) ||
1365 !IS_ALIGNED(ul_size
, DSP_CACHE_LINE
)) {
1366 pr_err("%s: not aligned: 0x%x (%d)\n", __func__
,
1367 (u32
)pmpu_addr
, ul_size
);
1373 /* Calculate the page-aligned PA, VA and size */
1374 va_align
= PG_ALIGN_LOW((u32
) req_addr
, PG_SIZE4K
);
1375 pa_align
= PG_ALIGN_LOW((u32
) pmpu_addr
, PG_SIZE4K
);
1376 size_align
= PG_ALIGN_HIGH(ul_size
+ (u32
) pmpu_addr
- pa_align
,
1379 if (!p_proc_object
) {
1383 /* Critical section */
1384 mutex_lock(&proc_lock
);
1385 dmm_get_handle(p_proc_object
, &dmm_mgr
);
1387 status
= dmm_map_memory(dmm_mgr
, va_align
, size_align
);
1391 /* Add mapping to the page tables. */
1393 /* mapped memory resource tracking */
1394 map_obj
= add_mapping_info(pr_ctxt
, pa_align
, va_align
,
1399 va_align
= user_to_dsp_map(
1400 p_proc_object
->hbridge_context
->dsp_mmu
,
1401 pa_align
, va_align
, size_align
,
1403 if (IS_ERR_VALUE(va_align
))
1404 status
= (int)va_align
;
1408 /* Mapped address = MSB of VA | LSB of PA */
1409 map_obj
->dsp_addr
= (va_align
|
1410 ((u32
)pmpu_addr
& (PG_SIZE4K
- 1)));
1411 *pp_map_addr
= (void *)map_obj
->dsp_addr
;
1413 remove_mapping_information(pr_ctxt
, va_align
, size_align
);
1414 dmm_un_map_memory(dmm_mgr
, va_align
, &size_align
);
1416 mutex_unlock(&proc_lock
);
1422 dev_dbg(bridge
, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1423 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1424 "pa_align %x, size_align %x status 0x%x\n", __func__
,
1425 hprocessor
, pmpu_addr
, ul_size
, req_addr
, ul_map_attr
,
1426 pp_map_addr
, va_align
, pa_align
, size_align
, status
);
1432 * ======== proc_register_notify ========
1434 * Register to be notified of specific processor events.
1436 int proc_register_notify(void *hprocessor
, u32 event_mask
,
1437 u32 notify_type
, struct dsp_notification
1441 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1442 struct deh_mgr
*hdeh_mgr
;
1444 DBC_REQUIRE(hnotification
!= NULL
);
1445 DBC_REQUIRE(refs
> 0);
1447 /* Check processor handle */
1448 if (!p_proc_object
) {
1452 /* Check if event mask is a valid processor related event */
1453 if (event_mask
& ~(DSP_PROCESSORSTATECHANGE
| DSP_PROCESSORATTACH
|
1454 DSP_PROCESSORDETACH
| DSP_PROCESSORRESTART
|
1455 DSP_MMUFAULT
| DSP_SYSERROR
| DSP_PWRERROR
|
1459 /* Check if notify type is valid */
1460 if (notify_type
!= DSP_SIGNALEVENT
)
1464 /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
1465 * or DSP_PWRERROR then register event immediately. */
1467 ~(DSP_SYSERROR
| DSP_MMUFAULT
| DSP_PWRERROR
|
1469 status
= ntfy_register(p_proc_object
->ntfy_obj
,
1470 hnotification
, event_mask
,
1472 /* Special case alert, special case alert!
1473 * If we're trying to *deregister* (i.e. event_mask
1474 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
1475 * we have to deregister with the DEH manager.
1476 * There's no way to know, based on event_mask which
1477 * manager the notification event was registered with,
1478 * so if we're trying to deregister and ntfy_register
1479 * failed, we'll give the deh manager a shot.
1481 if ((event_mask
== 0) && status
) {
1483 dev_get_deh_mgr(p_proc_object
->hdev_obj
,
1486 bridge_deh_register_notify(hdeh_mgr
,
1492 status
= dev_get_deh_mgr(p_proc_object
->hdev_obj
,
1495 bridge_deh_register_notify(hdeh_mgr
,
1507 * ======== proc_reserve_memory ========
1509 * Reserve a virtually contiguous region of DSP address space.
1511 int proc_reserve_memory(void *hprocessor
, u32 ul_size
,
1513 struct process_context
*pr_ctxt
)
1515 struct dmm_object
*dmm_mgr
;
1517 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1518 struct dmm_rsv_object
*rsv_obj
;
1520 if (!p_proc_object
) {
1525 status
= dmm_get_handle(p_proc_object
, &dmm_mgr
);
1531 status
= dmm_reserve_memory(dmm_mgr
, ul_size
, (u32
*) pp_rsv_addr
);
1536 * A successful reserve should be followed by insertion of rsv_obj
1537 * into dmm_rsv_list, so that reserved memory resource tracking
1540 rsv_obj
= kmalloc(sizeof(struct dmm_rsv_object
), GFP_KERNEL
);
1542 rsv_obj
->dsp_reserved_addr
= (u32
) *pp_rsv_addr
;
1543 spin_lock(&pr_ctxt
->dmm_rsv_lock
);
1544 list_add(&rsv_obj
->link
, &pr_ctxt
->dmm_rsv_list
);
1545 spin_unlock(&pr_ctxt
->dmm_rsv_lock
);
1549 dev_dbg(bridge
, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1550 "status 0x%x\n", __func__
, hprocessor
,
1551 ul_size
, pp_rsv_addr
, status
);
1556 * ======== proc_start ========
1558 * Start a processor running.
1560 int proc_start(void *hprocessor
)
1563 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1564 struct cod_manager
*cod_mgr
; /* Code manager handle */
1565 u32 dw_dsp_addr
; /* Loaded code's entry point. */
1568 DBC_REQUIRE(refs
> 0);
1569 if (!p_proc_object
) {
1573 /* Call the bridge_brd_start */
1574 if (p_proc_object
->proc_state
!= PROC_LOADED
) {
1578 status
= dev_get_cod_mgr(p_proc_object
->hdev_obj
, &cod_mgr
);
1584 status
= cod_get_entry(cod_mgr
, &dw_dsp_addr
);
1588 status
= (*p_proc_object
->intf_fxns
->pfn_brd_start
)
1589 (p_proc_object
->hbridge_context
, dw_dsp_addr
);
1593 /* Call dev_create2 */
1594 status
= dev_create2(p_proc_object
->hdev_obj
);
1596 p_proc_object
->proc_state
= PROC_RUNNING
;
1597 /* Deep sleep switces off the peripheral clocks.
1598 * we just put the DSP CPU in idle in the idle loop.
1599 * so there is no need to send a command to DSP */
1601 if (p_proc_object
->ntfy_obj
) {
1602 proc_notify_clients(p_proc_object
,
1603 DSP_PROCESSORSTATECHANGE
);
1606 /* Failed to Create Node Manager and DISP Object
1607 * Stop the Processor from running. Put it in STOPPED State */
1608 (void)(*p_proc_object
->intf_fxns
->
1609 pfn_brd_stop
) (p_proc_object
->hbridge_context
);
1610 p_proc_object
->proc_state
= PROC_STOPPED
;
1614 if (!((*p_proc_object
->intf_fxns
->pfn_brd_status
)
1615 (p_proc_object
->hbridge_context
, &brd_state
))) {
1616 pr_info("%s: dsp in running state\n", __func__
);
1617 DBC_ASSERT(brd_state
!= BRD_HIBERNATION
);
1620 pr_err("%s: Failed to start the dsp\n", __func__
);
1621 proc_stop(p_proc_object
);
1625 DBC_ENSURE((!status
&& p_proc_object
->proc_state
==
1626 PROC_RUNNING
) || status
);
1631 * ======== proc_stop ========
1633 * Stop a processor running.
1635 int proc_stop(void *hprocessor
)
1638 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1639 struct msg_mgr
*hmsg_mgr
;
1640 struct node_mgr
*hnode_mgr
;
1642 u32 node_tab_size
= 1;
1644 u32 nodes_allocated
= 0;
1647 DBC_REQUIRE(refs
> 0);
1648 if (!p_proc_object
) {
1652 /* check if there are any running nodes */
1653 status
= dev_get_node_manager(p_proc_object
->hdev_obj
, &hnode_mgr
);
1654 if (!status
&& hnode_mgr
) {
1655 status
= node_enum_nodes(hnode_mgr
, &hnode
, node_tab_size
,
1656 &num_nodes
, &nodes_allocated
);
1657 if ((status
== -EINVAL
) || (nodes_allocated
> 0)) {
1658 pr_err("%s: Can't stop device, active nodes = %d \n",
1659 __func__
, nodes_allocated
);
1663 /* Call the bridge_brd_stop */
1664 /* It is OK to stop a device that does n't have nodes OR not started */
1666 (*p_proc_object
->intf_fxns
->
1667 pfn_brd_stop
) (p_proc_object
->hbridge_context
);
1669 dev_dbg(bridge
, "%s: processor in standby mode\n", __func__
);
1670 p_proc_object
->proc_state
= PROC_STOPPED
;
1671 /* Destory the Node Manager, msg_ctrl Manager */
1672 if (!(dev_destroy2(p_proc_object
->hdev_obj
))) {
1673 /* Destroy the msg_ctrl by calling msg_delete */
1674 dev_get_msg_mgr(p_proc_object
->hdev_obj
, &hmsg_mgr
);
1676 msg_delete(hmsg_mgr
);
1677 dev_set_msg_mgr(p_proc_object
->hdev_obj
, NULL
);
1679 if (!((*p_proc_object
->
1680 intf_fxns
->pfn_brd_status
) (p_proc_object
->
1683 DBC_ASSERT(brd_state
== BRD_STOPPED
);
1686 pr_err("%s: Failed to stop the processor\n", __func__
);
1694 * ======== proc_un_map ========
1696 * Removes a MPU buffer mapping from the DSP address space.
1698 int proc_un_map(void *hprocessor
, void *map_addr
,
1699 struct process_context
*pr_ctxt
)
1702 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1703 struct dmm_object
*dmm_mgr
;
1707 va_align
= PG_ALIGN_LOW((u32
) map_addr
, PG_SIZE4K
);
1708 if (!p_proc_object
) {
1713 status
= dmm_get_handle(hprocessor
, &dmm_mgr
);
1719 /* Critical section */
1720 mutex_lock(&proc_lock
);
1722 * Update DMM structures. Get the size to unmap.
1723 * This function returns error if the VA is not mapped
1725 status
= dmm_un_map_memory(dmm_mgr
, (u32
) va_align
, &size_align
);
1726 /* Remove mapping from the page tables. */
1728 status
= user_to_dsp_unmap(
1729 p_proc_object
->hbridge_context
->dsp_mmu
, va_align
);
1731 mutex_unlock(&proc_lock
);
1736 * A successful unmap should be followed by removal of map_obj
1737 * from dmm_map_list, so that mapped memory resource tracking
1740 remove_mapping_information(pr_ctxt
, (u32
) map_addr
, size_align
);
1743 dev_dbg(bridge
, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1744 __func__
, hprocessor
, map_addr
, status
);
1749 * ======== proc_un_reserve_memory ========
1751 * Frees a previously reserved region of DSP address space.
1753 int proc_un_reserve_memory(void *hprocessor
, void *prsv_addr
,
1754 struct process_context
*pr_ctxt
)
1756 struct dmm_object
*dmm_mgr
;
1758 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
1759 struct dmm_rsv_object
*rsv_obj
;
1761 if (!p_proc_object
) {
1766 status
= dmm_get_handle(p_proc_object
, &dmm_mgr
);
1772 status
= dmm_un_reserve_memory(dmm_mgr
, (u32
) prsv_addr
);
1777 * A successful unreserve should be followed by removal of rsv_obj
1778 * from dmm_rsv_list, so that reserved memory resource tracking
1781 spin_lock(&pr_ctxt
->dmm_rsv_lock
);
1782 list_for_each_entry(rsv_obj
, &pr_ctxt
->dmm_rsv_list
, link
) {
1783 if (rsv_obj
->dsp_reserved_addr
== (u32
) prsv_addr
) {
1784 list_del(&rsv_obj
->link
);
1789 spin_unlock(&pr_ctxt
->dmm_rsv_lock
);
1792 dev_dbg(bridge
, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1793 __func__
, hprocessor
, prsv_addr
, status
);
1798 * ======== = proc_monitor ======== ==
1800 * Place the Processor in Monitor State. This is an internal
1801 * function and a requirement before Processor is loaded.
1802 * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1803 * In dev_destroy2 we delete the node manager.
1805 * p_proc_object: Pointer to Processor Object
1807 * 0: Processor placed in monitor mode.
1808 * !0: Failed to place processor in monitor mode.
1810 * Valid Processor Handle
1812 * Success: ProcObject state is PROC_IDLE
1814 static int proc_monitor(struct proc_object
*proc_obj
)
1816 int status
= -EPERM
;
1817 struct msg_mgr
*hmsg_mgr
;
1820 DBC_REQUIRE(refs
> 0);
1821 DBC_REQUIRE(proc_obj
);
1823 /* This is needed only when Device is loaded when it is
1824 * already 'ACTIVE' */
1825 /* Destory the Node Manager, msg_ctrl Manager */
1826 if (!dev_destroy2(proc_obj
->hdev_obj
)) {
1827 /* Destroy the msg_ctrl by calling msg_delete */
1828 dev_get_msg_mgr(proc_obj
->hdev_obj
, &hmsg_mgr
);
1830 msg_delete(hmsg_mgr
);
1831 dev_set_msg_mgr(proc_obj
->hdev_obj
, NULL
);
1834 /* Place the Board in the Monitor State */
1835 if (!((*proc_obj
->intf_fxns
->pfn_brd_monitor
)
1836 (proc_obj
->hbridge_context
))) {
1838 if (!((*proc_obj
->intf_fxns
->pfn_brd_status
)
1839 (proc_obj
->hbridge_context
, &brd_state
)))
1840 DBC_ASSERT(brd_state
== BRD_IDLE
);
1843 DBC_ENSURE((!status
&& brd_state
== BRD_IDLE
) ||
1849 * ======== get_envp_count ========
1851 * Return the number of elements in the envp array, including the
1852 * terminating NULL element.
1854 static s32
get_envp_count(char **envp
)
1861 ret
+= 1; /* Include the terminating NULL in the count. */
1868 * ======== prepend_envp ========
1870 * Prepend an environment variable=value pair to the new envp array, and
1871 * copy in the existing var=value pairs in the old envp array.
1873 static char **prepend_envp(char **new_envp
, char **envp
, s32 envp_elems
,
1874 s32 cnew_envp
, char *sz_var
)
1876 char **pp_envp
= new_envp
;
1878 DBC_REQUIRE(new_envp
);
1880 /* Prepend new environ var=value string */
1881 *new_envp
++ = sz_var
;
1883 /* Copy user's environment into our own. */
1884 while (envp_elems
--)
1885 *new_envp
++ = *envp
++;
1887 /* Ensure NULL terminates the new environment strings array. */
1888 if (envp_elems
== 0)
1895 * ======== proc_notify_clients ========
1897 * Notify the processor the events.
1899 int proc_notify_clients(void *proc
, u32 events
)
1902 struct proc_object
*p_proc_object
= (struct proc_object
*)proc
;
1904 DBC_REQUIRE(p_proc_object
);
1905 DBC_REQUIRE(is_valid_proc_event(events
));
1906 DBC_REQUIRE(refs
> 0);
1907 if (!p_proc_object
) {
1912 ntfy_notify(p_proc_object
->ntfy_obj
, events
);
1918 * ======== proc_notify_all_clients ========
1920 * Notify the processor the events. This includes notifying all clients
1921 * attached to a particulat DSP.
1923 int proc_notify_all_clients(void *proc
, u32 events
)
1926 struct proc_object
*p_proc_object
= (struct proc_object
*)proc
;
1928 DBC_REQUIRE(is_valid_proc_event(events
));
1929 DBC_REQUIRE(refs
> 0);
1931 if (!p_proc_object
) {
1936 dev_notify_clients(p_proc_object
->hdev_obj
, events
);
1943 * ======== proc_get_processor_id ========
1945 * Retrieves the processor ID.
1947 int proc_get_processor_id(void *proc
, u32
* proc_id
)
1950 struct proc_object
*p_proc_object
= (struct proc_object
*)proc
;
1953 *proc_id
= p_proc_object
->processor_id
;