4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge Node Manager.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
23 /* ----------------------------------- DSP/BIOS Bridge */
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/cfg.h>
31 #include <dspbridge/list.h>
32 #include <dspbridge/memdefs.h>
33 #include <dspbridge/proc.h>
34 #include <dspbridge/strm.h>
35 #include <dspbridge/sync.h>
36 #include <dspbridge/ntfy.h>
38 /* ----------------------------------- Platform Manager */
39 #include <dspbridge/cmm.h>
40 #include <dspbridge/cod.h>
41 #include <dspbridge/dev.h>
42 #include <dspbridge/msg.h>
44 /* ----------------------------------- Resource Manager */
45 #include <dspbridge/dbdcd.h>
46 #include <dspbridge/disp.h>
47 #include <dspbridge/rms_sh.h>
49 /* ----------------------------------- Link Driver */
50 #include <dspbridge/dspdefs.h>
51 #include <dspbridge/dspioctl.h>
53 /* ----------------------------------- Others */
54 #include <dspbridge/gb.h>
55 #include <dspbridge/uuidutil.h>
57 /* ----------------------------------- This */
58 #include <dspbridge/nodepriv.h>
59 #include <dspbridge/node.h>
60 #include <dspbridge/dmm.h>
62 /* Static/Dynamic Loader includes */
63 #include <dspbridge/dbll.h>
64 #include <dspbridge/nldr.h>
66 #include <dspbridge/drv.h>
67 #include <dspbridge/drvdefs.h>
68 #include <dspbridge/resourcecleanup.h>
71 #include <dspbridge/dspdeh.h>
/* Device-name prefixes used when building stream device names */
#define HOSTPREFIX	"/host"		/* GPP-side (host) stream endpoint */
#define PIPEPREFIX	"/dbpipe"	/* DSP-internal pipe connection */

/*
 * Max input/output stream counts for a node come from the node's
 * database (DCD) properties.
 */
#define MAX_INPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)

/* Accessors for a node's priority and run-state fields */
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)

#define MAXPIPES 100		/* Max # of /pipe connections (CSL limit) */
#define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */

/* sizeof() on the prefix literals includes the NUL terminator */
#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)

#define MAXDEVNAMELEN 32	/* dsp_ndbprops.ac_name size */
#define EXECUTEPHASE 2		/* node phase id for the execute code/section */

/* Define default STRM parameters */
/*
 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
 * or make defaults configurable.
 */
#define DEFAULTBUFSIZE 32
#define DEFAULTNBUFS 2
#define DEFAULTSEGID 0
#define DEFAULTALIGNMENT 0
#define DEFAULTTIMEOUT 10000

/* Opcodes of commands dispatched to the DSP-side RMS (resource manager) */
#define RMSQUERYSERVER 0
#define RMSCONFIGURESERVER 1
#define RMSCREATENODE 2
#define RMSEXECUTENODE 3
#define RMSDELETENODE 4
#define RMSCHANGENODEPRIORITY 5
#define RMSREADMEMORY 6
#define RMSWRITEMEMORY 7

#define MAXTIMEOUT 2000		/* generic timeout, msec */

#define PWR_TIMEOUT 500		/* default PWR timeout in msec */

#define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
125 * ======== node_mgr ========
128 struct dev_object
*hdev_obj
; /* Device object */
129 /* Function interface to Bridge driver */
130 struct bridge_drv_interface
*intf_fxns
;
131 struct dcd_manager
*hdcd_mgr
; /* Proc/Node data manager */
132 struct disp_object
*disp_obj
; /* Node dispatcher */
133 struct lst_list
*node_list
; /* List of all allocated nodes */
134 u32 num_nodes
; /* Number of nodes in node_list */
135 u32 num_created
; /* Number of nodes *created* on DSP */
136 struct gb_t_map
*pipe_map
; /* Pipe connection bit map */
137 struct gb_t_map
*pipe_done_map
; /* Pipes that are half free */
138 struct gb_t_map
*chnl_map
; /* Channel allocation bit map */
139 struct gb_t_map
*dma_chnl_map
; /* DMA Channel allocation bit map */
140 struct gb_t_map
*zc_chnl_map
; /* Zero-Copy Channel alloc bit map */
141 struct ntfy_object
*ntfy_obj
; /* Manages registered notifications */
142 struct mutex node_mgr_lock
; /* For critical sections */
143 u32 ul_fxn_addrs
[NUMRMSFXNS
]; /* RMS function addresses */
144 struct msg_mgr
*msg_mgr_obj
;
146 /* Processor properties needed by Node Dispatcher */
147 u32 ul_num_chnls
; /* Total number of channels */
148 u32 ul_chnl_offset
; /* Offset of chnl ids rsvd for RMS */
149 u32 ul_chnl_buf_size
; /* Buffer size for data to RMS */
150 int proc_family
; /* eg, 5000 */
151 int proc_type
; /* eg, 5510 */
152 u32 udsp_word_size
; /* Size of DSP word on host bytes */
153 u32 udsp_data_mau_size
; /* Size of DSP data MAU */
154 u32 udsp_mau_size
; /* Size of MAU */
155 s32 min_pri
; /* Minimum runtime priority for node */
156 s32 max_pri
; /* Maximum runtime priority for node */
158 struct strm_mgr
*strm_mgr_obj
; /* STRM manager */
160 /* Loader properties */
161 struct nldr_object
*nldr_obj
; /* Handle to loader */
162 struct node_ldr_fxns nldr_fxns
; /* Handle to loader functions */
163 bool loader_init
; /* Loader Init function succeeded? */
167 * ======== connecttype ========
177 * ======== stream_chnl ========
180 enum connecttype type
; /* Type of stream connection */
181 u32 dev_id
; /* pipe or channel id */
185 * ======== node_object ========
188 struct list_head list_elem
;
189 struct node_mgr
*hnode_mgr
; /* The manager of this node */
190 struct proc_object
*hprocessor
; /* Back pointer to processor */
191 struct dsp_uuid node_uuid
; /* Node's ID */
192 s32 prio
; /* Node's current priority */
193 u32 utimeout
; /* Timeout for blocking NODE calls */
194 u32 heap_size
; /* Heap Size */
195 u32 udsp_heap_virt_addr
; /* Heap Size */
196 u32 ugpp_heap_virt_addr
; /* Heap Size */
197 enum node_type ntype
; /* Type of node: message, task, etc */
198 enum node_state node_state
; /* NODE_ALLOCATED, NODE_CREATED, ... */
199 u32 num_inputs
; /* Current number of inputs */
200 u32 num_outputs
; /* Current number of outputs */
201 u32 max_input_index
; /* Current max input stream index */
202 u32 max_output_index
; /* Current max output stream index */
203 struct stream_chnl
*inputs
; /* Node's input streams */
204 struct stream_chnl
*outputs
; /* Node's output streams */
205 struct node_createargs create_args
; /* Args for node create func */
206 nodeenv node_env
; /* Environment returned by RMS */
207 struct dcd_genericobj dcd_props
; /* Node properties from DCD */
208 struct dsp_cbdata
*pargs
; /* Optional args to pass to node */
209 struct ntfy_object
*ntfy_obj
; /* Manages registered notifications */
210 char *pstr_dev_name
; /* device name, if device node */
211 struct sync_object
*sync_done
; /* Synchronize node_terminate */
212 s32 exit_status
; /* execute function return status */
214 /* Information needed for node_get_attr() */
215 void *device_owner
; /* If dev node, task that owns it */
216 u32 num_gpp_inputs
; /* Current # of from GPP streams */
217 u32 num_gpp_outputs
; /* Current # of to GPP streams */
218 /* Current stream connections */
219 struct dsp_streamconnect
*stream_connect
;
222 struct msg_queue
*msg_queue_obj
;
224 /* These fields used for SM messaging */
225 struct cmm_xlatorobject
*xlator
; /* Node's SM addr translator */
227 /* Handle to pass to dynamic loader */
228 struct nldr_nodeobject
*nldr_node_obj
;
229 bool loaded
; /* Code is (dynamically) loaded */
230 bool phase_split
; /* Phases split in many libs or ovly */
234 /* Default buffer attributes */
235 static struct dsp_bufferattr node_dfltbufattrs
= {
238 0, /* buf_alignment */
241 static void delete_node(struct node_object
*hnode
,
242 struct process_context
*pr_ctxt
);
243 static void delete_node_mgr(struct node_mgr
*hnode_mgr
);
244 static void fill_stream_connect(struct node_object
*node1
,
245 struct node_object
*node2
, u32 stream1
,
247 static void fill_stream_def(struct node_object
*hnode
,
248 struct node_strmdef
*pstrm_def
,
249 struct dsp_strmattr
*pattrs
);
250 static void free_stream(struct node_mgr
*hnode_mgr
, struct stream_chnl stream
);
251 static int get_fxn_address(struct node_object
*hnode
, u32
* fxn_addr
,
253 static int get_node_props(struct dcd_manager
*hdcd_mgr
,
254 struct node_object
*hnode
,
255 const struct dsp_uuid
*node_uuid
,
256 struct dcd_genericobj
*dcd_prop
);
257 static int get_proc_props(struct node_mgr
*hnode_mgr
,
258 struct dev_object
*hdev_obj
);
259 static int get_rms_fxns(struct node_mgr
*hnode_mgr
);
260 static u32
ovly(void *priv_ref
, u32 dsp_run_addr
, u32 dsp_load_addr
,
261 u32 ul_num_bytes
, u32 mem_space
);
262 static u32
mem_write(void *priv_ref
, u32 dsp_add
, void *pbuf
,
263 u32 ul_num_bytes
, u32 mem_space
);
265 static u32 refs
; /* module reference count */
267 /* Dynamic loader functions. */
268 static struct node_ldr_fxns nldr_fxns
= {
279 enum node_state
node_get_state(void *hnode
)
281 struct node_object
*pnode
= (struct node_object
*)hnode
;
285 return pnode
->node_state
;
289 * ======== node_allocate ========
291 * Allocate GPP resources to manage a node on the DSP.
293 int node_allocate(struct proc_object
*hprocessor
,
294 const struct dsp_uuid
*node_uuid
,
295 const struct dsp_cbdata
*pargs
,
296 const struct dsp_nodeattrin
*attr_in
,
297 struct node_res_object
**noderes
,
298 struct process_context
*pr_ctxt
)
300 struct node_mgr
*hnode_mgr
;
301 struct dev_object
*hdev_obj
;
302 struct node_object
*pnode
= NULL
;
303 enum node_type node_type
= NODE_TASK
;
304 struct node_msgargs
*pmsg_args
;
305 struct node_taskargs
*ptask_args
;
307 struct bridge_drv_interface
*intf_fxns
;
309 struct cmm_object
*hcmm_mgr
= NULL
; /* Shared memory manager hndl */
314 u32 ul_stack_seg_addr
, ul_stack_seg_val
;
316 struct cfg_hostres
*host_res
;
317 struct bridge_dev_context
*pbridge_context
;
320 struct dsp_processorstate proc_state
;
322 struct dmm_object
*dmm_mgr
;
323 struct proc_object
*p_proc_object
= (struct proc_object
*)hprocessor
;
328 DBC_REQUIRE(refs
> 0);
329 DBC_REQUIRE(hprocessor
!= NULL
);
330 DBC_REQUIRE(noderes
!= NULL
);
331 DBC_REQUIRE(node_uuid
!= NULL
);
335 status
= proc_get_processor_id(hprocessor
, &proc_id
);
337 if (proc_id
!= DSP_UNIT
)
340 status
= proc_get_dev_object(hprocessor
, &hdev_obj
);
342 status
= dev_get_node_manager(hdev_obj
, &hnode_mgr
);
343 if (hnode_mgr
== NULL
)
351 status
= dev_get_bridge_context(hdev_obj
, &pbridge_context
);
352 if (!pbridge_context
) {
357 status
= proc_get_state(hprocessor
, &proc_state
,
358 sizeof(struct dsp_processorstate
));
361 /* If processor is in error state then don't attempt
362 to send the message */
363 if (proc_state
.proc_state
== PROC_ERROR
) {
368 /* Assuming that 0 is not a valid function address */
369 if (hnode_mgr
->ul_fxn_addrs
[0] == 0) {
370 /* No RMS on target - we currently can't handle this */
371 pr_err("%s: Failed, no RMS in base image\n", __func__
);
374 /* Validate attr_in fields, if non-NULL */
376 /* Check if attr_in->prio is within range */
377 if (attr_in
->prio
< hnode_mgr
->min_pri
||
378 attr_in
->prio
> hnode_mgr
->max_pri
)
382 /* Allocate node object and fill in */
386 pnode
= kzalloc(sizeof(struct node_object
), GFP_KERNEL
);
391 pnode
->hnode_mgr
= hnode_mgr
;
392 /* This critical section protects get_node_props */
393 mutex_lock(&hnode_mgr
->node_mgr_lock
);
395 /* Get dsp_ndbprops from node database */
396 status
= get_node_props(hnode_mgr
->hdcd_mgr
, pnode
, node_uuid
,
397 &(pnode
->dcd_props
));
401 pnode
->node_uuid
= *node_uuid
;
402 pnode
->hprocessor
= hprocessor
;
403 pnode
->ntype
= pnode
->dcd_props
.obj_data
.node_obj
.ndb_props
.ntype
;
404 pnode
->utimeout
= pnode
->dcd_props
.obj_data
.node_obj
.ndb_props
.utimeout
;
405 pnode
->prio
= pnode
->dcd_props
.obj_data
.node_obj
.ndb_props
.prio
;
407 /* Currently only C64 DSP builds support Node Dynamic * heaps */
408 /* Allocate memory for node heap */
409 pnode
->create_args
.asa
.task_arg_obj
.heap_size
= 0;
410 pnode
->create_args
.asa
.task_arg_obj
.udsp_heap_addr
= 0;
411 pnode
->create_args
.asa
.task_arg_obj
.udsp_heap_res_addr
= 0;
412 pnode
->create_args
.asa
.task_arg_obj
.ugpp_heap_addr
= 0;
416 /* Check if we have a user allocated node heap */
417 if (!(attr_in
->pgpp_virt_addr
))
420 /* check for page aligned Heap size */
421 if (((attr_in
->heap_size
) & (PG_SIZE4K
- 1))) {
422 pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
423 __func__
, attr_in
->heap_size
);
426 pnode
->create_args
.asa
.task_arg_obj
.heap_size
=
428 pnode
->create_args
.asa
.task_arg_obj
.ugpp_heap_addr
=
429 (u32
) attr_in
->pgpp_virt_addr
;
434 status
= proc_reserve_memory(hprocessor
,
435 pnode
->create_args
.asa
.task_arg_obj
.
436 heap_size
+ PAGE_SIZE
,
437 (void **)&(pnode
->create_args
.asa
.
438 task_arg_obj
.udsp_heap_res_addr
),
441 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
446 status
= dmm_get_handle(p_proc_object
, &dmm_mgr
);
448 status
= DSP_EHANDLE
;
452 dmm_mem_map_dump(dmm_mgr
);
455 map_attrs
|= DSP_MAPLITTLEENDIAN
;
456 map_attrs
|= DSP_MAPELEMSIZE32
;
457 map_attrs
|= DSP_MAPVIRTUALADDR
;
458 status
= proc_map(hprocessor
, (void *)attr_in
->pgpp_virt_addr
,
459 pnode
->create_args
.asa
.task_arg_obj
.heap_size
,
460 (void *)pnode
->create_args
.asa
.task_arg_obj
.
461 udsp_heap_res_addr
, (void **)&mapped_addr
, map_attrs
,
464 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
467 pnode
->create_args
.asa
.task_arg_obj
.udsp_heap_addr
=
471 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
472 if (attr_in
!= NULL
) {
473 /* Overrides of NBD properties */
474 pnode
->utimeout
= attr_in
->utimeout
;
475 pnode
->prio
= attr_in
->prio
;
477 /* Create object to manage notifications */
479 pnode
->ntfy_obj
= kmalloc(sizeof(struct ntfy_object
),
482 ntfy_init(pnode
->ntfy_obj
);
488 node_type
= node_get_type(pnode
);
489 /* Allocate dsp_streamconnect array for device, task, and
490 * dais socket nodes. */
491 if (node_type
!= NODE_MESSAGE
) {
492 num_streams
= MAX_INPUTS(pnode
) + MAX_OUTPUTS(pnode
);
493 pnode
->stream_connect
= kzalloc(num_streams
*
494 sizeof(struct dsp_streamconnect
),
496 if (num_streams
> 0 && pnode
->stream_connect
== NULL
)
500 if (!status
&& (node_type
== NODE_TASK
||
501 node_type
== NODE_DAISSOCKET
)) {
502 /* Allocate arrays for maintainig stream connections */
503 pnode
->inputs
= kzalloc(MAX_INPUTS(pnode
) *
504 sizeof(struct stream_chnl
), GFP_KERNEL
);
505 pnode
->outputs
= kzalloc(MAX_OUTPUTS(pnode
) *
506 sizeof(struct stream_chnl
), GFP_KERNEL
);
507 ptask_args
= &(pnode
->create_args
.asa
.task_arg_obj
);
508 ptask_args
->strm_in_def
= kzalloc(MAX_INPUTS(pnode
) *
509 sizeof(struct node_strmdef
),
511 ptask_args
->strm_out_def
= kzalloc(MAX_OUTPUTS(pnode
) *
512 sizeof(struct node_strmdef
),
514 if ((MAX_INPUTS(pnode
) > 0 && (pnode
->inputs
== NULL
||
515 ptask_args
->strm_in_def
517 || (MAX_OUTPUTS(pnode
) > 0
518 && (pnode
->outputs
== NULL
519 || ptask_args
->strm_out_def
== NULL
)))
523 if (!status
&& (node_type
!= NODE_DEVICE
)) {
524 /* Create an event that will be posted when RMS_EXIT is
526 pnode
->sync_done
= kzalloc(sizeof(struct sync_object
),
528 if (pnode
->sync_done
)
529 sync_init_event(pnode
->sync_done
);
534 /*Get the shared mem mgr for this nodes dev object */
535 status
= cmm_get_handle(hprocessor
, &hcmm_mgr
);
537 /* Allocate a SM addr translator for this node
539 status
= cmm_xlator_create(&pnode
->xlator
,
544 /* Fill in message args */
545 if ((pargs
!= NULL
) && (pargs
->cb_data
> 0)) {
547 &(pnode
->create_args
.asa
.node_msg_args
);
548 pmsg_args
->pdata
= kzalloc(pargs
->cb_data
,
550 if (pmsg_args
->pdata
== NULL
) {
553 pmsg_args
->arg_length
= pargs
->cb_data
;
554 memcpy(pmsg_args
->pdata
,
562 if (!status
&& node_type
!= NODE_DEVICE
) {
563 /* Create a message queue for this node */
564 intf_fxns
= hnode_mgr
->intf_fxns
;
566 (*intf_fxns
->pfn_msg_create_queue
) (hnode_mgr
->msg_mgr_obj
,
567 &pnode
->msg_queue_obj
,
569 pnode
->create_args
.asa
.
570 node_msg_args
.max_msgs
,
575 /* Create object for dynamic loading */
577 status
= hnode_mgr
->nldr_fxns
.pfn_allocate(hnode_mgr
->nldr_obj
,
583 &pnode
->phase_split
);
586 /* Compare value read from Node Properties and check if it is same as
587 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
588 * GPP Address, Read the value in that address and override the
589 * stack_seg value in task args */
591 (char *)pnode
->dcd_props
.obj_data
.node_obj
.ndb_props
.
592 stack_seg_name
!= NULL
) {
594 pnode
->dcd_props
.obj_data
.node_obj
.ndb_props
.
595 stack_seg_name
, STACKSEGLABEL
) == 0) {
597 hnode_mgr
->nldr_fxns
.
598 pfn_get_fxn_addr(pnode
->nldr_node_obj
, "DYNEXT_BEG",
601 pr_err("%s: Failed to get addr for DYNEXT_BEG"
602 " status = 0x%x\n", __func__
, status
);
605 hnode_mgr
->nldr_fxns
.
606 pfn_get_fxn_addr(pnode
->nldr_node_obj
,
607 "L1DSRAM_HEAP", &pul_value
);
610 pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
611 " status = 0x%x\n", __func__
, status
);
613 host_res
= pbridge_context
->resources
;
618 pr_err("%s: Failed to get host resource, status"
619 " = 0x%x\n", __func__
, status
);
623 ul_gpp_mem_base
= (u32
) host_res
->dw_mem_base
[1];
624 off_set
= pul_value
- dynext_base
;
625 ul_stack_seg_addr
= ul_gpp_mem_base
+ off_set
;
626 ul_stack_seg_val
= readl(ul_stack_seg_addr
);
628 dev_dbg(bridge
, "%s: StackSegVal = 0x%x, StackSegAddr ="
629 " 0x%x\n", __func__
, ul_stack_seg_val
,
632 pnode
->create_args
.asa
.task_arg_obj
.stack_seg
=
639 /* Add the node to the node manager's list of allocated
641 lst_init_elem((struct list_head
*)pnode
);
642 NODE_SET_STATE(pnode
, NODE_ALLOCATED
);
644 mutex_lock(&hnode_mgr
->node_mgr_lock
);
646 lst_put_tail(hnode_mgr
->node_list
, (struct list_head
*) pnode
);
647 ++(hnode_mgr
->num_nodes
);
649 /* Exit critical section */
650 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
652 /* Preset this to assume phases are split
653 * (for overlay and dll) */
654 pnode
->phase_split
= true;
656 /* Notify all clients registered for DSP_NODESTATECHANGE. */
657 proc_notify_all_clients(hprocessor
, DSP_NODESTATECHANGE
);
661 delete_node(pnode
, pr_ctxt
);
666 status
= drv_insert_node_res_element(pnode
, &node_res
, pr_ctxt
);
668 delete_node(pnode
, pr_ctxt
);
672 *noderes
= (struct node_res_object
*)node_res
;
673 drv_proc_node_update_heap_status(node_res
, true);
674 drv_proc_node_update_status(node_res
, true);
676 DBC_ENSURE((status
&& *noderes
== NULL
) || (!status
&& *noderes
));
678 dev_dbg(bridge
, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
679 "node_res: %p status: 0x%x\n", __func__
, hprocessor
,
680 node_uuid
, pargs
, attr_in
, noderes
, status
);
685 * ======== node_alloc_msg_buf ========
687 * Allocates buffer for zero copy messaging.
689 DBAPI
node_alloc_msg_buf(struct node_object
*hnode
, u32 usize
,
690 struct dsp_bufferattr
*pattr
,
693 struct node_object
*pnode
= (struct node_object
*)hnode
;
695 bool va_flag
= false;
699 DBC_REQUIRE(refs
> 0);
700 DBC_REQUIRE(pbuffer
!= NULL
);
702 DBC_REQUIRE(usize
> 0);
706 else if (node_get_type(pnode
) == NODE_DEVICE
)
713 pattr
= &node_dfltbufattrs
; /* set defaults */
715 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
716 if (proc_id
!= DSP_UNIT
) {
720 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
721 * virt address, so set this info in this node's translator
722 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
723 * virtual address from node's translator. */
724 if ((pattr
->segment_id
& MEM_SETVIRTUALSEGID
) ||
725 (pattr
->segment_id
& MEM_GETVIRTUALSEGID
)) {
727 set_info
= (pattr
->segment_id
& MEM_SETVIRTUALSEGID
) ?
729 /* Clear mask bits */
730 pattr
->segment_id
&= ~MEM_MASKVIRTUALSEGID
;
731 /* Set/get this node's translators virtual address base/size */
732 status
= cmm_xlator_info(pnode
->xlator
, pbuffer
, usize
,
733 pattr
->segment_id
, set_info
);
735 if (!status
&& (!va_flag
)) {
736 if (pattr
->segment_id
!= 1) {
737 /* Node supports single SM segment only. */
740 /* Arbitrary SM buffer alignment not supported for host side
741 * allocs, but guaranteed for the following alignment
743 switch (pattr
->buf_alignment
) {
750 /* alignment value not suportted */
755 /* allocate physical buffer from seg_id in node's
757 (void)cmm_xlator_alloc_buf(pnode
->xlator
, pbuffer
,
759 if (*pbuffer
== NULL
) {
760 pr_err("%s: error - Out of shared memory\n",
771 * ======== node_change_priority ========
773 * Change the priority of a node in the allocated state, or that is
774 * currently running or paused on the target.
776 int node_change_priority(struct node_object
*hnode
, s32 prio
)
778 struct node_object
*pnode
= (struct node_object
*)hnode
;
779 struct node_mgr
*hnode_mgr
= NULL
;
780 enum node_type node_type
;
781 enum node_state state
;
785 DBC_REQUIRE(refs
> 0);
787 if (!hnode
|| !hnode
->hnode_mgr
) {
790 hnode_mgr
= hnode
->hnode_mgr
;
791 node_type
= node_get_type(hnode
);
792 if (node_type
!= NODE_TASK
&& node_type
!= NODE_DAISSOCKET
)
794 else if (prio
< hnode_mgr
->min_pri
|| prio
> hnode_mgr
->max_pri
)
800 /* Enter critical section */
801 mutex_lock(&hnode_mgr
->node_mgr_lock
);
803 state
= node_get_state(hnode
);
804 if (state
== NODE_ALLOCATED
|| state
== NODE_PAUSED
) {
805 NODE_SET_PRIORITY(hnode
, prio
);
807 if (state
!= NODE_RUNNING
) {
811 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
812 if (proc_id
== DSP_UNIT
) {
814 disp_node_change_priority(hnode_mgr
->disp_obj
,
816 hnode_mgr
->ul_fxn_addrs
817 [RMSCHANGENODEPRIORITY
],
818 hnode
->node_env
, prio
);
821 NODE_SET_PRIORITY(hnode
, prio
);
825 /* Leave critical section */
826 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
832 * ======== node_connect ========
834 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
836 int node_connect(struct node_object
*node1
, u32 stream1
,
837 struct node_object
*node2
,
838 u32 stream2
, struct dsp_strmattr
*pattrs
,
839 struct dsp_cbdata
*conn_param
)
841 struct node_mgr
*hnode_mgr
;
842 char *pstr_dev_name
= NULL
;
843 enum node_type node1_type
= NODE_TASK
;
844 enum node_type node2_type
= NODE_TASK
;
845 struct node_strmdef
*pstrm_def
;
846 struct node_strmdef
*input
= NULL
;
847 struct node_strmdef
*output
= NULL
;
848 struct node_object
*dev_node_obj
;
849 struct node_object
*hnode
;
850 struct stream_chnl
*pstream
;
851 u32 pipe_id
= GB_NOBITS
;
852 u32 chnl_id
= GB_NOBITS
;
856 DBC_REQUIRE(refs
> 0);
858 if ((node1
!= (struct node_object
*)DSP_HGPPNODE
&& !node1
) ||
859 (node2
!= (struct node_object
*)DSP_HGPPNODE
&& !node2
))
863 /* The two nodes must be on the same processor */
864 if (node1
!= (struct node_object
*)DSP_HGPPNODE
&&
865 node2
!= (struct node_object
*)DSP_HGPPNODE
&&
866 node1
->hnode_mgr
!= node2
->hnode_mgr
)
868 /* Cannot connect a node to itself */
874 /* node_get_type() will return NODE_GPP if hnode =
876 node1_type
= node_get_type(node1
);
877 node2_type
= node_get_type(node2
);
878 /* Check stream indices ranges */
879 if ((node1_type
!= NODE_GPP
&& node1_type
!= NODE_DEVICE
&&
880 stream1
>= MAX_OUTPUTS(node1
)) || (node2_type
!= NODE_GPP
889 * Only the following types of connections are allowed:
890 * task/dais socket < == > task/dais socket
891 * task/dais socket < == > device
892 * task/dais socket < == > GPP
894 * ie, no message nodes, and at least one task or dais
897 if (node1_type
== NODE_MESSAGE
|| node2_type
== NODE_MESSAGE
||
898 (node1_type
!= NODE_TASK
&& node1_type
!= NODE_DAISSOCKET
&&
899 node2_type
!= NODE_TASK
&& node2_type
!= NODE_DAISSOCKET
))
903 * Check stream mode. Default is STRMMODE_PROCCOPY.
905 if (!status
&& pattrs
) {
906 if (pattrs
->strm_mode
!= STRMMODE_PROCCOPY
)
907 status
= -EPERM
; /* illegal stream mode */
913 if (node1_type
!= NODE_GPP
) {
914 hnode_mgr
= node1
->hnode_mgr
;
916 DBC_ASSERT(node2
!= (struct node_object
*)DSP_HGPPNODE
);
917 hnode_mgr
= node2
->hnode_mgr
;
919 /* Enter critical section */
920 mutex_lock(&hnode_mgr
->node_mgr_lock
);
922 /* Nodes must be in the allocated state */
923 if (node1_type
!= NODE_GPP
&& node_get_state(node1
) != NODE_ALLOCATED
)
926 if (node2_type
!= NODE_GPP
&& node_get_state(node2
) != NODE_ALLOCATED
)
930 /* Check that stream indices for task and dais socket nodes
931 * are not already be used. (Device nodes checked later) */
932 if (node1_type
== NODE_TASK
|| node1_type
== NODE_DAISSOCKET
) {
934 &(node1
->create_args
.asa
.
935 task_arg_obj
.strm_out_def
[stream1
]);
936 if (output
->sz_device
!= NULL
)
940 if (node2_type
== NODE_TASK
|| node2_type
== NODE_DAISSOCKET
) {
942 &(node2
->create_args
.asa
.
943 task_arg_obj
.strm_in_def
[stream2
]);
944 if (input
->sz_device
!= NULL
)
949 /* Connecting two task nodes? */
950 if (!status
&& ((node1_type
== NODE_TASK
||
951 node1_type
== NODE_DAISSOCKET
)
952 && (node2_type
== NODE_TASK
953 || node2_type
== NODE_DAISSOCKET
))) {
954 /* Find available pipe */
955 pipe_id
= gb_findandset(hnode_mgr
->pipe_map
);
956 if (pipe_id
== GB_NOBITS
) {
957 status
= -ECONNREFUSED
;
959 node1
->outputs
[stream1
].type
= NODECONNECT
;
960 node2
->inputs
[stream2
].type
= NODECONNECT
;
961 node1
->outputs
[stream1
].dev_id
= pipe_id
;
962 node2
->inputs
[stream2
].dev_id
= pipe_id
;
963 output
->sz_device
= kzalloc(PIPENAMELEN
+ 1,
965 input
->sz_device
= kzalloc(PIPENAMELEN
+ 1, GFP_KERNEL
);
966 if (output
->sz_device
== NULL
||
967 input
->sz_device
== NULL
) {
968 /* Undo the connection */
969 kfree(output
->sz_device
);
971 kfree(input
->sz_device
);
973 output
->sz_device
= NULL
;
974 input
->sz_device
= NULL
;
975 gb_clear(hnode_mgr
->pipe_map
, pipe_id
);
978 /* Copy "/dbpipe<pipId>" name to device names */
979 sprintf(output
->sz_device
, "%s%d",
980 PIPEPREFIX
, pipe_id
);
981 strcpy(input
->sz_device
, output
->sz_device
);
985 /* Connecting task node to host? */
986 if (!status
&& (node1_type
== NODE_GPP
||
987 node2_type
== NODE_GPP
)) {
988 if (node1_type
== NODE_GPP
) {
989 chnl_mode
= CHNL_MODETODSP
;
991 DBC_ASSERT(node2_type
== NODE_GPP
);
992 chnl_mode
= CHNL_MODEFROMDSP
;
994 /* Reserve a channel id. We need to put the name "/host<id>"
995 * in the node's create_args, but the host
996 * side channel will not be opened until DSPStream_Open is
997 * called for this node. */
999 if (pattrs
->strm_mode
== STRMMODE_RDMA
) {
1001 gb_findandset(hnode_mgr
->dma_chnl_map
);
1002 /* dma chans are 2nd transport chnl set
1003 * ids(e.g. 16-31) */
1004 (chnl_id
!= GB_NOBITS
) ?
1007 hnode_mgr
->ul_num_chnls
) : chnl_id
;
1008 } else if (pattrs
->strm_mode
== STRMMODE_ZEROCOPY
) {
1009 chnl_id
= gb_findandset(hnode_mgr
->zc_chnl_map
);
1010 /* zero-copy chans are 3nd transport set
1012 (chnl_id
!= GB_NOBITS
) ? (chnl_id
= chnl_id
+
1017 } else { /* must be PROCCOPY */
1018 DBC_ASSERT(pattrs
->strm_mode
==
1020 chnl_id
= gb_findandset(hnode_mgr
->chnl_map
);
1024 /* default to PROCCOPY */
1025 chnl_id
= gb_findandset(hnode_mgr
->chnl_map
);
1027 if (chnl_id
== GB_NOBITS
) {
1028 status
= -ECONNREFUSED
;
1031 pstr_dev_name
= kzalloc(HOSTNAMELEN
+ 1, GFP_KERNEL
);
1032 if (pstr_dev_name
!= NULL
)
1036 if (pattrs
->strm_mode
== STRMMODE_RDMA
) {
1037 gb_clear(hnode_mgr
->dma_chnl_map
, chnl_id
-
1038 hnode_mgr
->ul_num_chnls
);
1039 } else if (pattrs
->strm_mode
== STRMMODE_ZEROCOPY
) {
1040 gb_clear(hnode_mgr
->zc_chnl_map
, chnl_id
-
1041 (2 * hnode_mgr
->ul_num_chnls
));
1043 DBC_ASSERT(pattrs
->strm_mode
==
1045 gb_clear(hnode_mgr
->chnl_map
, chnl_id
);
1048 gb_clear(hnode_mgr
->chnl_map
, chnl_id
);
1053 if (node1
== (struct node_object
*)DSP_HGPPNODE
) {
1054 node2
->inputs
[stream2
].type
= HOSTCONNECT
;
1055 node2
->inputs
[stream2
].dev_id
= chnl_id
;
1056 input
->sz_device
= pstr_dev_name
;
1058 node1
->outputs
[stream1
].type
= HOSTCONNECT
;
1059 node1
->outputs
[stream1
].dev_id
= chnl_id
;
1060 output
->sz_device
= pstr_dev_name
;
1062 sprintf(pstr_dev_name
, "%s%d", HOSTPREFIX
, chnl_id
);
1065 /* Connecting task node to device node? */
1066 if (!status
&& ((node1_type
== NODE_DEVICE
) ||
1067 (node2_type
== NODE_DEVICE
))) {
1068 if (node2_type
== NODE_DEVICE
) {
1069 /* node1 == > device */
1070 dev_node_obj
= node2
;
1072 pstream
= &(node1
->outputs
[stream1
]);
1075 /* device == > node2 */
1076 dev_node_obj
= node1
;
1078 pstream
= &(node2
->inputs
[stream2
]);
1081 /* Set up create args */
1082 pstream
->type
= DEVICECONNECT
;
1083 dw_length
= strlen(dev_node_obj
->pstr_dev_name
);
1084 if (conn_param
!= NULL
) {
1085 pstrm_def
->sz_device
= kzalloc(dw_length
+ 1 +
1086 conn_param
->cb_data
,
1089 pstrm_def
->sz_device
= kzalloc(dw_length
+ 1,
1092 if (pstrm_def
->sz_device
== NULL
) {
1095 /* Copy device name */
1096 strncpy(pstrm_def
->sz_device
,
1097 dev_node_obj
->pstr_dev_name
, dw_length
);
1098 if (conn_param
!= NULL
) {
1099 strncat(pstrm_def
->sz_device
,
1100 (char *)conn_param
->node_data
,
1101 (u32
) conn_param
->cb_data
);
1103 dev_node_obj
->device_owner
= hnode
;
1107 /* Fill in create args */
1108 if (node1_type
== NODE_TASK
|| node1_type
== NODE_DAISSOCKET
) {
1109 node1
->create_args
.asa
.task_arg_obj
.num_outputs
++;
1110 fill_stream_def(node1
, output
, pattrs
);
1112 if (node2_type
== NODE_TASK
|| node2_type
== NODE_DAISSOCKET
) {
1113 node2
->create_args
.asa
.task_arg_obj
.num_inputs
++;
1114 fill_stream_def(node2
, input
, pattrs
);
1116 /* Update node1 and node2 stream_connect */
1117 if (node1_type
!= NODE_GPP
&& node1_type
!= NODE_DEVICE
) {
1118 node1
->num_outputs
++;
1119 if (stream1
> node1
->max_output_index
)
1120 node1
->max_output_index
= stream1
;
1123 if (node2_type
!= NODE_GPP
&& node2_type
!= NODE_DEVICE
) {
1124 node2
->num_inputs
++;
1125 if (stream2
> node2
->max_input_index
)
1126 node2
->max_input_index
= stream2
;
1129 fill_stream_connect(node1
, node2
, stream1
, stream2
);
1131 /* end of sync_enter_cs */
1132 /* Exit critical section */
1133 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
1135 dev_dbg(bridge
, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1136 "pattrs: %p status: 0x%x\n", __func__
, node1
,
1137 stream1
, node2
, stream2
, pattrs
, status
);
1142 * ======== node_create ========
1144 * Create a node on the DSP by remotely calling the node's create function.
1146 int node_create(struct node_object
*hnode
)
1148 struct node_object
*pnode
= (struct node_object
*)hnode
;
1149 struct node_mgr
*hnode_mgr
;
1150 struct bridge_drv_interface
*intf_fxns
;
1152 enum node_type node_type
;
1155 struct dsp_cbdata cb_data
;
1157 struct dsp_processorstate proc_state
;
1158 struct proc_object
*hprocessor
;
1159 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1160 struct dspbridge_platform_data
*pdata
=
1161 omap_dspbridge_dev
->dev
.platform_data
;
1164 DBC_REQUIRE(refs
> 0);
1169 hprocessor
= hnode
->hprocessor
;
1170 status
= proc_get_state(hprocessor
, &proc_state
,
1171 sizeof(struct dsp_processorstate
));
1174 /* If processor is in error state then don't attempt to create
1176 if (proc_state
.proc_state
== PROC_ERROR
) {
1180 /* create struct dsp_cbdata struct for PWR calls */
1181 cb_data
.cb_data
= PWR_TIMEOUT
;
1182 node_type
= node_get_type(hnode
);
1183 hnode_mgr
= hnode
->hnode_mgr
;
1184 intf_fxns
= hnode_mgr
->intf_fxns
;
1185 /* Get access to node dispatcher */
1186 mutex_lock(&hnode_mgr
->node_mgr_lock
);
1188 /* Check node state */
1189 if (node_get_state(hnode
) != NODE_ALLOCATED
)
1193 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
1198 if (proc_id
!= DSP_UNIT
)
1201 /* Make sure streams are properly connected */
1202 if ((hnode
->num_inputs
&& hnode
->max_input_index
>
1203 hnode
->num_inputs
- 1) ||
1204 (hnode
->num_outputs
&& hnode
->max_output_index
>
1205 hnode
->num_outputs
- 1))
1209 /* If node's create function is not loaded, load it */
1210 /* Boost the OPP level to max level that DSP can be requested */
1211 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1212 if (pdata
->cpu_set_freq
)
1213 (*pdata
->cpu_set_freq
) (pdata
->mpu_speed
[VDD1_OPP3
]);
1215 status
= hnode_mgr
->nldr_fxns
.pfn_load(hnode
->nldr_node_obj
,
1217 /* Get address of node's create function */
1219 hnode
->loaded
= true;
1220 if (node_type
!= NODE_DEVICE
) {
1221 status
= get_fxn_address(hnode
, &ul_create_fxn
,
1225 pr_err("%s: failed to load create code: 0x%x\n",
1228 /* Request the lowest OPP level */
1229 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1230 if (pdata
->cpu_set_freq
)
1231 (*pdata
->cpu_set_freq
) (pdata
->mpu_speed
[VDD1_OPP1
]);
1233 /* Get address of iAlg functions, if socket node */
1235 if (node_type
== NODE_DAISSOCKET
) {
1236 status
= hnode_mgr
->nldr_fxns
.pfn_get_fxn_addr
1237 (hnode
->nldr_node_obj
,
1238 hnode
->dcd_props
.obj_data
.node_obj
.
1240 &hnode
->create_args
.asa
.
1241 task_arg_obj
.ul_dais_arg
);
1246 if (node_type
!= NODE_DEVICE
) {
1247 status
= disp_node_create(hnode_mgr
->disp_obj
, hnode
,
1248 hnode_mgr
->ul_fxn_addrs
1251 &(hnode
->create_args
),
1252 &(hnode
->node_env
));
1254 /* Set the message queue id to the node env
1256 intf_fxns
= hnode_mgr
->intf_fxns
;
1257 (*intf_fxns
->pfn_msg_set_queue_id
) (hnode
->
1263 /* Phase II/Overlays: Create, execute, delete phases possibly in
1264 * different files/sections. */
1265 if (hnode
->loaded
&& hnode
->phase_split
) {
1266 /* If create code was dynamically loaded, we can now unload
1268 status1
= hnode_mgr
->nldr_fxns
.pfn_unload(hnode
->nldr_node_obj
,
1270 hnode
->loaded
= false;
1273 pr_err("%s: Failed to unload create code: 0x%x\n",
1276 /* Update node state and node manager state */
1278 NODE_SET_STATE(hnode
, NODE_CREATED
);
1279 hnode_mgr
->num_created
++;
1282 if (status
!= -EBADR
) {
1283 /* Put back in NODE_ALLOCATED state if error occurred */
1284 NODE_SET_STATE(hnode
, NODE_ALLOCATED
);
1287 /* Free access to node dispatcher */
1288 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
1291 proc_notify_clients(hnode
->hprocessor
, DSP_NODESTATECHANGE
);
1292 ntfy_notify(hnode
->ntfy_obj
, DSP_NODESTATECHANGE
);
1295 dev_dbg(bridge
, "%s: hnode: %p status: 0x%x\n", __func__
,
1301 * ======== node_create_mgr ========
1303 * Create a NODE Manager object.
1305 int node_create_mgr(struct node_mgr
**node_man
,
1306 struct dev_object
*hdev_obj
)
1309 struct node_mgr
*node_mgr_obj
= NULL
;
1310 struct disp_attr disp_attr_obj
;
1311 char *sz_zl_file
= "";
1312 struct nldr_attrs nldr_attrs_obj
;
1315 DBC_REQUIRE(refs
> 0);
1316 DBC_REQUIRE(node_man
!= NULL
);
1317 DBC_REQUIRE(hdev_obj
!= NULL
);
1320 /* Allocate Node manager object */
1321 node_mgr_obj
= kzalloc(sizeof(struct node_mgr
), GFP_KERNEL
);
1323 node_mgr_obj
->hdev_obj
= hdev_obj
;
1324 node_mgr_obj
->node_list
= kzalloc(sizeof(struct lst_list
),
1326 node_mgr_obj
->pipe_map
= gb_create(MAXPIPES
);
1327 node_mgr_obj
->pipe_done_map
= gb_create(MAXPIPES
);
1328 if (node_mgr_obj
->node_list
== NULL
1329 || node_mgr_obj
->pipe_map
== NULL
1330 || node_mgr_obj
->pipe_done_map
== NULL
) {
1333 INIT_LIST_HEAD(&node_mgr_obj
->node_list
->head
);
1334 node_mgr_obj
->ntfy_obj
= kmalloc(
1335 sizeof(struct ntfy_object
), GFP_KERNEL
);
1336 if (node_mgr_obj
->ntfy_obj
)
1337 ntfy_init(node_mgr_obj
->ntfy_obj
);
1341 node_mgr_obj
->num_created
= 0;
1345 /* get devNodeType */
1347 status
= dev_get_dev_type(hdev_obj
, &dev_type
);
1349 /* Create the DCD Manager */
1352 dcd_create_manager(sz_zl_file
, &node_mgr_obj
->hdcd_mgr
);
1354 status
= get_proc_props(node_mgr_obj
, hdev_obj
);
1357 /* Create NODE Dispatcher */
1359 disp_attr_obj
.ul_chnl_offset
= node_mgr_obj
->ul_chnl_offset
;
1360 disp_attr_obj
.ul_chnl_buf_size
= node_mgr_obj
->ul_chnl_buf_size
;
1361 disp_attr_obj
.proc_family
= node_mgr_obj
->proc_family
;
1362 disp_attr_obj
.proc_type
= node_mgr_obj
->proc_type
;
1364 disp_create(&node_mgr_obj
->disp_obj
, hdev_obj
,
1367 /* Create a STRM Manager */
1369 status
= strm_create(&node_mgr_obj
->strm_mgr_obj
, hdev_obj
);
1372 dev_get_intf_fxns(hdev_obj
, &node_mgr_obj
->intf_fxns
);
1373 /* Get msg_ctrl queue manager */
1374 dev_get_msg_mgr(hdev_obj
, &node_mgr_obj
->msg_mgr_obj
);
1375 mutex_init(&node_mgr_obj
->node_mgr_lock
);
1376 node_mgr_obj
->chnl_map
= gb_create(node_mgr_obj
->ul_num_chnls
);
1377 /* dma chnl map. ul_num_chnls is # per transport */
1378 node_mgr_obj
->dma_chnl_map
=
1379 gb_create(node_mgr_obj
->ul_num_chnls
);
1380 node_mgr_obj
->zc_chnl_map
=
1381 gb_create(node_mgr_obj
->ul_num_chnls
);
1382 if ((node_mgr_obj
->chnl_map
== NULL
)
1383 || (node_mgr_obj
->dma_chnl_map
== NULL
)
1384 || (node_mgr_obj
->zc_chnl_map
== NULL
)) {
1387 /* Block out reserved channels */
1388 for (i
= 0; i
< node_mgr_obj
->ul_chnl_offset
; i
++)
1389 gb_set(node_mgr_obj
->chnl_map
, i
);
1391 /* Block out channels reserved for RMS */
1392 gb_set(node_mgr_obj
->chnl_map
,
1393 node_mgr_obj
->ul_chnl_offset
);
1394 gb_set(node_mgr_obj
->chnl_map
,
1395 node_mgr_obj
->ul_chnl_offset
+ 1);
1399 /* NO RM Server on the IVA */
1400 if (dev_type
!= IVA_UNIT
) {
1401 /* Get addresses of any RMS functions loaded */
1402 status
= get_rms_fxns(node_mgr_obj
);
1406 /* Get loader functions and create loader */
1408 node_mgr_obj
->nldr_fxns
= nldr_fxns
; /* Dyn loader funcs */
1411 nldr_attrs_obj
.pfn_ovly
= ovly
;
1412 nldr_attrs_obj
.pfn_write
= mem_write
;
1413 nldr_attrs_obj
.us_dsp_word_size
= node_mgr_obj
->udsp_word_size
;
1414 nldr_attrs_obj
.us_dsp_mau_size
= node_mgr_obj
->udsp_mau_size
;
1415 node_mgr_obj
->loader_init
= node_mgr_obj
->nldr_fxns
.pfn_init();
1417 node_mgr_obj
->nldr_fxns
.pfn_create(&node_mgr_obj
->nldr_obj
,
1422 *node_man
= node_mgr_obj
;
1424 delete_node_mgr(node_mgr_obj
);
1426 DBC_ENSURE((status
&& *node_man
== NULL
) || (!status
&& *node_man
));
1432 * ======== node_delete ========
1434 * Delete a node on the DSP by remotely calling the node's delete function.
1435 * Loads the node's delete function if necessary. Free GPP side resources
1436 * after node's delete function returns.
1438 int node_delete(struct node_res_object
*noderes
,
1439 struct process_context
*pr_ctxt
)
1441 struct node_object
*pnode
= noderes
->hnode
;
1442 struct node_mgr
*hnode_mgr
;
1443 struct proc_object
*hprocessor
;
1444 struct disp_object
*disp_obj
;
1446 enum node_type node_type
;
1447 enum node_state state
;
1450 struct dsp_cbdata cb_data
;
1452 struct bridge_drv_interface
*intf_fxns
;
1454 void *node_res
= noderes
;
1456 struct dsp_processorstate proc_state
;
1457 DBC_REQUIRE(refs
> 0);
1463 /* create struct dsp_cbdata struct for PWR call */
1464 cb_data
.cb_data
= PWR_TIMEOUT
;
1465 hnode_mgr
= pnode
->hnode_mgr
;
1466 hprocessor
= pnode
->hprocessor
;
1467 disp_obj
= hnode_mgr
->disp_obj
;
1468 node_type
= node_get_type(pnode
);
1469 intf_fxns
= hnode_mgr
->intf_fxns
;
1470 /* Enter critical section */
1471 mutex_lock(&hnode_mgr
->node_mgr_lock
);
1473 state
= node_get_state(pnode
);
1474 /* Execute delete phase code for non-device node in all cases
1475 * except when the node was only allocated. Delete phase must be
1476 * executed even if create phase was executed, but failed.
1477 * If the node environment pointer is non-NULL, the delete phase
1478 * code must be executed. */
1479 if (!(state
== NODE_ALLOCATED
&& pnode
->node_env
== (u32
) NULL
) &&
1480 node_type
!= NODE_DEVICE
) {
1481 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
1485 if (proc_id
== DSP_UNIT
|| proc_id
== IVA_UNIT
) {
1486 /* If node has terminated, execute phase code will
1487 * have already been unloaded in node_on_exit(). If the
1488 * node is PAUSED, the execute phase is loaded, and it
1489 * is now ok to unload it. If the node is running, we
1490 * will unload the execute phase only after deleting
1492 if (state
== NODE_PAUSED
&& pnode
->loaded
&&
1493 pnode
->phase_split
) {
1494 /* Ok to unload execute code as long as node
1495 * is not * running */
1497 hnode_mgr
->nldr_fxns
.
1498 pfn_unload(pnode
->nldr_node_obj
,
1500 pnode
->loaded
= false;
1501 NODE_SET_STATE(pnode
, NODE_DONE
);
1503 /* Load delete phase code if not loaded or if haven't
1504 * * unloaded EXECUTE phase */
1505 if ((!(pnode
->loaded
) || (state
== NODE_RUNNING
)) &&
1506 pnode
->phase_split
) {
1508 hnode_mgr
->nldr_fxns
.
1509 pfn_load(pnode
->nldr_node_obj
, NLDR_DELETE
);
1511 pnode
->loaded
= true;
1513 pr_err("%s: fail - load delete code:"
1514 " 0x%x\n", __func__
, status
);
1519 /* Unblock a thread trying to terminate the node */
1520 (void)sync_set_event(pnode
->sync_done
);
1521 if (proc_id
== DSP_UNIT
) {
1522 /* ul_delete_fxn = address of node's delete
1524 status
= get_fxn_address(pnode
, &ul_delete_fxn
,
1526 } else if (proc_id
== IVA_UNIT
)
1527 ul_delete_fxn
= (u32
) pnode
->node_env
;
1529 status
= proc_get_state(hprocessor
,
1532 dsp_processorstate
));
1533 if (proc_state
.proc_state
!= PROC_ERROR
) {
1535 disp_node_delete(disp_obj
, pnode
,
1542 NODE_SET_STATE(pnode
, NODE_DONE
);
1544 /* Unload execute, if not unloaded, and delete
1546 if (state
== NODE_RUNNING
&&
1547 pnode
->phase_split
) {
1549 hnode_mgr
->nldr_fxns
.
1550 pfn_unload(pnode
->nldr_node_obj
,
1554 pr_err("%s: fail - unload execute code:"
1555 " 0x%x\n", __func__
, status1
);
1558 hnode_mgr
->nldr_fxns
.pfn_unload(pnode
->
1561 pnode
->loaded
= false;
1563 pr_err("%s: fail - unload delete code: "
1564 "0x%x\n", __func__
, status1
);
1568 /* Free host side resources even if a failure occurred */
1569 /* Remove node from hnode_mgr->node_list */
1570 lst_remove_elem(hnode_mgr
->node_list
, (struct list_head
*)pnode
);
1571 hnode_mgr
->num_nodes
--;
1572 /* Decrement count of nodes created on DSP */
1573 if ((state
!= NODE_ALLOCATED
) || ((state
== NODE_ALLOCATED
) &&
1574 (pnode
->node_env
!= (u32
) NULL
)))
1575 hnode_mgr
->num_created
--;
1576 /* Free host-side resources allocated by node_create()
1577 * delete_node() fails if SM buffers not freed by client! */
1578 drv_proc_node_update_status(node_res
, false);
1579 delete_node(pnode
, pr_ctxt
);
1582 * Release all Node resources and its context
1584 idr_remove(pr_ctxt
->node_id
, ((struct node_res_object
*)node_res
)->id
);
1587 /* Exit critical section */
1588 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
1589 proc_notify_clients(hprocessor
, DSP_NODESTATECHANGE
);
1591 dev_dbg(bridge
, "%s: pnode: %p status 0x%x\n", __func__
, pnode
, status
);
1596 * ======== node_delete_mgr ========
1598 * Delete the NODE Manager.
1600 int node_delete_mgr(struct node_mgr
*hnode_mgr
)
1604 DBC_REQUIRE(refs
> 0);
1607 delete_node_mgr(hnode_mgr
);
1615 * ======== node_enum_nodes ========
1617 * Enumerate currently allocated nodes.
1619 int node_enum_nodes(struct node_mgr
*hnode_mgr
, void **node_tab
,
1620 u32 node_tab_size
, u32
*pu_num_nodes
,
1623 struct node_object
*hnode
;
1626 DBC_REQUIRE(refs
> 0);
1627 DBC_REQUIRE(node_tab
!= NULL
|| node_tab_size
== 0);
1628 DBC_REQUIRE(pu_num_nodes
!= NULL
);
1629 DBC_REQUIRE(pu_allocated
!= NULL
);
1635 /* Enter critical section */
1636 mutex_lock(&hnode_mgr
->node_mgr_lock
);
1638 if (hnode_mgr
->num_nodes
> node_tab_size
) {
1639 *pu_allocated
= hnode_mgr
->num_nodes
;
1643 hnode
= (struct node_object
*)lst_first(hnode_mgr
->
1645 for (i
= 0; i
< hnode_mgr
->num_nodes
; i
++) {
1647 node_tab
[i
] = hnode
;
1648 hnode
= (struct node_object
*)lst_next
1649 (hnode_mgr
->node_list
,
1650 (struct list_head
*)hnode
);
1652 *pu_allocated
= *pu_num_nodes
= hnode_mgr
->num_nodes
;
1654 /* end of sync_enter_cs */
1655 /* Exit critical section */
1656 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
1662 * ======== node_exit ========
1664 * Discontinue usage of NODE module.
1666 void node_exit(void)
1668 DBC_REQUIRE(refs
> 0);
1672 DBC_ENSURE(refs
>= 0);
1676 * ======== node_free_msg_buf ========
1678 * Frees the message buffer.
1680 int node_free_msg_buf(struct node_object
*hnode
, u8
* pbuffer
,
1681 struct dsp_bufferattr
*pattr
)
1683 struct node_object
*pnode
= (struct node_object
*)hnode
;
1686 DBC_REQUIRE(refs
> 0);
1687 DBC_REQUIRE(pbuffer
!= NULL
);
1688 DBC_REQUIRE(pnode
!= NULL
);
1689 DBC_REQUIRE(pnode
->xlator
!= NULL
);
1695 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
1696 if (proc_id
== DSP_UNIT
) {
1698 if (pattr
== NULL
) {
1700 pattr
= &node_dfltbufattrs
;
1702 /* Node supports single SM segment only */
1703 if (pattr
->segment_id
!= 1)
1706 /* pbuffer is clients Va. */
1707 status
= cmm_xlator_free_buf(pnode
->xlator
, pbuffer
);
1710 DBC_ASSERT(NULL
); /* BUG */
1717 * ======== node_get_attr ========
1719 * Copy the current attributes of the specified node into a dsp_nodeattr
1722 int node_get_attr(struct node_object
*hnode
,
1723 struct dsp_nodeattr
*pattr
, u32 attr_size
)
1725 struct node_mgr
*hnode_mgr
;
1727 DBC_REQUIRE(refs
> 0);
1728 DBC_REQUIRE(pattr
!= NULL
);
1729 DBC_REQUIRE(attr_size
>= sizeof(struct dsp_nodeattr
));
1734 hnode_mgr
= hnode
->hnode_mgr
;
1735 /* Enter hnode_mgr critical section (since we're accessing
1736 * data that could be changed by node_change_priority() and
1737 * node_connect(). */
1738 mutex_lock(&hnode_mgr
->node_mgr_lock
);
1739 pattr
->cb_struct
= sizeof(struct dsp_nodeattr
);
1740 /* dsp_nodeattrin */
1741 pattr
->in_node_attr_in
.cb_struct
=
1742 sizeof(struct dsp_nodeattrin
);
1743 pattr
->in_node_attr_in
.prio
= hnode
->prio
;
1744 pattr
->in_node_attr_in
.utimeout
= hnode
->utimeout
;
1745 pattr
->in_node_attr_in
.heap_size
=
1746 hnode
->create_args
.asa
.task_arg_obj
.heap_size
;
1747 pattr
->in_node_attr_in
.pgpp_virt_addr
= (void *)
1748 hnode
->create_args
.asa
.task_arg_obj
.ugpp_heap_addr
;
1749 pattr
->node_attr_inputs
= hnode
->num_gpp_inputs
;
1750 pattr
->node_attr_outputs
= hnode
->num_gpp_outputs
;
1752 get_node_info(hnode
, &(pattr
->node_info
));
1753 /* end of sync_enter_cs */
1754 /* Exit critical section */
1755 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
1761 * ======== node_get_channel_id ========
1763 * Get the channel index reserved for a stream connection between the
1766 int node_get_channel_id(struct node_object
*hnode
, u32 dir
, u32 index
,
1769 enum node_type node_type
;
1770 int status
= -EINVAL
;
1771 DBC_REQUIRE(refs
> 0);
1772 DBC_REQUIRE(dir
== DSP_TONODE
|| dir
== DSP_FROMNODE
);
1773 DBC_REQUIRE(chan_id
!= NULL
);
1779 node_type
= node_get_type(hnode
);
1780 if (node_type
!= NODE_TASK
&& node_type
!= NODE_DAISSOCKET
) {
1784 if (dir
== DSP_TONODE
) {
1785 if (index
< MAX_INPUTS(hnode
)) {
1786 if (hnode
->inputs
[index
].type
== HOSTCONNECT
) {
1787 *chan_id
= hnode
->inputs
[index
].dev_id
;
1792 DBC_ASSERT(dir
== DSP_FROMNODE
);
1793 if (index
< MAX_OUTPUTS(hnode
)) {
1794 if (hnode
->outputs
[index
].type
== HOSTCONNECT
) {
1795 *chan_id
= hnode
->outputs
[index
].dev_id
;
1804 * ======== node_get_message ========
1806 * Retrieve a message from a node on the DSP.
1808 int node_get_message(struct node_object
*hnode
,
1809 struct dsp_msg
*message
, u32 utimeout
)
1811 struct node_mgr
*hnode_mgr
;
1812 enum node_type node_type
;
1813 struct bridge_drv_interface
*intf_fxns
;
1816 struct dsp_processorstate proc_state
;
1817 struct proc_object
*hprocessor
;
1819 DBC_REQUIRE(refs
> 0);
1820 DBC_REQUIRE(message
!= NULL
);
1826 hprocessor
= hnode
->hprocessor
;
1827 status
= proc_get_state(hprocessor
, &proc_state
,
1828 sizeof(struct dsp_processorstate
));
1831 /* If processor is in error state then don't attempt to get the
1833 if (proc_state
.proc_state
== PROC_ERROR
) {
1837 hnode_mgr
= hnode
->hnode_mgr
;
1838 node_type
= node_get_type(hnode
);
1839 if (node_type
!= NODE_MESSAGE
&& node_type
!= NODE_TASK
&&
1840 node_type
!= NODE_DAISSOCKET
) {
1844 /* This function will block unless a message is available. Since
1845 * DSPNode_RegisterNotify() allows notification when a message
1846 * is available, the system can be designed so that
1847 * DSPNode_GetMessage() is only called when a message is
1849 intf_fxns
= hnode_mgr
->intf_fxns
;
1851 (*intf_fxns
->pfn_msg_get
) (hnode
->msg_queue_obj
, message
, utimeout
);
1852 /* Check if message contains SM descriptor */
1853 if (status
|| !(message
->dw_cmd
& DSP_RMSBUFDESC
))
1856 /* Translate DSP byte addr to GPP Va. */
1857 tmp_buf
= cmm_xlator_translate(hnode
->xlator
,
1858 (void *)(message
->dw_arg1
*
1860 udsp_word_size
), CMM_DSPPA2PA
);
1861 if (tmp_buf
!= NULL
) {
1862 /* now convert this GPP Pa to Va */
1863 tmp_buf
= cmm_xlator_translate(hnode
->xlator
, tmp_buf
,
1865 if (tmp_buf
!= NULL
) {
1866 /* Adjust SM size in msg */
1867 message
->dw_arg1
= (u32
) tmp_buf
;
1868 message
->dw_arg2
*= hnode
->hnode_mgr
->udsp_word_size
;
1876 dev_dbg(bridge
, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__
,
1877 hnode
, message
, utimeout
);
1882 * ======== node_get_nldr_obj ========
1884 int node_get_nldr_obj(struct node_mgr
*hnode_mgr
,
1885 struct nldr_object
**nldr_ovlyobj
)
1888 struct node_mgr
*node_mgr_obj
= hnode_mgr
;
1889 DBC_REQUIRE(nldr_ovlyobj
!= NULL
);
1894 *nldr_ovlyobj
= node_mgr_obj
->nldr_obj
;
1896 DBC_ENSURE(!status
|| (nldr_ovlyobj
!= NULL
&& *nldr_ovlyobj
== NULL
));
1901 * ======== node_get_strm_mgr ========
1903 * Returns the Stream manager.
1905 int node_get_strm_mgr(struct node_object
*hnode
,
1906 struct strm_mgr
**strm_man
)
1910 DBC_REQUIRE(refs
> 0);
1915 *strm_man
= hnode
->hnode_mgr
->strm_mgr_obj
;
1921 * ======== node_get_load_type ========
1923 enum nldr_loadtype
node_get_load_type(struct node_object
*hnode
)
1925 DBC_REQUIRE(refs
> 0);
1928 dev_dbg(bridge
, "%s: Failed. hnode: %p\n", __func__
, hnode
);
1931 return hnode
->dcd_props
.obj_data
.node_obj
.us_load_type
;
1936 * ======== node_get_timeout ========
1938 * Returns the timeout value for this node.
1940 u32
node_get_timeout(struct node_object
*hnode
)
1942 DBC_REQUIRE(refs
> 0);
1945 dev_dbg(bridge
, "%s: failed. hnode: %p\n", __func__
, hnode
);
1948 return hnode
->utimeout
;
1953 * ======== node_get_type ========
1955 * Returns the node type.
1957 enum node_type
node_get_type(struct node_object
*hnode
)
1959 enum node_type node_type
;
1961 if (hnode
== (struct node_object
*)DSP_HGPPNODE
)
1962 node_type
= NODE_GPP
;
1967 node_type
= hnode
->ntype
;
1973 * ======== node_init ========
1975 * Initialize the NODE module.
1977 bool node_init(void)
1979 DBC_REQUIRE(refs
>= 0);
1987 * ======== node_on_exit ========
1989 * Gets called when RMS_EXIT is received for a node.
1991 void node_on_exit(struct node_object
*hnode
, s32 node_status
)
1996 /* Set node state to done */
1997 NODE_SET_STATE(hnode
, NODE_DONE
);
1998 hnode
->exit_status
= node_status
;
1999 if (hnode
->loaded
&& hnode
->phase_split
) {
2000 (void)hnode
->hnode_mgr
->nldr_fxns
.pfn_unload(hnode
->
2003 hnode
->loaded
= false;
2005 /* Unblock call to node_terminate */
2006 (void)sync_set_event(hnode
->sync_done
);
2007 /* Notify clients */
2008 proc_notify_clients(hnode
->hprocessor
, DSP_NODESTATECHANGE
);
2009 ntfy_notify(hnode
->ntfy_obj
, DSP_NODESTATECHANGE
);
2013 * ======== node_pause ========
2015 * Suspend execution of a node currently running on the DSP.
2017 int node_pause(struct node_object
*hnode
)
2019 struct node_object
*pnode
= (struct node_object
*)hnode
;
2020 enum node_type node_type
;
2021 enum node_state state
;
2022 struct node_mgr
*hnode_mgr
;
2025 struct dsp_processorstate proc_state
;
2026 struct proc_object
*hprocessor
;
2028 DBC_REQUIRE(refs
> 0);
2033 node_type
= node_get_type(hnode
);
2034 if (node_type
!= NODE_TASK
&& node_type
!= NODE_DAISSOCKET
)
2040 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
2042 if (proc_id
== IVA_UNIT
)
2046 hnode_mgr
= hnode
->hnode_mgr
;
2048 /* Enter critical section */
2049 mutex_lock(&hnode_mgr
->node_mgr_lock
);
2050 state
= node_get_state(hnode
);
2051 /* Check node state */
2052 if (state
!= NODE_RUNNING
)
2057 hprocessor
= hnode
->hprocessor
;
2058 status
= proc_get_state(hprocessor
, &proc_state
,
2059 sizeof(struct dsp_processorstate
));
2062 /* If processor is in error state then don't attempt
2063 to send the message */
2064 if (proc_state
.proc_state
== PROC_ERROR
) {
2069 status
= disp_node_change_priority(hnode_mgr
->disp_obj
, hnode
,
2070 hnode_mgr
->ul_fxn_addrs
[RMSCHANGENODEPRIORITY
],
2071 hnode
->node_env
, NODE_SUSPENDEDPRI
);
2075 NODE_SET_STATE(hnode
, NODE_PAUSED
);
2078 /* End of sync_enter_cs */
2079 /* Leave critical section */
2080 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
2082 proc_notify_clients(hnode
->hprocessor
,
2083 DSP_NODESTATECHANGE
);
2084 ntfy_notify(hnode
->ntfy_obj
, DSP_NODESTATECHANGE
);
2088 dev_dbg(bridge
, "%s: hnode: %p status 0x%x\n", __func__
, hnode
, status
);
2093 * ======== node_put_message ========
2095 * Send a message to a message node, task node, or XDAIS socket node. This
2096 * function will block until the message stream can accommodate the
2097 * message, or a timeout occurs.
2099 int node_put_message(struct node_object
*hnode
,
2100 const struct dsp_msg
*pmsg
, u32 utimeout
)
2102 struct node_mgr
*hnode_mgr
= NULL
;
2103 enum node_type node_type
;
2104 struct bridge_drv_interface
*intf_fxns
;
2105 enum node_state state
;
2108 struct dsp_msg new_msg
;
2109 struct dsp_processorstate proc_state
;
2110 struct proc_object
*hprocessor
;
2112 DBC_REQUIRE(refs
> 0);
2113 DBC_REQUIRE(pmsg
!= NULL
);
2119 hprocessor
= hnode
->hprocessor
;
2120 status
= proc_get_state(hprocessor
, &proc_state
,
2121 sizeof(struct dsp_processorstate
));
2124 /* If processor is in bad state then don't attempt sending the
2126 if (proc_state
.proc_state
== PROC_ERROR
) {
2130 hnode_mgr
= hnode
->hnode_mgr
;
2131 node_type
= node_get_type(hnode
);
2132 if (node_type
!= NODE_MESSAGE
&& node_type
!= NODE_TASK
&&
2133 node_type
!= NODE_DAISSOCKET
)
2137 /* Check node state. Can't send messages to a node after
2138 * we've sent the RMS_EXIT command. There is still the
2139 * possibility that node_terminate can be called after we've
2140 * checked the state. Could add another SYNC object to
2141 * prevent this (can't use node_mgr_lock, since we don't
2142 * want to block other NODE functions). However, the node may
2143 * still exit on its own, before this message is sent. */
2144 mutex_lock(&hnode_mgr
->node_mgr_lock
);
2145 state
= node_get_state(hnode
);
2146 if (state
== NODE_TERMINATING
|| state
== NODE_DONE
)
2149 /* end of sync_enter_cs */
2150 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
2155 /* assign pmsg values to new msg */
2157 /* Now, check if message contains a SM buffer descriptor */
2158 if (pmsg
->dw_cmd
& DSP_RMSBUFDESC
) {
2159 /* Translate GPP Va to DSP physical buf Ptr. */
2160 tmp_buf
= cmm_xlator_translate(hnode
->xlator
,
2161 (void *)new_msg
.dw_arg1
,
2163 if (tmp_buf
!= NULL
) {
2164 /* got translation, convert to MAUs in msg */
2165 if (hnode
->hnode_mgr
->udsp_word_size
!= 0) {
2168 hnode
->hnode_mgr
->udsp_word_size
;
2170 new_msg
.dw_arg2
/= hnode
->hnode_mgr
->
2173 pr_err("%s: udsp_word_size is zero!\n",
2175 status
= -EPERM
; /* bad DSPWordSize */
2177 } else { /* failed to translate buffer address */
2182 intf_fxns
= hnode_mgr
->intf_fxns
;
2183 status
= (*intf_fxns
->pfn_msg_put
) (hnode
->msg_queue_obj
,
2184 &new_msg
, utimeout
);
2187 dev_dbg(bridge
, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
2188 "status 0x%x\n", __func__
, hnode
, pmsg
, utimeout
, status
);
2193 * ======== node_register_notify ========
2195 * Register to be notified on specific events for this node.
2197 int node_register_notify(struct node_object
*hnode
, u32 event_mask
,
2199 struct dsp_notification
*hnotification
)
2201 struct bridge_drv_interface
*intf_fxns
;
2204 DBC_REQUIRE(refs
> 0);
2205 DBC_REQUIRE(hnotification
!= NULL
);
2210 /* Check if event mask is a valid node related event */
2211 if (event_mask
& ~(DSP_NODESTATECHANGE
| DSP_NODEMESSAGEREADY
))
2214 /* Check if notify type is valid */
2215 if (notify_type
!= DSP_SIGNALEVENT
)
2218 /* Only one Notification can be registered at a
2219 * time - Limitation */
2220 if (event_mask
== (DSP_NODESTATECHANGE
| DSP_NODEMESSAGEREADY
))
2224 if (event_mask
== DSP_NODESTATECHANGE
) {
2225 status
= ntfy_register(hnode
->ntfy_obj
, hnotification
,
2226 event_mask
& DSP_NODESTATECHANGE
,
2229 /* Send Message part of event mask to msg_ctrl */
2230 intf_fxns
= hnode
->hnode_mgr
->intf_fxns
;
2231 status
= (*intf_fxns
->pfn_msg_register_notify
)
2232 (hnode
->msg_queue_obj
,
2233 event_mask
& DSP_NODEMESSAGEREADY
, notify_type
,
2238 dev_dbg(bridge
, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2239 "hnotification: %p status 0x%x\n", __func__
, hnode
,
2240 event_mask
, notify_type
, hnotification
, status
);
2245 * ======== node_run ========
2247 * Start execution of a node's execute phase, or resume execution of a node
2248 * that has been suspended (via NODE_NodePause()) on the DSP. Load the
2249 * node's execute function if necessary.
2251 int node_run(struct node_object
*hnode
)
2253 struct node_object
*pnode
= (struct node_object
*)hnode
;
2254 struct node_mgr
*hnode_mgr
;
2255 enum node_type node_type
;
2256 enum node_state state
;
2261 struct bridge_drv_interface
*intf_fxns
;
2262 struct dsp_processorstate proc_state
;
2263 struct proc_object
*hprocessor
;
2265 DBC_REQUIRE(refs
> 0);
2271 hprocessor
= hnode
->hprocessor
;
2272 status
= proc_get_state(hprocessor
, &proc_state
,
2273 sizeof(struct dsp_processorstate
));
2276 /* If processor is in error state then don't attempt to run the node */
2277 if (proc_state
.proc_state
== PROC_ERROR
) {
2281 node_type
= node_get_type(hnode
);
2282 if (node_type
== NODE_DEVICE
)
2287 hnode_mgr
= hnode
->hnode_mgr
;
2292 intf_fxns
= hnode_mgr
->intf_fxns
;
2293 /* Enter critical section */
2294 mutex_lock(&hnode_mgr
->node_mgr_lock
);
2296 state
= node_get_state(hnode
);
2297 if (state
!= NODE_CREATED
&& state
!= NODE_PAUSED
)
2301 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
2306 if ((proc_id
!= DSP_UNIT
) && (proc_id
!= IVA_UNIT
))
2309 if (state
== NODE_CREATED
) {
2310 /* If node's execute function is not loaded, load it */
2311 if (!(hnode
->loaded
) && hnode
->phase_split
) {
2313 hnode_mgr
->nldr_fxns
.pfn_load(hnode
->nldr_node_obj
,
2316 hnode
->loaded
= true;
2318 pr_err("%s: fail - load execute code: 0x%x\n",
2323 /* Get address of node's execute function */
2324 if (proc_id
== IVA_UNIT
)
2325 ul_execute_fxn
= (u32
) hnode
->node_env
;
2327 status
= get_fxn_address(hnode
, &ul_execute_fxn
,
2332 ul_fxn_addr
= hnode_mgr
->ul_fxn_addrs
[RMSEXECUTENODE
];
2334 disp_node_run(hnode_mgr
->disp_obj
, hnode
,
2335 ul_fxn_addr
, ul_execute_fxn
,
2338 } else if (state
== NODE_PAUSED
) {
2339 ul_fxn_addr
= hnode_mgr
->ul_fxn_addrs
[RMSCHANGENODEPRIORITY
];
2340 status
= disp_node_change_priority(hnode_mgr
->disp_obj
, hnode
,
2341 ul_fxn_addr
, hnode
->node_env
,
2342 NODE_GET_PRIORITY(hnode
));
2344 /* We should never get here */
2348 /* Update node state. */
2350 NODE_SET_STATE(hnode
, NODE_RUNNING
);
2351 else /* Set state back to previous value */
2352 NODE_SET_STATE(hnode
, state
);
2353 /*End of sync_enter_cs */
2354 /* Exit critical section */
2355 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
2357 proc_notify_clients(hnode
->hprocessor
, DSP_NODESTATECHANGE
);
2358 ntfy_notify(hnode
->ntfy_obj
, DSP_NODESTATECHANGE
);
2361 dev_dbg(bridge
, "%s: hnode: %p status 0x%x\n", __func__
, hnode
, status
);
2366 * ======== node_terminate ========
2368 * Signal a node running on the DSP that it should exit its execute phase
2371 int node_terminate(struct node_object
*hnode
, int *pstatus
)
2373 struct node_object
*pnode
= (struct node_object
*)hnode
;
2374 struct node_mgr
*hnode_mgr
= NULL
;
2375 enum node_type node_type
;
2376 struct bridge_drv_interface
*intf_fxns
;
2377 enum node_state state
;
2378 struct dsp_msg msg
, killmsg
;
2380 u32 proc_id
, kill_time_out
;
2381 struct deh_mgr
*hdeh_mgr
;
2382 struct dsp_processorstate proc_state
;
2384 DBC_REQUIRE(refs
> 0);
2385 DBC_REQUIRE(pstatus
!= NULL
);
2387 if (!hnode
|| !hnode
->hnode_mgr
) {
2391 if (pnode
->hprocessor
== NULL
) {
2395 status
= proc_get_processor_id(pnode
->hprocessor
, &proc_id
);
2398 hnode_mgr
= hnode
->hnode_mgr
;
2399 node_type
= node_get_type(hnode
);
2400 if (node_type
!= NODE_TASK
&& node_type
!= NODE_DAISSOCKET
)
2404 /* Check node state */
2405 mutex_lock(&hnode_mgr
->node_mgr_lock
);
2406 state
= node_get_state(hnode
);
2407 if (state
!= NODE_RUNNING
) {
2409 /* Set the exit status if node terminated on
2411 if (state
== NODE_DONE
)
2412 *pstatus
= hnode
->exit_status
;
2415 NODE_SET_STATE(hnode
, NODE_TERMINATING
);
2417 /* end of sync_enter_cs */
2418 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
2422 * Send exit message. Do not change state to NODE_DONE
2423 * here. That will be done in callback.
2425 status
= proc_get_state(pnode
->hprocessor
, &proc_state
,
2426 sizeof(struct dsp_processorstate
));
2429 /* If processor is in error state then don't attempt to send
2430 * A kill task command */
2431 if (proc_state
.proc_state
== PROC_ERROR
) {
2436 msg
.dw_cmd
= RMS_EXIT
;
2437 msg
.dw_arg1
= hnode
->node_env
;
2438 killmsg
.dw_cmd
= RMS_KILLTASK
;
2439 killmsg
.dw_arg1
= hnode
->node_env
;
2440 intf_fxns
= hnode_mgr
->intf_fxns
;
2442 if (hnode
->utimeout
> MAXTIMEOUT
)
2443 kill_time_out
= MAXTIMEOUT
;
2445 kill_time_out
= (hnode
->utimeout
) * 2;
2447 status
= (*intf_fxns
->pfn_msg_put
) (hnode
->msg_queue_obj
, &msg
,
2453 * Wait on synchronization object that will be
2454 * posted in the callback on receiving RMS_EXIT
2455 * message, or by node_delete. Check for valid hnode,
2456 * in case posted by node_delete().
2458 status
= sync_wait_on_event(hnode
->sync_done
,
2460 if (status
!= ETIME
)
2463 status
= (*intf_fxns
->pfn_msg_put
)(hnode
->msg_queue_obj
,
2464 &killmsg
, hnode
->utimeout
);
2467 status
= sync_wait_on_event(hnode
->sync_done
,
2471 * Here it goes the part of the simulation of
2472 * the DSP exception.
2474 dev_get_deh_mgr(hnode_mgr
->hdev_obj
, &hdeh_mgr
);
2478 bridge_deh_notify(hdeh_mgr
, DSP_SYSERROR
, DSP_EXCEPTIONABORT
);
2483 /* Enter CS before getting exit status, in case node was
2485 mutex_lock(&hnode_mgr
->node_mgr_lock
);
2486 /* Make sure node wasn't deleted while we blocked */
2490 *pstatus
= hnode
->exit_status
;
2491 dev_dbg(bridge
, "%s: hnode: %p env 0x%x status 0x%x\n",
2492 __func__
, hnode
, hnode
->node_env
, status
);
2494 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
2495 } /*End of sync_enter_cs */
2501 * ======== delete_node ========
2503 * Free GPP resources allocated in node_allocate() or node_connect().
2505 static void delete_node(struct node_object
*hnode
,
2506 struct process_context
*pr_ctxt
)
2508 struct node_mgr
*hnode_mgr
;
2509 struct cmm_xlatorobject
*xlator
;
2510 struct bridge_drv_interface
*intf_fxns
;
2512 enum node_type node_type
;
2513 struct stream_chnl stream
;
2514 struct node_msgargs node_msg_args
;
2515 struct node_taskargs task_arg_obj
;
2516 #ifdef DSP_DMM_DEBUG
2517 struct dmm_object
*dmm_mgr
;
2518 struct proc_object
*p_proc_object
=
2519 (struct proc_object
*)hnode
->hprocessor
;
2524 hnode_mgr
= hnode
->hnode_mgr
;
2527 xlator
= hnode
->xlator
;
2528 node_type
= node_get_type(hnode
);
2529 if (node_type
!= NODE_DEVICE
) {
2530 node_msg_args
= hnode
->create_args
.asa
.node_msg_args
;
2531 kfree(node_msg_args
.pdata
);
2533 /* Free msg_ctrl queue */
2534 if (hnode
->msg_queue_obj
) {
2535 intf_fxns
= hnode_mgr
->intf_fxns
;
2536 (*intf_fxns
->pfn_msg_delete_queue
) (hnode
->
2538 hnode
->msg_queue_obj
= NULL
;
2541 kfree(hnode
->sync_done
);
2543 /* Free all stream info */
2544 if (hnode
->inputs
) {
2545 for (i
= 0; i
< MAX_INPUTS(hnode
); i
++) {
2546 stream
= hnode
->inputs
[i
];
2547 free_stream(hnode_mgr
, stream
);
2549 kfree(hnode
->inputs
);
2550 hnode
->inputs
= NULL
;
2552 if (hnode
->outputs
) {
2553 for (i
= 0; i
< MAX_OUTPUTS(hnode
); i
++) {
2554 stream
= hnode
->outputs
[i
];
2555 free_stream(hnode_mgr
, stream
);
2557 kfree(hnode
->outputs
);
2558 hnode
->outputs
= NULL
;
2560 task_arg_obj
= hnode
->create_args
.asa
.task_arg_obj
;
2561 if (task_arg_obj
.strm_in_def
) {
2562 for (i
= 0; i
< MAX_INPUTS(hnode
); i
++) {
2563 kfree(task_arg_obj
.strm_in_def
[i
].sz_device
);
2564 task_arg_obj
.strm_in_def
[i
].sz_device
= NULL
;
2566 kfree(task_arg_obj
.strm_in_def
);
2567 task_arg_obj
.strm_in_def
= NULL
;
2569 if (task_arg_obj
.strm_out_def
) {
2570 for (i
= 0; i
< MAX_OUTPUTS(hnode
); i
++) {
2571 kfree(task_arg_obj
.strm_out_def
[i
].sz_device
);
2572 task_arg_obj
.strm_out_def
[i
].sz_device
= NULL
;
2574 kfree(task_arg_obj
.strm_out_def
);
2575 task_arg_obj
.strm_out_def
= NULL
;
2577 if (task_arg_obj
.udsp_heap_res_addr
) {
2578 status
= proc_un_map(hnode
->hprocessor
, (void *)
2579 task_arg_obj
.udsp_heap_addr
,
2582 status
= proc_un_reserve_memory(hnode
->hprocessor
,
2587 #ifdef DSP_DMM_DEBUG
2588 status
= dmm_get_handle(p_proc_object
, &dmm_mgr
);
2590 dmm_mem_map_dump(dmm_mgr
);
2592 status
= DSP_EHANDLE
;
2596 if (node_type
!= NODE_MESSAGE
) {
2597 kfree(hnode
->stream_connect
);
2598 hnode
->stream_connect
= NULL
;
2600 kfree(hnode
->pstr_dev_name
);
2601 hnode
->pstr_dev_name
= NULL
;
2603 if (hnode
->ntfy_obj
) {
2604 ntfy_delete(hnode
->ntfy_obj
);
2605 kfree(hnode
->ntfy_obj
);
2606 hnode
->ntfy_obj
= NULL
;
2609 /* These were allocated in dcd_get_object_def (via node_allocate) */
2610 kfree(hnode
->dcd_props
.obj_data
.node_obj
.pstr_create_phase_fxn
);
2611 hnode
->dcd_props
.obj_data
.node_obj
.pstr_create_phase_fxn
= NULL
;
2613 kfree(hnode
->dcd_props
.obj_data
.node_obj
.pstr_execute_phase_fxn
);
2614 hnode
->dcd_props
.obj_data
.node_obj
.pstr_execute_phase_fxn
= NULL
;
2616 kfree(hnode
->dcd_props
.obj_data
.node_obj
.pstr_delete_phase_fxn
);
2617 hnode
->dcd_props
.obj_data
.node_obj
.pstr_delete_phase_fxn
= NULL
;
2619 kfree(hnode
->dcd_props
.obj_data
.node_obj
.pstr_i_alg_name
);
2620 hnode
->dcd_props
.obj_data
.node_obj
.pstr_i_alg_name
= NULL
;
2622 /* Free all SM address translator resources */
2624 (void)cmm_xlator_delete(xlator
, true); /* force free */
2628 kfree(hnode
->nldr_node_obj
);
2629 hnode
->nldr_node_obj
= NULL
;
2630 hnode
->hnode_mgr
= NULL
;
2638 * ======== delete_node_mgr ========
2640 * Frees the node manager.
2642 static void delete_node_mgr(struct node_mgr
*hnode_mgr
)
2644 struct node_object
*hnode
;
2647 /* Free resources */
2648 if (hnode_mgr
->hdcd_mgr
)
2649 dcd_destroy_manager(hnode_mgr
->hdcd_mgr
);
2651 /* Remove any elements remaining in lists */
2652 if (hnode_mgr
->node_list
) {
2653 while ((hnode
= (struct node_object
*)
2654 lst_get_head(hnode_mgr
->node_list
)))
2655 delete_node(hnode
, NULL
);
2657 DBC_ASSERT(LST_IS_EMPTY(hnode_mgr
->node_list
));
2658 kfree(hnode_mgr
->node_list
);
2660 mutex_destroy(&hnode_mgr
->node_mgr_lock
);
2661 if (hnode_mgr
->ntfy_obj
) {
2662 ntfy_delete(hnode_mgr
->ntfy_obj
);
2663 kfree(hnode_mgr
->ntfy_obj
);
2666 if (hnode_mgr
->pipe_map
)
2667 gb_delete(hnode_mgr
->pipe_map
);
2669 if (hnode_mgr
->pipe_done_map
)
2670 gb_delete(hnode_mgr
->pipe_done_map
);
2672 if (hnode_mgr
->chnl_map
)
2673 gb_delete(hnode_mgr
->chnl_map
);
2675 if (hnode_mgr
->dma_chnl_map
)
2676 gb_delete(hnode_mgr
->dma_chnl_map
);
2678 if (hnode_mgr
->zc_chnl_map
)
2679 gb_delete(hnode_mgr
->zc_chnl_map
);
2681 if (hnode_mgr
->disp_obj
)
2682 disp_delete(hnode_mgr
->disp_obj
);
2684 if (hnode_mgr
->strm_mgr_obj
)
2685 strm_delete(hnode_mgr
->strm_mgr_obj
);
2687 /* Delete the loader */
2688 if (hnode_mgr
->nldr_obj
)
2689 hnode_mgr
->nldr_fxns
.pfn_delete(hnode_mgr
->nldr_obj
);
2691 if (hnode_mgr
->loader_init
)
2692 hnode_mgr
->nldr_fxns
.pfn_exit();
2699 * ======== fill_stream_connect ========
2701 * Fills stream information.
2703 static void fill_stream_connect(struct node_object
*node1
,
2704 struct node_object
*node2
,
2705 u32 stream1
, u32 stream2
)
2708 struct dsp_streamconnect
*strm1
= NULL
;
2709 struct dsp_streamconnect
*strm2
= NULL
;
2710 enum node_type node1_type
= NODE_TASK
;
2711 enum node_type node2_type
= NODE_TASK
;
2713 node1_type
= node_get_type(node1
);
2714 node2_type
= node_get_type(node2
);
2715 if (node1
!= (struct node_object
*)DSP_HGPPNODE
) {
2717 if (node1_type
!= NODE_DEVICE
) {
2718 strm_index
= node1
->num_inputs
+
2719 node1
->num_outputs
- 1;
2720 strm1
= &(node1
->stream_connect
[strm_index
]);
2721 strm1
->cb_struct
= sizeof(struct dsp_streamconnect
);
2722 strm1
->this_node_stream_index
= stream1
;
2725 if (node2
!= (struct node_object
*)DSP_HGPPNODE
) {
2726 /* NODE == > NODE */
2727 if (node1_type
!= NODE_DEVICE
) {
2728 strm1
->connected_node
= node2
;
2729 strm1
->ui_connected_node_id
= node2
->node_uuid
;
2730 strm1
->connected_node_stream_index
= stream2
;
2731 strm1
->connect_type
= CONNECTTYPE_NODEOUTPUT
;
2733 if (node2_type
!= NODE_DEVICE
) {
2734 strm_index
= node2
->num_inputs
+
2735 node2
->num_outputs
- 1;
2736 strm2
= &(node2
->stream_connect
[strm_index
]);
2738 sizeof(struct dsp_streamconnect
);
2739 strm2
->this_node_stream_index
= stream2
;
2740 strm2
->connected_node
= node1
;
2741 strm2
->ui_connected_node_id
= node1
->node_uuid
;
2742 strm2
->connected_node_stream_index
= stream1
;
2743 strm2
->connect_type
= CONNECTTYPE_NODEINPUT
;
2745 } else if (node1_type
!= NODE_DEVICE
)
2746 strm1
->connect_type
= CONNECTTYPE_GPPOUTPUT
;
2749 DBC_ASSERT(node2
!= (struct node_object
*)DSP_HGPPNODE
);
2750 strm_index
= node2
->num_inputs
+ node2
->num_outputs
- 1;
2751 strm2
= &(node2
->stream_connect
[strm_index
]);
2752 strm2
->cb_struct
= sizeof(struct dsp_streamconnect
);
2753 strm2
->this_node_stream_index
= stream2
;
2754 strm2
->connect_type
= CONNECTTYPE_GPPINPUT
;
2759 * ======== fill_stream_def ========
2761 * Fills Stream attributes.
2763 static void fill_stream_def(struct node_object
*hnode
,
2764 struct node_strmdef
*pstrm_def
,
2765 struct dsp_strmattr
*pattrs
)
2767 struct node_mgr
*hnode_mgr
= hnode
->hnode_mgr
;
2769 if (pattrs
!= NULL
) {
2770 pstrm_def
->num_bufs
= pattrs
->num_bufs
;
2771 pstrm_def
->buf_size
=
2772 pattrs
->buf_size
/ hnode_mgr
->udsp_data_mau_size
;
2773 pstrm_def
->seg_id
= pattrs
->seg_id
;
2774 pstrm_def
->buf_alignment
= pattrs
->buf_alignment
;
2775 pstrm_def
->utimeout
= pattrs
->utimeout
;
2777 pstrm_def
->num_bufs
= DEFAULTNBUFS
;
2778 pstrm_def
->buf_size
=
2779 DEFAULTBUFSIZE
/ hnode_mgr
->udsp_data_mau_size
;
2780 pstrm_def
->seg_id
= DEFAULTSEGID
;
2781 pstrm_def
->buf_alignment
= DEFAULTALIGNMENT
;
2782 pstrm_def
->utimeout
= DEFAULTTIMEOUT
;
2787 * ======== free_stream ========
2789 * Updates the channel mask and frees the pipe id.
2791 static void free_stream(struct node_mgr
*hnode_mgr
, struct stream_chnl stream
)
2793 /* Free up the pipe id unless other node has not yet been deleted. */
2794 if (stream
.type
== NODECONNECT
) {
2795 if (gb_test(hnode_mgr
->pipe_done_map
, stream
.dev_id
)) {
2796 /* The other node has already been deleted */
2797 gb_clear(hnode_mgr
->pipe_done_map
, stream
.dev_id
);
2798 gb_clear(hnode_mgr
->pipe_map
, stream
.dev_id
);
2800 /* The other node has not been deleted yet */
2801 gb_set(hnode_mgr
->pipe_done_map
, stream
.dev_id
);
2803 } else if (stream
.type
== HOSTCONNECT
) {
2804 if (stream
.dev_id
< hnode_mgr
->ul_num_chnls
) {
2805 gb_clear(hnode_mgr
->chnl_map
, stream
.dev_id
);
2806 } else if (stream
.dev_id
< (2 * hnode_mgr
->ul_num_chnls
)) {
2808 gb_clear(hnode_mgr
->dma_chnl_map
, stream
.dev_id
-
2809 (1 * hnode_mgr
->ul_num_chnls
));
2810 } else if (stream
.dev_id
< (3 * hnode_mgr
->ul_num_chnls
)) {
2812 gb_clear(hnode_mgr
->zc_chnl_map
, stream
.dev_id
-
2813 (2 * hnode_mgr
->ul_num_chnls
));
2819 * ======== get_fxn_address ========
2821 * Retrieves the address for create, execute or delete phase for a node.
2823 static int get_fxn_address(struct node_object
*hnode
, u32
* fxn_addr
,
2826 char *pstr_fxn_name
= NULL
;
2827 struct node_mgr
*hnode_mgr
= hnode
->hnode_mgr
;
2829 DBC_REQUIRE(node_get_type(hnode
) == NODE_TASK
||
2830 node_get_type(hnode
) == NODE_DAISSOCKET
||
2831 node_get_type(hnode
) == NODE_MESSAGE
);
2836 hnode
->dcd_props
.obj_data
.node_obj
.pstr_create_phase_fxn
;
2840 hnode
->dcd_props
.obj_data
.node_obj
.pstr_execute_phase_fxn
;
2844 hnode
->dcd_props
.obj_data
.node_obj
.pstr_delete_phase_fxn
;
2847 /* Should never get here */
2853 hnode_mgr
->nldr_fxns
.pfn_get_fxn_addr(hnode
->nldr_node_obj
,
2854 pstr_fxn_name
, fxn_addr
);
2860 * ======== get_node_info ========
2862 * Retrieves the node information.
2864 void get_node_info(struct node_object
*hnode
, struct dsp_nodeinfo
*node_info
)
2869 DBC_REQUIRE(node_info
!= NULL
);
2871 node_info
->cb_struct
= sizeof(struct dsp_nodeinfo
);
2872 node_info
->nb_node_database_props
=
2873 hnode
->dcd_props
.obj_data
.node_obj
.ndb_props
;
2874 node_info
->execution_priority
= hnode
->prio
;
2875 node_info
->device_owner
= hnode
->device_owner
;
2876 node_info
->number_streams
= hnode
->num_inputs
+ hnode
->num_outputs
;
2877 node_info
->node_env
= hnode
->node_env
;
2879 node_info
->ns_execution_state
= node_get_state(hnode
);
2881 /* Copy stream connect data */
2882 for (i
= 0; i
< hnode
->num_inputs
+ hnode
->num_outputs
; i
++)
2883 node_info
->sc_stream_connection
[i
] = hnode
->stream_connect
[i
];
2888 * ======== get_node_props ========
2890 * Retrieve node properties.
2892 static int get_node_props(struct dcd_manager
*hdcd_mgr
,
2893 struct node_object
*hnode
,
2894 const struct dsp_uuid
*node_uuid
,
2895 struct dcd_genericobj
*dcd_prop
)
2898 struct node_msgargs
*pmsg_args
;
2899 struct node_taskargs
*task_arg_obj
;
2900 enum node_type node_type
= NODE_TASK
;
2901 struct dsp_ndbprops
*pndb_props
=
2902 &(dcd_prop
->obj_data
.node_obj
.ndb_props
);
2904 char sz_uuid
[MAXUUIDLEN
];
2906 status
= dcd_get_object_def(hdcd_mgr
, (struct dsp_uuid
*)node_uuid
,
2907 DSP_DCDNODETYPE
, dcd_prop
);
2910 hnode
->ntype
= node_type
= pndb_props
->ntype
;
2912 /* Create UUID value to set in registry. */
2913 uuid_uuid_to_string((struct dsp_uuid
*)node_uuid
, sz_uuid
,
2915 dev_dbg(bridge
, "(node) UUID: %s\n", sz_uuid
);
2917 /* Fill in message args that come from NDB */
2918 if (node_type
!= NODE_DEVICE
) {
2919 pmsg_args
= &(hnode
->create_args
.asa
.node_msg_args
);
2921 dcd_prop
->obj_data
.node_obj
.msg_segid
;
2922 pmsg_args
->notify_type
=
2923 dcd_prop
->obj_data
.node_obj
.msg_notify_type
;
2924 pmsg_args
->max_msgs
= pndb_props
->message_depth
;
2925 dev_dbg(bridge
, "(node) Max Number of Messages: 0x%x\n",
2926 pmsg_args
->max_msgs
);
2928 /* Copy device name */
2929 DBC_REQUIRE(pndb_props
->ac_name
);
2930 len
= strlen(pndb_props
->ac_name
);
2931 DBC_ASSERT(len
< MAXDEVNAMELEN
);
2932 hnode
->pstr_dev_name
= kzalloc(len
+ 1, GFP_KERNEL
);
2933 if (hnode
->pstr_dev_name
== NULL
) {
2936 strncpy(hnode
->pstr_dev_name
,
2937 pndb_props
->ac_name
, len
);
2942 /* Fill in create args that come from NDB */
2943 if (node_type
== NODE_TASK
|| node_type
== NODE_DAISSOCKET
) {
2944 task_arg_obj
= &(hnode
->create_args
.asa
.task_arg_obj
);
2945 task_arg_obj
->prio
= pndb_props
->prio
;
2946 task_arg_obj
->stack_size
= pndb_props
->stack_size
;
2947 task_arg_obj
->sys_stack_size
=
2948 pndb_props
->sys_stack_size
;
2949 task_arg_obj
->stack_seg
= pndb_props
->stack_seg
;
2950 dev_dbg(bridge
, "(node) Priority: 0x%x Stack Size: "
2951 "0x%x words System Stack Size: 0x%x words "
2952 "Stack Segment: 0x%x profile count : 0x%x\n",
2953 task_arg_obj
->prio
, task_arg_obj
->stack_size
,
2954 task_arg_obj
->sys_stack_size
,
2955 task_arg_obj
->stack_seg
,
2956 pndb_props
->count_profiles
);
2964 * ======== get_proc_props ========
2966 * Retrieve the processor properties.
2968 static int get_proc_props(struct node_mgr
*hnode_mgr
,
2969 struct dev_object
*hdev_obj
)
2971 struct cfg_hostres
*host_res
;
2972 struct bridge_dev_context
*pbridge_context
;
2975 status
= dev_get_bridge_context(hdev_obj
, &pbridge_context
);
2976 if (!pbridge_context
)
2980 host_res
= pbridge_context
->resources
;
2983 hnode_mgr
->ul_chnl_offset
= host_res
->dw_chnl_offset
;
2984 hnode_mgr
->ul_chnl_buf_size
= host_res
->dw_chnl_buf_size
;
2985 hnode_mgr
->ul_num_chnls
= host_res
->dw_num_chnls
;
2988 * PROC will add an API to get dsp_processorinfo.
2989 * Fill in default values for now.
2991 /* TODO -- Instead of hard coding, take from registry */
2992 hnode_mgr
->proc_family
= 6000;
2993 hnode_mgr
->proc_type
= 6410;
2994 hnode_mgr
->min_pri
= DSP_NODE_MIN_PRIORITY
;
2995 hnode_mgr
->max_pri
= DSP_NODE_MAX_PRIORITY
;
2996 hnode_mgr
->udsp_word_size
= DSPWORDSIZE
;
2997 hnode_mgr
->udsp_data_mau_size
= DSPWORDSIZE
;
2998 hnode_mgr
->udsp_mau_size
= 1;
3005 * ======== node_get_uuid_props ========
3007 * Fetch Node UUID properties from DCD/DOF file.
3009 int node_get_uuid_props(void *hprocessor
,
3010 const struct dsp_uuid
*node_uuid
,
3011 struct dsp_ndbprops
*node_props
)
3013 struct node_mgr
*hnode_mgr
= NULL
;
3014 struct dev_object
*hdev_obj
;
3016 struct dcd_nodeprops dcd_node_props
;
3017 struct dsp_processorstate proc_state
;
3019 DBC_REQUIRE(refs
> 0);
3020 DBC_REQUIRE(hprocessor
!= NULL
);
3021 DBC_REQUIRE(node_uuid
!= NULL
);
3023 if (hprocessor
== NULL
|| node_uuid
== NULL
) {
3027 status
= proc_get_state(hprocessor
, &proc_state
,
3028 sizeof(struct dsp_processorstate
));
3031 /* If processor is in error state then don't attempt
3032 to send the message */
3033 if (proc_state
.proc_state
== PROC_ERROR
) {
3038 status
= proc_get_dev_object(hprocessor
, &hdev_obj
);
3040 status
= dev_get_node_manager(hdev_obj
, &hnode_mgr
);
3041 if (hnode_mgr
== NULL
) {
3048 * Enter the critical section. This is needed because
3049 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3050 * which needs to be protected in order to not corrupt the zlib manager
3053 mutex_lock(&hnode_mgr
->node_mgr_lock
);
3055 dcd_node_props
.pstr_create_phase_fxn
= NULL
;
3056 dcd_node_props
.pstr_execute_phase_fxn
= NULL
;
3057 dcd_node_props
.pstr_delete_phase_fxn
= NULL
;
3058 dcd_node_props
.pstr_i_alg_name
= NULL
;
3060 status
= dcd_get_object_def(hnode_mgr
->hdcd_mgr
,
3061 (struct dsp_uuid
*)node_uuid
, DSP_DCDNODETYPE
,
3062 (struct dcd_genericobj
*)&dcd_node_props
);
3065 *node_props
= dcd_node_props
.ndb_props
;
3066 kfree(dcd_node_props
.pstr_create_phase_fxn
);
3068 kfree(dcd_node_props
.pstr_execute_phase_fxn
);
3070 kfree(dcd_node_props
.pstr_delete_phase_fxn
);
3072 kfree(dcd_node_props
.pstr_i_alg_name
);
3074 /* Leave the critical section, we're done. */
3075 mutex_unlock(&hnode_mgr
->node_mgr_lock
);
3081 * ======== get_rms_fxns ========
3083 * Retrieve the RMS functions.
3085 static int get_rms_fxns(struct node_mgr
*hnode_mgr
)
3088 struct dev_object
*dev_obj
= hnode_mgr
->hdev_obj
;
3091 static char *psz_fxns
[NUMRMSFXNS
] = {
3092 "RMS_queryServer", /* RMSQUERYSERVER */
3093 "RMS_configureServer", /* RMSCONFIGURESERVER */
3094 "RMS_createNode", /* RMSCREATENODE */
3095 "RMS_executeNode", /* RMSEXECUTENODE */
3096 "RMS_deleteNode", /* RMSDELETENODE */
3097 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3098 "RMS_readMemory", /* RMSREADMEMORY */
3099 "RMS_writeMemory", /* RMSWRITEMEMORY */
3100 "RMS_copy", /* RMSCOPY */
3103 for (i
= 0; i
< NUMRMSFXNS
; i
++) {
3104 status
= dev_get_symbol(dev_obj
, psz_fxns
[i
],
3105 &(hnode_mgr
->ul_fxn_addrs
[i
]));
3107 if (status
== -ESPIPE
) {
3109 * May be loaded dynamically (in the future),
3110 * but return an error for now.
3112 dev_dbg(bridge
, "%s: RMS function: %s currently"
3113 " not loaded\n", __func__
, psz_fxns
[i
]);
3115 dev_dbg(bridge
, "%s: Symbol not found: %s "
3116 "status = 0x%x\n", __func__
,
3117 psz_fxns
[i
], status
);
3127 * ======== ovly ========
3129 * Called during overlay.Sends command to RMS to copy a block of data.
3131 static u32
ovly(void *priv_ref
, u32 dsp_run_addr
, u32 dsp_load_addr
,
3132 u32 ul_num_bytes
, u32 mem_space
)
3134 struct node_object
*hnode
= (struct node_object
*)priv_ref
;
3135 struct node_mgr
*hnode_mgr
;
3140 struct bridge_dev_context
*hbridge_context
;
3141 /* Function interface to Bridge driver*/
3142 struct bridge_drv_interface
*intf_fxns
;
3146 hnode_mgr
= hnode
->hnode_mgr
;
3148 ul_size
= ul_num_bytes
/ hnode_mgr
->udsp_word_size
;
3149 ul_timeout
= hnode
->utimeout
;
3151 /* Call new MemCopy function */
3152 intf_fxns
= hnode_mgr
->intf_fxns
;
3153 status
= dev_get_bridge_context(hnode_mgr
->hdev_obj
, &hbridge_context
);
3156 (*intf_fxns
->pfn_brd_mem_copy
) (hbridge_context
,
3157 dsp_run_addr
, dsp_load_addr
,
3158 ul_num_bytes
, (u32
) mem_space
);
3160 ul_bytes
= ul_num_bytes
;
3162 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3165 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3173 * ======== mem_write ========
3175 static u32
mem_write(void *priv_ref
, u32 dsp_add
, void *pbuf
,
3176 u32 ul_num_bytes
, u32 mem_space
)
3178 struct node_object
*hnode
= (struct node_object
*)priv_ref
;
3179 struct node_mgr
*hnode_mgr
;
3183 struct bridge_dev_context
*hbridge_context
;
3184 /* Function interface to Bridge driver */
3185 struct bridge_drv_interface
*intf_fxns
;
3188 DBC_REQUIRE(mem_space
& DBLL_CODE
|| mem_space
& DBLL_DATA
);
3190 hnode_mgr
= hnode
->hnode_mgr
;
3192 ul_timeout
= hnode
->utimeout
;
3193 mem_sect_type
= (mem_space
& DBLL_CODE
) ? RMS_CODE
: RMS_DATA
;
3195 /* Call new MemWrite function */
3196 intf_fxns
= hnode_mgr
->intf_fxns
;
3197 status
= dev_get_bridge_context(hnode_mgr
->hdev_obj
, &hbridge_context
);
3198 status
= (*intf_fxns
->pfn_brd_mem_write
) (hbridge_context
, pbuf
,
3199 dsp_add
, ul_num_bytes
, mem_sect_type
);
3201 return ul_num_bytes
;
3204 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3206 * ======== node_find_addr ========
3208 int node_find_addr(struct node_mgr
*node_mgr
, u32 sym_addr
,
3209 u32 offset_range
, void *sym_addr_output
, char *sym_name
)
3211 struct node_object
*node_obj
;
3212 int status
= -ENOENT
;
3215 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__
,
3216 (unsigned int) node_mgr
,
3217 sym_addr
, offset_range
,
3218 (unsigned int) sym_addr_output
, sym_name
);
3220 node_obj
= (struct node_object
*)(node_mgr
->node_list
->head
.next
);
3222 for (n
= 0; n
< node_mgr
->num_nodes
; n
++) {
3223 status
= nldr_find_addr(node_obj
->nldr_node_obj
, sym_addr
,
3224 offset_range
, sym_addr_output
, sym_name
);
3229 node_obj
= (struct node_object
*) (node_obj
->list_elem
.next
);