GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / tidspbridge / rmgr / node.c
blob6e9441e21265c343fa23847849cafac6a8e3ca04
1 /*
2 * node.c
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge Node Manager.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
23 /* ----------------------------------- DSP/BIOS Bridge */
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/cfg.h>
31 #include <dspbridge/list.h>
32 #include <dspbridge/memdefs.h>
33 #include <dspbridge/proc.h>
34 #include <dspbridge/strm.h>
35 #include <dspbridge/sync.h>
36 #include <dspbridge/ntfy.h>
38 /* ----------------------------------- Platform Manager */
39 #include <dspbridge/cmm.h>
40 #include <dspbridge/cod.h>
41 #include <dspbridge/dev.h>
42 #include <dspbridge/msg.h>
44 /* ----------------------------------- Resource Manager */
45 #include <dspbridge/dbdcd.h>
46 #include <dspbridge/disp.h>
47 #include <dspbridge/rms_sh.h>
49 /* ----------------------------------- Link Driver */
50 #include <dspbridge/dspdefs.h>
51 #include <dspbridge/dspioctl.h>
53 /* ----------------------------------- Others */
54 #include <dspbridge/gb.h>
55 #include <dspbridge/uuidutil.h>
57 /* ----------------------------------- This */
58 #include <dspbridge/nodepriv.h>
59 #include <dspbridge/node.h>
60 #include <dspbridge/dmm.h>
62 /* Static/Dynamic Loader includes */
63 #include <dspbridge/dbll.h>
64 #include <dspbridge/nldr.h>
66 #include <dspbridge/drv.h>
67 #include <dspbridge/drvdefs.h>
68 #include <dspbridge/resourcecleanup.h>
69 #include <_tiomap.h>
71 #include <dspbridge/dspdeh.h>
/* Device-name prefixes used when synthesizing stream device names
 * ("/host<n>" for GPP channels, "/dbpipe<n>" for node-to-node pipes). */
73 #define HOSTPREFIX	  "/host"
74 #define PIPEPREFIX	  "/dbpipe"
/* Max stream counts come from the node's DCD database properties. */
76 #define MAX_INPUTS(h)  \
77 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
78 #define MAX_OUTPUTS(h) \
79 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
/* Simple accessors for node priority and state (no locking implied). */
81 #define NODE_GET_PRIORITY(h) ((h)->prio)
82 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
83 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
85 #define MAXPIPES	100	/* Max # of /pipe connections (CSL limit) */
86 #define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
/* Buffer sizes for the synthesized "/dbpipe<n>" and "/host<n>" names;
 * sizeof(PREFIX) already includes the NUL terminator. */
88 #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
89 #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
91 #define MAXDEVNAMELEN	32	/* dsp_ndbprops.ac_name size */
/* Node phase identifiers (create/execute/delete function of a node). */
92 #define CREATEPHASE	1
93 #define EXECUTEPHASE	2
94 #define DELETEPHASE	3
96 /* Define default STRM parameters */
98 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
99 * or make defaults configurable.
101 #define DEFAULTBUFSIZE		32
102 #define DEFAULTNBUFS		2
103 #define DEFAULTSEGID		0
104 #define DEFAULTALIGNMENT	0
105 #define DEFAULTTIMEOUT		10000
/* Indices of the RMS (Resource Manager Server) functions inside
 * node_mgr.ul_fxn_addrs[]; NUMRMSFXNS below must cover 0..8. */
107 #define RMSQUERYSERVER		0
108 #define RMSCONFIGURESERVER	1
109 #define RMSCREATENODE		2
110 #define RMSEXECUTENODE		3
111 #define RMSDELETENODE		4
112 #define RMSCHANGENODEPRIORITY	5
113 #define RMSREADMEMORY		6
114 #define RMSWRITEMEMORY		7
115 #define RMSCOPY			8
116 #define MAXTIMEOUT		2000
118 #define NUMRMSFXNS		9
120 #define PWR_TIMEOUT		500	/* default PWR timeout in msec */
122 #define STACKSEGLABEL	"L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
/*
 * node_mgr: per-DSP-device bookkeeping for all allocated nodes.
 * Obtained via dev_get_node_manager(); mutable members (node_list,
 * counters, the gb_t_map bitmaps) are guarded by node_mgr_lock in the
 * functions visible in this file.
 */
125 * ======== node_mgr ========
127 struct node_mgr {
128 struct dev_object *hdev_obj;	/* Device object */
129 /* Function interface to Bridge driver */
130 struct bridge_drv_interface *intf_fxns;
131 struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
132 struct disp_object *disp_obj;	/* Node dispatcher */
133 struct lst_list *node_list;	/* List of all allocated nodes */
134 u32 num_nodes;		/* Number of nodes in node_list */
135 u32 num_created;	/* Number of nodes *created* on DSP */
136 struct gb_t_map *pipe_map;	/* Pipe connection bit map */
137 struct gb_t_map *pipe_done_map;	/* Pipes that are half free */
138 struct gb_t_map *chnl_map;	/* Channel allocation bit map */
139 struct gb_t_map *dma_chnl_map;	/* DMA Channel allocation bit map */
140 struct gb_t_map *zc_chnl_map;	/* Zero-Copy Channel alloc bit map */
141 struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
142 struct mutex node_mgr_lock;	/* For critical sections */
143 u32 ul_fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
144 struct msg_mgr *msg_mgr_obj;
146 /* Processor properties needed by Node Dispatcher */
147 u32 ul_num_chnls;	/* Total number of channels */
148 u32 ul_chnl_offset;	/* Offset of chnl ids rsvd for RMS */
149 u32 ul_chnl_buf_size;	/* Buffer size for data to RMS */
150 int proc_family;	/* eg, 5000 */
151 int proc_type;		/* eg, 5510 */
152 u32 udsp_word_size;	/* Size of DSP word on host bytes */
153 u32 udsp_data_mau_size;	/* Size of DSP data MAU */
154 u32 udsp_mau_size;	/* Size of MAU */
155 s32 min_pri;		/* Minimum runtime priority for node */
156 s32 max_pri;		/* Maximum runtime priority for node */
158 struct strm_mgr *strm_mgr_obj;	/* STRM manager */
160 /* Loader properties */
161 struct nldr_object *nldr_obj;	/* Handle to loader */
162 struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
163 bool loader_init;	/* Loader Init function succeeded? */
/*
 * connecttype: how one endpoint of a node stream is wired.
 * Set by node_connect() on the corresponding stream_chnl.
 */
167 * ======== connecttype ========
169 enum connecttype {
170 NOTCONNECTED = 0,	/* stream endpoint not yet connected */
171 NODECONNECT,		/* connected to another DSP node via a pipe */
172 HOSTCONNECT,		/* connected to the GPP via a channel */
173 DEVICECONNECT,		/* connected to a device node */
/*
 * stream_chnl: one endpoint of a node's data stream — records the
 * connection kind and the pipe/channel id reserved for it.
 */
177 * ======== stream_chnl ========
179 struct stream_chnl {
180 enum connecttype type;	/* Type of stream connection */
181 u32 dev_id;		/* pipe or channel id */
/*
 * node_object: per-node state on the GPP side, allocated by
 * node_allocate() and linked into node_mgr.node_list (list_elem must
 * stay first so the object can be cast to struct list_head *).
 */
185 * ======== node_object ========
187 struct node_object {
188 struct list_head list_elem;
189 struct node_mgr *hnode_mgr;	/* The manager of this node */
190 struct proc_object *hprocessor;	/* Back pointer to processor */
191 struct dsp_uuid node_uuid;	/* Node's ID */
192 s32 prio;		/* Node's current priority */
193 u32 utimeout;		/* Timeout for blocking NODE calls */
194 u32 heap_size;		/* Heap Size */
195 u32 udsp_heap_virt_addr;	/* Heap virtual address (DSP side) */
196 u32 ugpp_heap_virt_addr;	/* Heap virtual address (GPP side) */
197 enum node_type ntype;	/* Type of node: message, task, etc */
198 enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
199 u32 num_inputs;		/* Current number of inputs */
200 u32 num_outputs;	/* Current number of outputs */
201 u32 max_input_index;	/* Current max input stream index */
202 u32 max_output_index;	/* Current max output stream index */
203 struct stream_chnl *inputs;	/* Node's input streams */
204 struct stream_chnl *outputs;	/* Node's output streams */
205 struct node_createargs create_args;	/* Args for node create func */
206 nodeenv node_env;	/* Environment returned by RMS */
207 struct dcd_genericobj dcd_props;	/* Node properties from DCD */
208 struct dsp_cbdata *pargs;	/* Optional args to pass to node */
209 struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
210 char *pstr_dev_name;	/* device name, if device node */
211 struct sync_object *sync_done;	/* Synchronize node_terminate */
212 s32 exit_status;	/* execute function return status */
214 /* Information needed for node_get_attr() */
215 void *device_owner;	/* If dev node, task that owns it */
216 u32 num_gpp_inputs;	/* Current # of from GPP streams */
217 u32 num_gpp_outputs;	/* Current # of to GPP streams */
218 /* Current stream connections */
219 struct dsp_streamconnect *stream_connect;
221 /* Message queue */
222 struct msg_queue *msg_queue_obj;
224 /* These fields used for SM messaging */
225 struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */
227 /* Handle to pass to dynamic loader */
228 struct nldr_nodeobject *nldr_node_obj;
229 bool loaded;		/* Code is (dynamically) loaded */
230 bool phase_split;	/* Phases split in many libs or ovly */
/* Defaults applied by node_alloc_msg_buf() when the caller passes a
 * NULL dsp_bufferattr: SM segment 1, no special alignment. */
234 /* Default buffer attributes */
235 static struct dsp_bufferattr node_dfltbufattrs = {
236 0,			/* cb_struct */
237 1,			/* segment_id */
238 0,			/* buf_alignment */
/* Forward declarations of file-local helpers (defined later in node.c). */
241 static void delete_node(struct node_object *hnode,
242 struct process_context *pr_ctxt);
243 static void delete_node_mgr(struct node_mgr *hnode_mgr);
244 static void fill_stream_connect(struct node_object *node1,
245 struct node_object *node2, u32 stream1,
246 u32 stream2);
247 static void fill_stream_def(struct node_object *hnode,
248 struct node_strmdef *pstrm_def,
249 struct dsp_strmattr *pattrs);
250 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
251 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
252 u32 phase);
253 static int get_node_props(struct dcd_manager *hdcd_mgr,
254 struct node_object *hnode,
255 const struct dsp_uuid *node_uuid,
256 struct dcd_genericobj *dcd_prop);
257 static int get_proc_props(struct node_mgr *hnode_mgr,
258 struct dev_object *hdev_obj);
259 static int get_rms_fxns(struct node_mgr *hnode_mgr);
/* Callbacks handed to the dynamic loader: overlay copy and DSP memory
 * write, respectively. */
260 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
261 u32 ul_num_bytes, u32 mem_space);
262 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
263 u32 ul_num_bytes, u32 mem_space);
265 static u32 refs;		/* module reference count */
267 /* Dynamic loader functions. */
/* Function table copied into each node_mgr (nldr_fxns member); the
 * entry order must match struct node_ldr_fxns. */
268 static struct node_ldr_fxns nldr_fxns = {
269 nldr_allocate,
270 nldr_create,
271 nldr_delete,
272 nldr_exit,
273 nldr_get_fxn_addr,
274 nldr_init,
275 nldr_load,
276 nldr_unload,
/*
 * node_get_state() - return the current state of a node.
 * @hnode: node handle (a struct node_object *); may be NULL.
 *
 * Returns the node's node_state, or -1 if @hnode is NULL. Note the -1
 * is returned through the enum node_state return type, so callers must
 * treat it as an out-of-band sentinel rather than a real state.
 * No locking is taken here; the read is a best-effort snapshot.
 */
279 enum node_state node_get_state(void *hnode)
281 struct node_object *pnode = (struct node_object *)hnode;
282 if (!pnode)
283 return -1;
284 else
285 return pnode->node_state;
289 * ======== node_allocate ========
290 * Purpose:
291 * Allocate GPP resources to manage a node on the DSP.
/*
 * Parameters (as established by the DBC checks and uses below):
 *   hprocessor - processor handle, must be non-NULL.
 *   node_uuid  - UUID used to look up node properties in the DCD.
 *   pargs      - optional create-phase message args, copied into the node.
 *   attr_in    - optional overrides (prio, utimeout, user-supplied heap).
 *   noderes    - out: node resource handle; left NULL on failure.
 *   pr_ctxt    - per-process resource tracking context.
 * Returns 0 on success or a negative errno-style code.
 *
 * On any failure after pnode is allocated, delete_node() tears down the
 * partially-built node.
 */
293 int node_allocate(struct proc_object *hprocessor,
294 const struct dsp_uuid *node_uuid,
295 const struct dsp_cbdata *pargs,
296 const struct dsp_nodeattrin *attr_in,
297 struct node_res_object **noderes,
298 struct process_context *pr_ctxt)
300 struct node_mgr *hnode_mgr;
301 struct dev_object *hdev_obj;
302 struct node_object *pnode = NULL;
303 enum node_type node_type = NODE_TASK;
304 struct node_msgargs *pmsg_args;
305 struct node_taskargs *ptask_args;
306 u32 num_streams;
307 struct bridge_drv_interface *intf_fxns;
308 int status = 0;
309 struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
310 u32 proc_id;
311 u32 pul_value;
312 u32 dynext_base;
313 u32 off_set = 0;
314 u32 ul_stack_seg_addr, ul_stack_seg_val;
315 u32 ul_gpp_mem_base;
316 struct cfg_hostres *host_res;
317 struct bridge_dev_context *pbridge_context;
318 u32 mapped_addr = 0;
319 u32 map_attrs = 0x0;
320 struct dsp_processorstate proc_state;
321 #ifdef DSP_DMM_DEBUG
322 struct dmm_object *dmm_mgr;
323 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
324 #endif
326 void *node_res;
328 DBC_REQUIRE(refs > 0);
329 DBC_REQUIRE(hprocessor != NULL);
330 DBC_REQUIRE(noderes != NULL);
331 DBC_REQUIRE(node_uuid != NULL);
333 *noderes = NULL;
335 status = proc_get_processor_id(hprocessor, &proc_id);
/* NOTE(review): the status of proc_get_processor_id() is not checked
 * here; proc_id is tested directly and, on mismatch, whatever status
 * the call returned is propagated via func_end. */
337 if (proc_id != DSP_UNIT)
338 goto func_end;
340 status = proc_get_dev_object(hprocessor, &hdev_obj);
341 if (!status) {
342 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
343 if (hnode_mgr == NULL)
344 status = -EPERM;
348 if (status)
349 goto func_end;
351 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
352 if (!pbridge_context) {
353 status = -EFAULT;
354 goto func_end;
357 status = proc_get_state(hprocessor, &proc_state,
358 sizeof(struct dsp_processorstate));
359 if (status)
360 goto func_end;
361 /* If processor is in error state then don't attempt
362 to send the message */
363 if (proc_state.proc_state == PROC_ERROR) {
364 status = -EPERM;
365 goto func_end;
368 /* Assuming that 0 is not a valid function address */
369 if (hnode_mgr->ul_fxn_addrs[0] == 0) {
370 /* No RMS on target - we currently can't handle this */
371 pr_err("%s: Failed, no RMS in base image\n", __func__);
372 status = -EPERM;
373 } else {
374 /* Validate attr_in fields, if non-NULL */
375 if (attr_in) {
376 /* Check if attr_in->prio is within range */
377 if (attr_in->prio < hnode_mgr->min_pri ||
378 attr_in->prio > hnode_mgr->max_pri)
379 status = -EDOM;
382 /* Allocate node object and fill in */
383 if (status)
384 goto func_end;
386 pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
387 if (pnode == NULL) {
388 status = -ENOMEM;
389 goto func_end;
391 pnode->hnode_mgr = hnode_mgr;
392 /* This critical section protects get_node_props */
393 mutex_lock(&hnode_mgr->node_mgr_lock);
395 /* Get dsp_ndbprops from node database */
396 status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
397 &(pnode->dcd_props));
398 if (status)
399 goto func_cont;
401 pnode->node_uuid = *node_uuid;
402 pnode->hprocessor = hprocessor;
403 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
404 pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
405 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
407 /* Currently only C64 DSP builds support Node Dynamic * heaps */
408 /* Allocate memory for node heap */
409 pnode->create_args.asa.task_arg_obj.heap_size = 0;
410 pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
411 pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
412 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
413 if (!attr_in)
414 goto func_cont;
416 /* Check if we have a user allocated node heap */
417 if (!(attr_in->pgpp_virt_addr))
418 goto func_cont;
420 /* check for page aligned Heap size */
421 if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
422 pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
423 __func__, attr_in->heap_size);
424 status = -EINVAL;
425 } else {
426 pnode->create_args.asa.task_arg_obj.heap_size =
427 attr_in->heap_size;
428 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
429 (u32) attr_in->pgpp_virt_addr;
431 if (status)
432 goto func_cont;
/* Reserve DSP virtual space for the heap (+1 guard page), then map the
 * user's GPP buffer into it below via proc_map(). */
434 status = proc_reserve_memory(hprocessor,
435 pnode->create_args.asa.task_arg_obj.
436 heap_size + PAGE_SIZE,
437 (void **)&(pnode->create_args.asa.
438 task_arg_obj.udsp_heap_res_addr),
439 pr_ctxt);
440 if (status) {
441 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
442 __func__, status);
443 goto func_cont;
445 #ifdef DSP_DMM_DEBUG
446 status = dmm_get_handle(p_proc_object, &dmm_mgr);
447 if (!dmm_mgr) {
448 status = DSP_EHANDLE;
449 goto func_cont;
452 dmm_mem_map_dump(dmm_mgr);
453 #endif
455 map_attrs |= DSP_MAPLITTLEENDIAN;
456 map_attrs |= DSP_MAPELEMSIZE32;
457 map_attrs |= DSP_MAPVIRTUALADDR;
458 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
459 pnode->create_args.asa.task_arg_obj.heap_size,
460 (void *)pnode->create_args.asa.task_arg_obj.
461 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
462 pr_ctxt);
463 if (status)
464 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
465 __func__, status);
466 else
467 pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
468 (u32) mapped_addr;
/* Common exit of the locked section: release node_mgr_lock, then
 * continue building the node (or fall through to cleanup on error). */
470 func_cont:
471 mutex_unlock(&hnode_mgr->node_mgr_lock);
472 if (attr_in != NULL) {
473 /* Overrides of NBD properties */
474 pnode->utimeout = attr_in->utimeout;
475 pnode->prio = attr_in->prio;
477 /* Create object to manage notifications */
478 if (!status) {
479 pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
480 GFP_KERNEL);
481 if (pnode->ntfy_obj)
482 ntfy_init(pnode->ntfy_obj);
483 else
484 status = -ENOMEM;
487 if (!status) {
488 node_type = node_get_type(pnode);
489 /* Allocate dsp_streamconnect array for device, task, and
490 * dais socket nodes. */
491 if (node_type != NODE_MESSAGE) {
492 num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
493 pnode->stream_connect = kzalloc(num_streams *
494 sizeof(struct dsp_streamconnect),
495 GFP_KERNEL);
496 if (num_streams > 0 && pnode->stream_connect == NULL)
497 status = -ENOMEM;
500 if (!status && (node_type == NODE_TASK ||
501 node_type == NODE_DAISSOCKET)) {
502 /* Allocate arrays for maintaining stream connections */
503 pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
504 sizeof(struct stream_chnl), GFP_KERNEL);
505 pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
506 sizeof(struct stream_chnl), GFP_KERNEL);
507 ptask_args = &(pnode->create_args.asa.task_arg_obj);
508 ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
509 sizeof(struct node_strmdef),
510 GFP_KERNEL);
511 ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
512 sizeof(struct node_strmdef),
513 GFP_KERNEL);
514 if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
515 ptask_args->strm_in_def
516 == NULL))
517 || (MAX_OUTPUTS(pnode) > 0
518 && (pnode->outputs == NULL
519 || ptask_args->strm_out_def == NULL)))
520 status = -ENOMEM;
523 if (!status && (node_type != NODE_DEVICE)) {
524 /* Create an event that will be posted when RMS_EXIT is
525 * received. */
526 pnode->sync_done = kzalloc(sizeof(struct sync_object),
527 GFP_KERNEL);
528 if (pnode->sync_done)
529 sync_init_event(pnode->sync_done);
530 else
531 status = -ENOMEM;
533 if (!status) {
534 /*Get the shared mem mgr for this nodes dev object */
535 status = cmm_get_handle(hprocessor, &hcmm_mgr);
536 if (!status) {
537 /* Allocate a SM addr translator for this node
538 * w/ deflt attr */
539 status = cmm_xlator_create(&pnode->xlator,
540 hcmm_mgr, NULL);
543 if (!status) {
544 /* Fill in message args */
545 if ((pargs != NULL) && (pargs->cb_data > 0)) {
546 pmsg_args =
547 &(pnode->create_args.asa.node_msg_args);
548 pmsg_args->pdata = kzalloc(pargs->cb_data,
549 GFP_KERNEL);
550 if (pmsg_args->pdata == NULL) {
551 status = -ENOMEM;
552 } else {
553 pmsg_args->arg_length = pargs->cb_data;
554 memcpy(pmsg_args->pdata,
555 pargs->node_data,
556 pargs->cb_data);
562 if (!status && node_type != NODE_DEVICE) {
563 /* Create a message queue for this node */
564 intf_fxns = hnode_mgr->intf_fxns;
565 status =
566 (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
567 &pnode->msg_queue_obj,
569 pnode->create_args.asa.
570 node_msg_args.max_msgs,
571 pnode);
574 if (!status) {
575 /* Create object for dynamic loading */
577 status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
578 (void *)pnode,
579 &pnode->dcd_props.
580 obj_data.node_obj,
581 &pnode->
582 nldr_node_obj,
583 &pnode->phase_split);
586 /* Compare value read from Node Properties and check if it is same as
587 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
588 * GPP Address, Read the value in that address and override the
589 * stack_seg value in task args */
590 if (!status &&
591 (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
592 stack_seg_name != NULL) {
593 if (strcmp((char *)
594 pnode->dcd_props.obj_data.node_obj.ndb_props.
595 stack_seg_name, STACKSEGLABEL) == 0) {
596 status =
597 hnode_mgr->nldr_fxns.
598 pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
599 &dynext_base);
600 if (status)
601 pr_err("%s: Failed to get addr for DYNEXT_BEG"
602 " status = 0x%x\n", __func__, status);
604 status =
605 hnode_mgr->nldr_fxns.
606 pfn_get_fxn_addr(pnode->nldr_node_obj,
607 "L1DSRAM_HEAP", &pul_value);
609 if (status)
610 pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
611 " status = 0x%x\n", __func__, status);
613 host_res = pbridge_context->resources;
614 if (!host_res)
615 status = -EPERM;
617 if (status) {
618 pr_err("%s: Failed to get host resource, status"
619 " = 0x%x\n", __func__, status);
620 goto func_end;
/* Translate the DSP-side symbol offset into a GPP address and read the
 * stack segment value through it. */
623 ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
624 off_set = pul_value - dynext_base;
625 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
626 ul_stack_seg_val = readl(ul_stack_seg_addr);
628 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
629 " 0x%x\n", __func__, ul_stack_seg_val,
630 ul_stack_seg_addr);
632 pnode->create_args.asa.task_arg_obj.stack_seg =
633 ul_stack_seg_val;
638 if (!status) {
639 /* Add the node to the node manager's list of allocated
640 * nodes. */
641 lst_init_elem((struct list_head *)pnode);
642 NODE_SET_STATE(pnode, NODE_ALLOCATED);
644 mutex_lock(&hnode_mgr->node_mgr_lock);
646 lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
647 ++(hnode_mgr->num_nodes);
649 /* Exit critical section */
650 mutex_unlock(&hnode_mgr->node_mgr_lock);
652 /* Preset this to assume phases are split
653 * (for overlay and dll) */
654 pnode->phase_split = true;
656 /* Notify all clients registered for DSP_NODESTATECHANGE. */
657 proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
658 } else {
659 /* Cleanup */
660 if (pnode)
661 delete_node(pnode, pr_ctxt);
665 if (!status) {
666 status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
667 if (status) {
668 delete_node(pnode, pr_ctxt);
669 goto func_end;
672 *noderes = (struct node_res_object *)node_res;
673 drv_proc_node_update_heap_status(node_res, true);
674 drv_proc_node_update_status(node_res, true);
/* Postcondition: *noderes is non-NULL exactly when status == 0. */
676 DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
677 func_end:
678 dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
679 "node_res: %p status: 0x%x\n", __func__, hprocessor,
680 node_uuid, pargs, attr_in, noderes, status);
681 return status;
685 * ======== node_alloc_msg_buf ========
686 * Purpose:
687 * Allocates buffer for zero copy messaging.
/*
 * @hnode:   node handle; must be non-NULL and not a device node.
 * @usize:   requested buffer size (DBC requires > 0).
 * @pattr:   buffer attributes; NULL selects node_dfltbufattrs.
 * @pbuffer: in/out buffer pointer (virtual address when the
 *           MEM_SET/GETVIRTUALSEGID flags are used, otherwise receives
 *           the allocated SM buffer).
 * Returns 0 on success or a negative errno-style code.
 *
 * NOTE(review): when proc_id != DSP_UNIT the function jumps to func_end
 * with whatever status proc_get_processor_id() returned (0 on success),
 * so it can return 0 without allocating — verify callers tolerate this.
 */
689 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
690 struct dsp_bufferattr *pattr,
691 u8 **pbuffer)
693 struct node_object *pnode = (struct node_object *)hnode;
694 int status = 0;
695 bool va_flag = false;
696 bool set_info;
697 u32 proc_id;
699 DBC_REQUIRE(refs > 0);
700 DBC_REQUIRE(pbuffer != NULL);
702 DBC_REQUIRE(usize > 0);
704 if (!pnode)
705 status = -EFAULT;
706 else if (node_get_type(pnode) == NODE_DEVICE)
707 status = -EPERM;
709 if (status)
710 goto func_end;
712 if (pattr == NULL)
713 pattr = &node_dfltbufattrs;	/* set defaults */
715 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
716 if (proc_id != DSP_UNIT) {
717 DBC_ASSERT(NULL);
718 goto func_end;
720 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
721 * virt address, so set this info in this node's translator
722 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
723 * virtual address from node's translator. */
724 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
725 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
726 va_flag = true;
727 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
728 true : false;
729 /* Clear mask bits */
730 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
731 /* Set/get this node's translators virtual address base/size */
732 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
733 pattr->segment_id, set_info);
735 if (!status && (!va_flag)) {
736 if (pattr->segment_id != 1) {
737 /* Node supports single SM segment only. */
738 status = -EBADR;
740 /* Arbitrary SM buffer alignment not supported for host side
741 * allocs, but guaranteed for the following alignment
742 * values. */
743 switch (pattr->buf_alignment) {
744 case 0:
745 case 1:
746 case 2:
747 case 4:
748 break;
749 default:
750 /* alignment value not supported */
751 status = -EPERM;
752 break;
754 if (!status) {
755 /* allocate physical buffer from seg_id in node's
756 * translator */
757 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
758 usize);
759 if (*pbuffer == NULL) {
760 pr_err("%s: error - Out of shared memory\n",
761 __func__);
762 status = -ENOMEM;
766 func_end:
767 return status;
771 * ======== node_change_priority ========
772 * Purpose:
773 * Change the priority of a node in the allocated state, or that is
774 * currently running or paused on the target.
/*
 * @hnode: node handle; must be non-NULL with a valid hnode_mgr.
 * @prio:  new priority; must lie within [min_pri, max_pri] of the
 *         node manager.
 * Returns 0 on success, -EFAULT/-EPERM/-EDOM on validation failure,
 * -EBADR if the node is in a state that cannot take a priority change.
 *
 * Only task and DAIS-socket nodes have a runtime priority. For a
 * RUNNING node the change is dispatched to the DSP-side RMS via
 * disp_node_change_priority(); for ALLOCATED/PAUSED nodes only the
 * GPP-side copy is updated. The whole check-and-set runs under
 * node_mgr_lock.
 */
776 int node_change_priority(struct node_object *hnode, s32 prio)
778 struct node_object *pnode = (struct node_object *)hnode;
779 struct node_mgr *hnode_mgr = NULL;
780 enum node_type node_type;
781 enum node_state state;
782 int status = 0;
783 u32 proc_id;
785 DBC_REQUIRE(refs > 0);
787 if (!hnode || !hnode->hnode_mgr) {
788 status = -EFAULT;
789 } else {
790 hnode_mgr = hnode->hnode_mgr;
791 node_type = node_get_type(hnode);
792 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
793 status = -EPERM;
794 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
795 status = -EDOM;
797 if (status)
798 goto func_end;
800 /* Enter critical section */
801 mutex_lock(&hnode_mgr->node_mgr_lock);
803 state = node_get_state(hnode);
804 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
805 NODE_SET_PRIORITY(hnode, prio);
806 } else {
807 if (state != NODE_RUNNING) {
808 status = -EBADR;
809 goto func_cont;
/* NOTE(review): status from proc_get_processor_id() is overwritten
 * below when proc_id == DSP_UNIT; only proc_id gates the dispatch. */
811 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
812 if (proc_id == DSP_UNIT) {
813 status =
814 disp_node_change_priority(hnode_mgr->disp_obj,
815 hnode,
816 hnode_mgr->ul_fxn_addrs
817 [RMSCHANGENODEPRIORITY],
818 hnode->node_env, prio);
820 if (status >= 0)
821 NODE_SET_PRIORITY(hnode, prio);
824 func_cont:
825 /* Leave critical section */
826 mutex_unlock(&hnode_mgr->node_mgr_lock);
827 func_end:
828 return status;
832 * ======== node_connect ========
833 * Purpose:
834 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
836 int node_connect(struct node_object *node1, u32 stream1,
837 struct node_object *node2,
838 u32 stream2, struct dsp_strmattr *pattrs,
839 struct dsp_cbdata *conn_param)
841 struct node_mgr *hnode_mgr;
842 char *pstr_dev_name = NULL;
843 enum node_type node1_type = NODE_TASK;
844 enum node_type node2_type = NODE_TASK;
845 struct node_strmdef *pstrm_def;
846 struct node_strmdef *input = NULL;
847 struct node_strmdef *output = NULL;
848 struct node_object *dev_node_obj;
849 struct node_object *hnode;
850 struct stream_chnl *pstream;
851 u32 pipe_id = GB_NOBITS;
852 u32 chnl_id = GB_NOBITS;
853 s8 chnl_mode;
854 u32 dw_length;
855 int status = 0;
856 DBC_REQUIRE(refs > 0);
858 if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
859 (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
860 status = -EFAULT;
862 if (!status) {
863 /* The two nodes must be on the same processor */
864 if (node1 != (struct node_object *)DSP_HGPPNODE &&
865 node2 != (struct node_object *)DSP_HGPPNODE &&
866 node1->hnode_mgr != node2->hnode_mgr)
867 status = -EPERM;
868 /* Cannot connect a node to itself */
869 if (node1 == node2)
870 status = -EPERM;
873 if (!status) {
874 /* node_get_type() will return NODE_GPP if hnode =
875 * DSP_HGPPNODE. */
876 node1_type = node_get_type(node1);
877 node2_type = node_get_type(node2);
878 /* Check stream indices ranges */
879 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
880 stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
881 && node2_type !=
882 NODE_DEVICE
883 && stream2 >=
884 MAX_INPUTS(node2)))
885 status = -EINVAL;
887 if (!status) {
889 * Only the following types of connections are allowed:
890 * task/dais socket < == > task/dais socket
891 * task/dais socket < == > device
892 * task/dais socket < == > GPP
894 * ie, no message nodes, and at least one task or dais
895 * socket node.
897 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
898 (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
899 node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
900 status = -EPERM;
903 * Check stream mode. Default is STRMMODE_PROCCOPY.
905 if (!status && pattrs) {
906 if (pattrs->strm_mode != STRMMODE_PROCCOPY)
907 status = -EPERM; /* illegal stream mode */
910 if (status)
911 goto func_end;
913 if (node1_type != NODE_GPP) {
914 hnode_mgr = node1->hnode_mgr;
915 } else {
916 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
917 hnode_mgr = node2->hnode_mgr;
919 /* Enter critical section */
920 mutex_lock(&hnode_mgr->node_mgr_lock);
922 /* Nodes must be in the allocated state */
923 if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
924 status = -EBADR;
926 if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
927 status = -EBADR;
929 if (!status) {
930 /* Check that stream indices for task and dais socket nodes
931 * are not already be used. (Device nodes checked later) */
932 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
933 output =
934 &(node1->create_args.asa.
935 task_arg_obj.strm_out_def[stream1]);
936 if (output->sz_device != NULL)
937 status = -EISCONN;
940 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
941 input =
942 &(node2->create_args.asa.
943 task_arg_obj.strm_in_def[stream2]);
944 if (input->sz_device != NULL)
945 status = -EISCONN;
949 /* Connecting two task nodes? */
950 if (!status && ((node1_type == NODE_TASK ||
951 node1_type == NODE_DAISSOCKET)
952 && (node2_type == NODE_TASK
953 || node2_type == NODE_DAISSOCKET))) {
954 /* Find available pipe */
955 pipe_id = gb_findandset(hnode_mgr->pipe_map);
956 if (pipe_id == GB_NOBITS) {
957 status = -ECONNREFUSED;
958 } else {
959 node1->outputs[stream1].type = NODECONNECT;
960 node2->inputs[stream2].type = NODECONNECT;
961 node1->outputs[stream1].dev_id = pipe_id;
962 node2->inputs[stream2].dev_id = pipe_id;
963 output->sz_device = kzalloc(PIPENAMELEN + 1,
964 GFP_KERNEL);
965 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
966 if (output->sz_device == NULL ||
967 input->sz_device == NULL) {
968 /* Undo the connection */
969 kfree(output->sz_device);
971 kfree(input->sz_device);
973 output->sz_device = NULL;
974 input->sz_device = NULL;
975 gb_clear(hnode_mgr->pipe_map, pipe_id);
976 status = -ENOMEM;
977 } else {
978 /* Copy "/dbpipe<pipId>" name to device names */
979 sprintf(output->sz_device, "%s%d",
980 PIPEPREFIX, pipe_id);
981 strcpy(input->sz_device, output->sz_device);
985 /* Connecting task node to host? */
986 if (!status && (node1_type == NODE_GPP ||
987 node2_type == NODE_GPP)) {
988 if (node1_type == NODE_GPP) {
989 chnl_mode = CHNL_MODETODSP;
990 } else {
991 DBC_ASSERT(node2_type == NODE_GPP);
992 chnl_mode = CHNL_MODEFROMDSP;
994 /* Reserve a channel id. We need to put the name "/host<id>"
995 * in the node's create_args, but the host
996 * side channel will not be opened until DSPStream_Open is
997 * called for this node. */
998 if (pattrs) {
999 if (pattrs->strm_mode == STRMMODE_RDMA) {
1000 chnl_id =
1001 gb_findandset(hnode_mgr->dma_chnl_map);
1002 /* dma chans are 2nd transport chnl set
1003 * ids(e.g. 16-31) */
1004 (chnl_id != GB_NOBITS) ?
1005 (chnl_id =
1006 chnl_id +
1007 hnode_mgr->ul_num_chnls) : chnl_id;
1008 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1009 chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
1010 /* zero-copy chans are 3nd transport set
1011 * (e.g. 32-47) */
1012 (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
1013 (2 *
1014 hnode_mgr->
1015 ul_num_chnls))
1016 : chnl_id;
1017 } else { /* must be PROCCOPY */
1018 DBC_ASSERT(pattrs->strm_mode ==
1019 STRMMODE_PROCCOPY);
1020 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1021 /* e.g. 0-15 */
1023 } else {
1024 /* default to PROCCOPY */
1025 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1027 if (chnl_id == GB_NOBITS) {
1028 status = -ECONNREFUSED;
1029 goto func_cont2;
1031 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
1032 if (pstr_dev_name != NULL)
1033 goto func_cont2;
1035 if (pattrs) {
1036 if (pattrs->strm_mode == STRMMODE_RDMA) {
1037 gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1038 hnode_mgr->ul_num_chnls);
1039 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1040 gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1041 (2 * hnode_mgr->ul_num_chnls));
1042 } else {
1043 DBC_ASSERT(pattrs->strm_mode ==
1044 STRMMODE_PROCCOPY);
1045 gb_clear(hnode_mgr->chnl_map, chnl_id);
1047 } else {
1048 gb_clear(hnode_mgr->chnl_map, chnl_id);
1050 status = -ENOMEM;
1051 func_cont2:
1052 if (!status) {
1053 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1054 node2->inputs[stream2].type = HOSTCONNECT;
1055 node2->inputs[stream2].dev_id = chnl_id;
1056 input->sz_device = pstr_dev_name;
1057 } else {
1058 node1->outputs[stream1].type = HOSTCONNECT;
1059 node1->outputs[stream1].dev_id = chnl_id;
1060 output->sz_device = pstr_dev_name;
1062 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1065 /* Connecting task node to device node? */
1066 if (!status && ((node1_type == NODE_DEVICE) ||
1067 (node2_type == NODE_DEVICE))) {
1068 if (node2_type == NODE_DEVICE) {
1069 /* node1 == > device */
1070 dev_node_obj = node2;
1071 hnode = node1;
1072 pstream = &(node1->outputs[stream1]);
1073 pstrm_def = output;
1074 } else {
1075 /* device == > node2 */
1076 dev_node_obj = node1;
1077 hnode = node2;
1078 pstream = &(node2->inputs[stream2]);
1079 pstrm_def = input;
1081 /* Set up create args */
1082 pstream->type = DEVICECONNECT;
1083 dw_length = strlen(dev_node_obj->pstr_dev_name);
1084 if (conn_param != NULL) {
1085 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1086 conn_param->cb_data,
1087 GFP_KERNEL);
1088 } else {
1089 pstrm_def->sz_device = kzalloc(dw_length + 1,
1090 GFP_KERNEL);
1092 if (pstrm_def->sz_device == NULL) {
1093 status = -ENOMEM;
1094 } else {
1095 /* Copy device name */
1096 strncpy(pstrm_def->sz_device,
1097 dev_node_obj->pstr_dev_name, dw_length);
1098 if (conn_param != NULL) {
1099 strncat(pstrm_def->sz_device,
1100 (char *)conn_param->node_data,
1101 (u32) conn_param->cb_data);
1103 dev_node_obj->device_owner = hnode;
1106 if (!status) {
1107 /* Fill in create args */
1108 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1109 node1->create_args.asa.task_arg_obj.num_outputs++;
1110 fill_stream_def(node1, output, pattrs);
1112 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1113 node2->create_args.asa.task_arg_obj.num_inputs++;
1114 fill_stream_def(node2, input, pattrs);
1116 /* Update node1 and node2 stream_connect */
1117 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1118 node1->num_outputs++;
1119 if (stream1 > node1->max_output_index)
1120 node1->max_output_index = stream1;
1123 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1124 node2->num_inputs++;
1125 if (stream2 > node2->max_input_index)
1126 node2->max_input_index = stream2;
1129 fill_stream_connect(node1, node2, stream1, stream2);
1131 /* end of sync_enter_cs */
1132 /* Exit critical section */
1133 mutex_unlock(&hnode_mgr->node_mgr_lock);
1134 func_end:
1135 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1136 "pattrs: %p status: 0x%x\n", __func__, node1,
1137 stream1, node2, stream2, pattrs, status);
1138 return status;
1142 * ======== node_create ========
1143 * Purpose:
1144 * Create a node on the DSP by remotely calling the node's create function.
1146 int node_create(struct node_object *hnode)
1148 struct node_object *pnode = (struct node_object *)hnode;
1149 struct node_mgr *hnode_mgr;
1150 struct bridge_drv_interface *intf_fxns;
1151 u32 ul_create_fxn;
1152 enum node_type node_type;
1153 int status = 0;
1154 int status1 = 0;
1155 struct dsp_cbdata cb_data;
1156 u32 proc_id = 255;
1157 struct dsp_processorstate proc_state;
1158 struct proc_object *hprocessor;
1159 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1160 struct dspbridge_platform_data *pdata =
1161 omap_dspbridge_dev->dev.platform_data;
1162 #endif
1164 DBC_REQUIRE(refs > 0);
1165 if (!pnode) {
1166 status = -EFAULT;
1167 goto func_end;
1169 hprocessor = hnode->hprocessor;
1170 status = proc_get_state(hprocessor, &proc_state,
1171 sizeof(struct dsp_processorstate));
1172 if (status)
1173 goto func_end;
1174 /* If processor is in error state then don't attempt to create
1175 new node */
1176 if (proc_state.proc_state == PROC_ERROR) {
1177 status = -EPERM;
1178 goto func_end;
1180 /* create struct dsp_cbdata struct for PWR calls */
1181 cb_data.cb_data = PWR_TIMEOUT;
1182 node_type = node_get_type(hnode);
1183 hnode_mgr = hnode->hnode_mgr;
1184 intf_fxns = hnode_mgr->intf_fxns;
1185 /* Get access to node dispatcher */
1186 mutex_lock(&hnode_mgr->node_mgr_lock);
1188 /* Check node state */
1189 if (node_get_state(hnode) != NODE_ALLOCATED)
1190 status = -EBADR;
1192 if (!status)
1193 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1195 if (status)
1196 goto func_cont2;
1198 if (proc_id != DSP_UNIT)
1199 goto func_cont2;
1201 /* Make sure streams are properly connected */
1202 if ((hnode->num_inputs && hnode->max_input_index >
1203 hnode->num_inputs - 1) ||
1204 (hnode->num_outputs && hnode->max_output_index >
1205 hnode->num_outputs - 1))
1206 status = -ENOTCONN;
1208 if (!status) {
1209 /* If node's create function is not loaded, load it */
1210 /* Boost the OPP level to max level that DSP can be requested */
1211 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1212 if (pdata->cpu_set_freq)
1213 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
1214 #endif
1215 status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
1216 NLDR_CREATE);
1217 /* Get address of node's create function */
1218 if (!status) {
1219 hnode->loaded = true;
1220 if (node_type != NODE_DEVICE) {
1221 status = get_fxn_address(hnode, &ul_create_fxn,
1222 CREATEPHASE);
1224 } else {
1225 pr_err("%s: failed to load create code: 0x%x\n",
1226 __func__, status);
1228 /* Request the lowest OPP level */
1229 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1230 if (pdata->cpu_set_freq)
1231 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1232 #endif
1233 /* Get address of iAlg functions, if socket node */
1234 if (!status) {
1235 if (node_type == NODE_DAISSOCKET) {
1236 status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
1237 (hnode->nldr_node_obj,
1238 hnode->dcd_props.obj_data.node_obj.
1239 pstr_i_alg_name,
1240 &hnode->create_args.asa.
1241 task_arg_obj.ul_dais_arg);
1245 if (!status) {
1246 if (node_type != NODE_DEVICE) {
1247 status = disp_node_create(hnode_mgr->disp_obj, hnode,
1248 hnode_mgr->ul_fxn_addrs
1249 [RMSCREATENODE],
1250 ul_create_fxn,
1251 &(hnode->create_args),
1252 &(hnode->node_env));
1253 if (status >= 0) {
1254 /* Set the message queue id to the node env
1255 * pointer */
1256 intf_fxns = hnode_mgr->intf_fxns;
1257 (*intf_fxns->pfn_msg_set_queue_id) (hnode->
1258 msg_queue_obj,
1259 hnode->node_env);
1263 /* Phase II/Overlays: Create, execute, delete phases possibly in
1264 * different files/sections. */
1265 if (hnode->loaded && hnode->phase_split) {
1266 /* If create code was dynamically loaded, we can now unload
1267 * it. */
1268 status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
1269 NLDR_CREATE);
1270 hnode->loaded = false;
1272 if (status1)
1273 pr_err("%s: Failed to unload create code: 0x%x\n",
1274 __func__, status1);
1275 func_cont2:
1276 /* Update node state and node manager state */
1277 if (status >= 0) {
1278 NODE_SET_STATE(hnode, NODE_CREATED);
1279 hnode_mgr->num_created++;
1280 goto func_cont;
1282 if (status != -EBADR) {
1283 /* Put back in NODE_ALLOCATED state if error occurred */
1284 NODE_SET_STATE(hnode, NODE_ALLOCATED);
1286 func_cont:
1287 /* Free access to node dispatcher */
1288 mutex_unlock(&hnode_mgr->node_mgr_lock);
1289 func_end:
1290 if (status >= 0) {
1291 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
1292 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1295 dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
1296 hnode, status);
1297 return status;
1301 * ======== node_create_mgr ========
1302 * Purpose:
1303 * Create a NODE Manager object.
1305 int node_create_mgr(struct node_mgr **node_man,
1306 struct dev_object *hdev_obj)
1308 u32 i;
1309 struct node_mgr *node_mgr_obj = NULL;
1310 struct disp_attr disp_attr_obj;
1311 char *sz_zl_file = "";
1312 struct nldr_attrs nldr_attrs_obj;
1313 int status = 0;
1314 u8 dev_type;
1315 DBC_REQUIRE(refs > 0);
1316 DBC_REQUIRE(node_man != NULL);
1317 DBC_REQUIRE(hdev_obj != NULL);
1319 *node_man = NULL;
1320 /* Allocate Node manager object */
1321 node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
1322 if (node_mgr_obj) {
1323 node_mgr_obj->hdev_obj = hdev_obj;
1324 node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
1325 GFP_KERNEL);
1326 node_mgr_obj->pipe_map = gb_create(MAXPIPES);
1327 node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
1328 if (node_mgr_obj->node_list == NULL
1329 || node_mgr_obj->pipe_map == NULL
1330 || node_mgr_obj->pipe_done_map == NULL) {
1331 status = -ENOMEM;
1332 } else {
1333 INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
1334 node_mgr_obj->ntfy_obj = kmalloc(
1335 sizeof(struct ntfy_object), GFP_KERNEL);
1336 if (node_mgr_obj->ntfy_obj)
1337 ntfy_init(node_mgr_obj->ntfy_obj);
1338 else
1339 status = -ENOMEM;
1341 node_mgr_obj->num_created = 0;
1342 } else {
1343 status = -ENOMEM;
1345 /* get devNodeType */
1346 if (!status)
1347 status = dev_get_dev_type(hdev_obj, &dev_type);
1349 /* Create the DCD Manager */
1350 if (!status) {
1351 status =
1352 dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
1353 if (!status)
1354 status = get_proc_props(node_mgr_obj, hdev_obj);
1357 /* Create NODE Dispatcher */
1358 if (!status) {
1359 disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
1360 disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
1361 disp_attr_obj.proc_family = node_mgr_obj->proc_family;
1362 disp_attr_obj.proc_type = node_mgr_obj->proc_type;
1363 status =
1364 disp_create(&node_mgr_obj->disp_obj, hdev_obj,
1365 &disp_attr_obj);
1367 /* Create a STRM Manager */
1368 if (!status)
1369 status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
1371 if (!status) {
1372 dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
1373 /* Get msg_ctrl queue manager */
1374 dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
1375 mutex_init(&node_mgr_obj->node_mgr_lock);
1376 node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
1377 /* dma chnl map. ul_num_chnls is # per transport */
1378 node_mgr_obj->dma_chnl_map =
1379 gb_create(node_mgr_obj->ul_num_chnls);
1380 node_mgr_obj->zc_chnl_map =
1381 gb_create(node_mgr_obj->ul_num_chnls);
1382 if ((node_mgr_obj->chnl_map == NULL)
1383 || (node_mgr_obj->dma_chnl_map == NULL)
1384 || (node_mgr_obj->zc_chnl_map == NULL)) {
1385 status = -ENOMEM;
1386 } else {
1387 /* Block out reserved channels */
1388 for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
1389 gb_set(node_mgr_obj->chnl_map, i);
1391 /* Block out channels reserved for RMS */
1392 gb_set(node_mgr_obj->chnl_map,
1393 node_mgr_obj->ul_chnl_offset);
1394 gb_set(node_mgr_obj->chnl_map,
1395 node_mgr_obj->ul_chnl_offset + 1);
1398 if (!status) {
1399 /* NO RM Server on the IVA */
1400 if (dev_type != IVA_UNIT) {
1401 /* Get addresses of any RMS functions loaded */
1402 status = get_rms_fxns(node_mgr_obj);
1406 /* Get loader functions and create loader */
1407 if (!status)
1408 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
1410 if (!status) {
1411 nldr_attrs_obj.pfn_ovly = ovly;
1412 nldr_attrs_obj.pfn_write = mem_write;
1413 nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
1414 nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
1415 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
1416 status =
1417 node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
1418 hdev_obj,
1419 &nldr_attrs_obj);
1421 if (!status)
1422 *node_man = node_mgr_obj;
1423 else
1424 delete_node_mgr(node_mgr_obj);
1426 DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
1428 return status;
1432 * ======== node_delete ========
1433 * Purpose:
1434 * Delete a node on the DSP by remotely calling the node's delete function.
1435 * Loads the node's delete function if necessary. Free GPP side resources
1436 * after node's delete function returns.
1438 int node_delete(struct node_res_object *noderes,
1439 struct process_context *pr_ctxt)
1441 struct node_object *pnode = noderes->hnode;
1442 struct node_mgr *hnode_mgr;
1443 struct proc_object *hprocessor;
1444 struct disp_object *disp_obj;
1445 u32 ul_delete_fxn;
1446 enum node_type node_type;
1447 enum node_state state;
1448 int status = 0;
1449 int status1 = 0;
1450 struct dsp_cbdata cb_data;
1451 u32 proc_id;
1452 struct bridge_drv_interface *intf_fxns;
1454 void *node_res = noderes;
1456 struct dsp_processorstate proc_state;
1457 DBC_REQUIRE(refs > 0);
1459 if (!pnode) {
1460 status = -EFAULT;
1461 goto func_end;
1463 /* create struct dsp_cbdata struct for PWR call */
1464 cb_data.cb_data = PWR_TIMEOUT;
1465 hnode_mgr = pnode->hnode_mgr;
1466 hprocessor = pnode->hprocessor;
1467 disp_obj = hnode_mgr->disp_obj;
1468 node_type = node_get_type(pnode);
1469 intf_fxns = hnode_mgr->intf_fxns;
1470 /* Enter critical section */
1471 mutex_lock(&hnode_mgr->node_mgr_lock);
1473 state = node_get_state(pnode);
1474 /* Execute delete phase code for non-device node in all cases
1475 * except when the node was only allocated. Delete phase must be
1476 * executed even if create phase was executed, but failed.
1477 * If the node environment pointer is non-NULL, the delete phase
1478 * code must be executed. */
1479 if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
1480 node_type != NODE_DEVICE) {
1481 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1482 if (status)
1483 goto func_cont1;
1485 if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
1486 /* If node has terminated, execute phase code will
1487 * have already been unloaded in node_on_exit(). If the
1488 * node is PAUSED, the execute phase is loaded, and it
1489 * is now ok to unload it. If the node is running, we
1490 * will unload the execute phase only after deleting
1491 * the node. */
1492 if (state == NODE_PAUSED && pnode->loaded &&
1493 pnode->phase_split) {
1494 /* Ok to unload execute code as long as node
1495 * is not * running */
1496 status1 =
1497 hnode_mgr->nldr_fxns.
1498 pfn_unload(pnode->nldr_node_obj,
1499 NLDR_EXECUTE);
1500 pnode->loaded = false;
1501 NODE_SET_STATE(pnode, NODE_DONE);
1503 /* Load delete phase code if not loaded or if haven't
1504 * * unloaded EXECUTE phase */
1505 if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
1506 pnode->phase_split) {
1507 status =
1508 hnode_mgr->nldr_fxns.
1509 pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
1510 if (!status)
1511 pnode->loaded = true;
1512 else
1513 pr_err("%s: fail - load delete code:"
1514 " 0x%x\n", __func__, status);
1517 func_cont1:
1518 if (!status) {
1519 /* Unblock a thread trying to terminate the node */
1520 (void)sync_set_event(pnode->sync_done);
1521 if (proc_id == DSP_UNIT) {
1522 /* ul_delete_fxn = address of node's delete
1523 * function */
1524 status = get_fxn_address(pnode, &ul_delete_fxn,
1525 DELETEPHASE);
1526 } else if (proc_id == IVA_UNIT)
1527 ul_delete_fxn = (u32) pnode->node_env;
1528 if (!status) {
1529 status = proc_get_state(hprocessor,
1530 &proc_state,
1531 sizeof(struct
1532 dsp_processorstate));
1533 if (proc_state.proc_state != PROC_ERROR) {
1534 status =
1535 disp_node_delete(disp_obj, pnode,
1536 hnode_mgr->
1537 ul_fxn_addrs
1538 [RMSDELETENODE],
1539 ul_delete_fxn,
1540 pnode->node_env);
1541 } else
1542 NODE_SET_STATE(pnode, NODE_DONE);
1544 /* Unload execute, if not unloaded, and delete
1545 * function */
1546 if (state == NODE_RUNNING &&
1547 pnode->phase_split) {
1548 status1 =
1549 hnode_mgr->nldr_fxns.
1550 pfn_unload(pnode->nldr_node_obj,
1551 NLDR_EXECUTE);
1553 if (status1)
1554 pr_err("%s: fail - unload execute code:"
1555 " 0x%x\n", __func__, status1);
1557 status1 =
1558 hnode_mgr->nldr_fxns.pfn_unload(pnode->
1559 nldr_node_obj,
1560 NLDR_DELETE);
1561 pnode->loaded = false;
1562 if (status1)
1563 pr_err("%s: fail - unload delete code: "
1564 "0x%x\n", __func__, status1);
1568 /* Free host side resources even if a failure occurred */
1569 /* Remove node from hnode_mgr->node_list */
1570 lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
1571 hnode_mgr->num_nodes--;
1572 /* Decrement count of nodes created on DSP */
1573 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
1574 (pnode->node_env != (u32) NULL)))
1575 hnode_mgr->num_created--;
1576 /* Free host-side resources allocated by node_create()
1577 * delete_node() fails if SM buffers not freed by client! */
1578 drv_proc_node_update_status(node_res, false);
1579 delete_node(pnode, pr_ctxt);
1582 * Release all Node resources and its context
1584 idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
1585 kfree(node_res);
1587 /* Exit critical section */
1588 mutex_unlock(&hnode_mgr->node_mgr_lock);
1589 proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
1590 func_end:
1591 dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
1592 return status;
1596 * ======== node_delete_mgr ========
1597 * Purpose:
1598 * Delete the NODE Manager.
1600 int node_delete_mgr(struct node_mgr *hnode_mgr)
1602 int status = 0;
1604 DBC_REQUIRE(refs > 0);
1606 if (hnode_mgr)
1607 delete_node_mgr(hnode_mgr);
1608 else
1609 status = -EFAULT;
1611 return status;
1615 * ======== node_enum_nodes ========
1616 * Purpose:
1617 * Enumerate currently allocated nodes.
1619 int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1620 u32 node_tab_size, u32 *pu_num_nodes,
1621 u32 *pu_allocated)
1623 struct node_object *hnode;
1624 u32 i;
1625 int status = 0;
1626 DBC_REQUIRE(refs > 0);
1627 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
1628 DBC_REQUIRE(pu_num_nodes != NULL);
1629 DBC_REQUIRE(pu_allocated != NULL);
1631 if (!hnode_mgr) {
1632 status = -EFAULT;
1633 goto func_end;
1635 /* Enter critical section */
1636 mutex_lock(&hnode_mgr->node_mgr_lock);
1638 if (hnode_mgr->num_nodes > node_tab_size) {
1639 *pu_allocated = hnode_mgr->num_nodes;
1640 *pu_num_nodes = 0;
1641 status = -EINVAL;
1642 } else {
1643 hnode = (struct node_object *)lst_first(hnode_mgr->
1644 node_list);
1645 for (i = 0; i < hnode_mgr->num_nodes; i++) {
1646 DBC_ASSERT(hnode);
1647 node_tab[i] = hnode;
1648 hnode = (struct node_object *)lst_next
1649 (hnode_mgr->node_list,
1650 (struct list_head *)hnode);
1652 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1654 /* end of sync_enter_cs */
1655 /* Exit critical section */
1656 mutex_unlock(&hnode_mgr->node_mgr_lock);
1657 func_end:
1658 return status;
1662 * ======== node_exit ========
1663 * Purpose:
1664 * Discontinue usage of NODE module.
1666 void node_exit(void)
1668 DBC_REQUIRE(refs > 0);
1670 refs--;
1672 DBC_ENSURE(refs >= 0);
1676 * ======== node_free_msg_buf ========
1677 * Purpose:
1678 * Frees the message buffer.
1680 int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1681 struct dsp_bufferattr *pattr)
1683 struct node_object *pnode = (struct node_object *)hnode;
1684 int status = 0;
1685 u32 proc_id;
1686 DBC_REQUIRE(refs > 0);
1687 DBC_REQUIRE(pbuffer != NULL);
1688 DBC_REQUIRE(pnode != NULL);
1689 DBC_REQUIRE(pnode->xlator != NULL);
1691 if (!hnode) {
1692 status = -EFAULT;
1693 goto func_end;
1695 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1696 if (proc_id == DSP_UNIT) {
1697 if (!status) {
1698 if (pattr == NULL) {
1699 /* set defaults */
1700 pattr = &node_dfltbufattrs;
1702 /* Node supports single SM segment only */
1703 if (pattr->segment_id != 1)
1704 status = -EBADR;
1706 /* pbuffer is clients Va. */
1707 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1709 } else {
1710 DBC_ASSERT(NULL); /* BUG */
1712 func_end:
1713 return status;
1717 * ======== node_get_attr ========
1718 * Purpose:
1719 * Copy the current attributes of the specified node into a dsp_nodeattr
1720 * structure.
1722 int node_get_attr(struct node_object *hnode,
1723 struct dsp_nodeattr *pattr, u32 attr_size)
1725 struct node_mgr *hnode_mgr;
1726 int status = 0;
1727 DBC_REQUIRE(refs > 0);
1728 DBC_REQUIRE(pattr != NULL);
1729 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1731 if (!hnode) {
1732 status = -EFAULT;
1733 } else {
1734 hnode_mgr = hnode->hnode_mgr;
1735 /* Enter hnode_mgr critical section (since we're accessing
1736 * data that could be changed by node_change_priority() and
1737 * node_connect(). */
1738 mutex_lock(&hnode_mgr->node_mgr_lock);
1739 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1740 /* dsp_nodeattrin */
1741 pattr->in_node_attr_in.cb_struct =
1742 sizeof(struct dsp_nodeattrin);
1743 pattr->in_node_attr_in.prio = hnode->prio;
1744 pattr->in_node_attr_in.utimeout = hnode->utimeout;
1745 pattr->in_node_attr_in.heap_size =
1746 hnode->create_args.asa.task_arg_obj.heap_size;
1747 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1748 hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
1749 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1750 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1751 /* dsp_nodeinfo */
1752 get_node_info(hnode, &(pattr->node_info));
1753 /* end of sync_enter_cs */
1754 /* Exit critical section */
1755 mutex_unlock(&hnode_mgr->node_mgr_lock);
1757 return status;
1761 * ======== node_get_channel_id ========
1762 * Purpose:
1763 * Get the channel index reserved for a stream connection between the
1764 * host and a node.
1766 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1767 u32 *chan_id)
1769 enum node_type node_type;
1770 int status = -EINVAL;
1771 DBC_REQUIRE(refs > 0);
1772 DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1773 DBC_REQUIRE(chan_id != NULL);
1775 if (!hnode) {
1776 status = -EFAULT;
1777 return status;
1779 node_type = node_get_type(hnode);
1780 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1781 status = -EPERM;
1782 return status;
1784 if (dir == DSP_TONODE) {
1785 if (index < MAX_INPUTS(hnode)) {
1786 if (hnode->inputs[index].type == HOSTCONNECT) {
1787 *chan_id = hnode->inputs[index].dev_id;
1788 status = 0;
1791 } else {
1792 DBC_ASSERT(dir == DSP_FROMNODE);
1793 if (index < MAX_OUTPUTS(hnode)) {
1794 if (hnode->outputs[index].type == HOSTCONNECT) {
1795 *chan_id = hnode->outputs[index].dev_id;
1796 status = 0;
1800 return status;
1804 * ======== node_get_message ========
1805 * Purpose:
1806 * Retrieve a message from a node on the DSP.
1808 int node_get_message(struct node_object *hnode,
1809 struct dsp_msg *message, u32 utimeout)
1811 struct node_mgr *hnode_mgr;
1812 enum node_type node_type;
1813 struct bridge_drv_interface *intf_fxns;
1814 int status = 0;
1815 void *tmp_buf;
1816 struct dsp_processorstate proc_state;
1817 struct proc_object *hprocessor;
1819 DBC_REQUIRE(refs > 0);
1820 DBC_REQUIRE(message != NULL);
1822 if (!hnode) {
1823 status = -EFAULT;
1824 goto func_end;
1826 hprocessor = hnode->hprocessor;
1827 status = proc_get_state(hprocessor, &proc_state,
1828 sizeof(struct dsp_processorstate));
1829 if (status)
1830 goto func_end;
1831 /* If processor is in error state then don't attempt to get the
1832 message */
1833 if (proc_state.proc_state == PROC_ERROR) {
1834 status = -EPERM;
1835 goto func_end;
1837 hnode_mgr = hnode->hnode_mgr;
1838 node_type = node_get_type(hnode);
1839 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1840 node_type != NODE_DAISSOCKET) {
1841 status = -EPERM;
1842 goto func_end;
1844 /* This function will block unless a message is available. Since
1845 * DSPNode_RegisterNotify() allows notification when a message
1846 * is available, the system can be designed so that
1847 * DSPNode_GetMessage() is only called when a message is
1848 * available. */
1849 intf_fxns = hnode_mgr->intf_fxns;
1850 status =
1851 (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
1852 /* Check if message contains SM descriptor */
1853 if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
1854 goto func_end;
1856 /* Translate DSP byte addr to GPP Va. */
1857 tmp_buf = cmm_xlator_translate(hnode->xlator,
1858 (void *)(message->dw_arg1 *
1859 hnode->hnode_mgr->
1860 udsp_word_size), CMM_DSPPA2PA);
1861 if (tmp_buf != NULL) {
1862 /* now convert this GPP Pa to Va */
1863 tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
1864 CMM_PA2VA);
1865 if (tmp_buf != NULL) {
1866 /* Adjust SM size in msg */
1867 message->dw_arg1 = (u32) tmp_buf;
1868 message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
1869 } else {
1870 status = -ESRCH;
1872 } else {
1873 status = -ESRCH;
1875 func_end:
1876 dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
1877 hnode, message, utimeout);
1878 return status;
1882 * ======== node_get_nldr_obj ========
1884 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1885 struct nldr_object **nldr_ovlyobj)
1887 int status = 0;
1888 struct node_mgr *node_mgr_obj = hnode_mgr;
1889 DBC_REQUIRE(nldr_ovlyobj != NULL);
1891 if (!hnode_mgr)
1892 status = -EFAULT;
1893 else
1894 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1896 DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
1897 return status;
1901 * ======== node_get_strm_mgr ========
1902 * Purpose:
1903 * Returns the Stream manager.
1905 int node_get_strm_mgr(struct node_object *hnode,
1906 struct strm_mgr **strm_man)
1908 int status = 0;
1910 DBC_REQUIRE(refs > 0);
1912 if (!hnode)
1913 status = -EFAULT;
1914 else
1915 *strm_man = hnode->hnode_mgr->strm_mgr_obj;
1917 return status;
1921 * ======== node_get_load_type ========
1923 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1925 DBC_REQUIRE(refs > 0);
1926 DBC_REQUIRE(hnode);
1927 if (!hnode) {
1928 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1929 return -1;
1930 } else {
1931 return hnode->dcd_props.obj_data.node_obj.us_load_type;
1936 * ======== node_get_timeout ========
1937 * Purpose:
1938 * Returns the timeout value for this node.
1940 u32 node_get_timeout(struct node_object *hnode)
1942 DBC_REQUIRE(refs > 0);
1943 DBC_REQUIRE(hnode);
1944 if (!hnode) {
1945 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1946 return 0;
1947 } else {
1948 return hnode->utimeout;
1953 * ======== node_get_type ========
1954 * Purpose:
1955 * Returns the node type.
1957 enum node_type node_get_type(struct node_object *hnode)
1959 enum node_type node_type;
1961 if (hnode == (struct node_object *)DSP_HGPPNODE)
1962 node_type = NODE_GPP;
1963 else {
1964 if (!hnode)
1965 node_type = -1;
1966 else
1967 node_type = hnode->ntype;
1969 return node_type;
1973 * ======== node_init ========
1974 * Purpose:
1975 * Initialize the NODE module.
1977 bool node_init(void)
1979 DBC_REQUIRE(refs >= 0);
1981 refs++;
1983 return true;
1987 * ======== node_on_exit ========
1988 * Purpose:
1989 * Gets called when RMS_EXIT is received for a node.
1991 void node_on_exit(struct node_object *hnode, s32 node_status)
1993 if (!hnode)
1994 return;
1996 /* Set node state to done */
1997 NODE_SET_STATE(hnode, NODE_DONE);
1998 hnode->exit_status = node_status;
1999 if (hnode->loaded && hnode->phase_split) {
2000 (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
2001 nldr_node_obj,
2002 NLDR_EXECUTE);
2003 hnode->loaded = false;
2005 /* Unblock call to node_terminate */
2006 (void)sync_set_event(hnode->sync_done);
2007 /* Notify clients */
2008 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
2009 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2013 * ======== node_pause ========
2014 * Purpose:
2015 * Suspend execution of a node currently running on the DSP.
2017 int node_pause(struct node_object *hnode)
2019 struct node_object *pnode = (struct node_object *)hnode;
2020 enum node_type node_type;
2021 enum node_state state;
2022 struct node_mgr *hnode_mgr;
2023 int status = 0;
2024 u32 proc_id;
2025 struct dsp_processorstate proc_state;
2026 struct proc_object *hprocessor;
2028 DBC_REQUIRE(refs > 0);
2030 if (!hnode) {
2031 status = -EFAULT;
2032 } else {
2033 node_type = node_get_type(hnode);
2034 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2035 status = -EPERM;
2037 if (status)
2038 goto func_end;
2040 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2042 if (proc_id == IVA_UNIT)
2043 status = -ENOSYS;
2045 if (!status) {
2046 hnode_mgr = hnode->hnode_mgr;
2048 /* Enter critical section */
2049 mutex_lock(&hnode_mgr->node_mgr_lock);
2050 state = node_get_state(hnode);
2051 /* Check node state */
2052 if (state != NODE_RUNNING)
2053 status = -EBADR;
2055 if (status)
2056 goto func_cont;
2057 hprocessor = hnode->hprocessor;
2058 status = proc_get_state(hprocessor, &proc_state,
2059 sizeof(struct dsp_processorstate));
2060 if (status)
2061 goto func_cont;
2062 /* If processor is in error state then don't attempt
2063 to send the message */
2064 if (proc_state.proc_state == PROC_ERROR) {
2065 status = -EPERM;
2066 goto func_cont;
2069 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2070 hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
2071 hnode->node_env, NODE_SUSPENDEDPRI);
2073 /* Update state */
2074 if (status >= 0)
2075 NODE_SET_STATE(hnode, NODE_PAUSED);
2077 func_cont:
2078 /* End of sync_enter_cs */
2079 /* Leave critical section */
2080 mutex_unlock(&hnode_mgr->node_mgr_lock);
2081 if (status >= 0) {
2082 proc_notify_clients(hnode->hprocessor,
2083 DSP_NODESTATECHANGE);
2084 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2087 func_end:
2088 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2089 return status;
2093 * ======== node_put_message ========
2094 * Purpose:
2095 * Send a message to a message node, task node, or XDAIS socket node. This
2096 * function will block until the message stream can accommodate the
2097 * message, or a timeout occurs.
2099 int node_put_message(struct node_object *hnode,
2100 const struct dsp_msg *pmsg, u32 utimeout)
2102 struct node_mgr *hnode_mgr = NULL;
2103 enum node_type node_type;
2104 struct bridge_drv_interface *intf_fxns;
2105 enum node_state state;
2106 int status = 0;
2107 void *tmp_buf;
2108 struct dsp_msg new_msg;
2109 struct dsp_processorstate proc_state;
2110 struct proc_object *hprocessor;
2112 DBC_REQUIRE(refs > 0);
2113 DBC_REQUIRE(pmsg != NULL);
2115 if (!hnode) {
2116 status = -EFAULT;
2117 goto func_end;
2119 hprocessor = hnode->hprocessor;
2120 status = proc_get_state(hprocessor, &proc_state,
2121 sizeof(struct dsp_processorstate));
2122 if (status)
2123 goto func_end;
2124 /* If processor is in bad state then don't attempt sending the
2125 message */
2126 if (proc_state.proc_state == PROC_ERROR) {
2127 status = -EPERM;
2128 goto func_end;
2130 hnode_mgr = hnode->hnode_mgr;
2131 node_type = node_get_type(hnode);
2132 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
2133 node_type != NODE_DAISSOCKET)
2134 status = -EPERM;
2136 if (!status) {
2137 /* Check node state. Can't send messages to a node after
2138 * we've sent the RMS_EXIT command. There is still the
2139 * possibility that node_terminate can be called after we've
2140 * checked the state. Could add another SYNC object to
2141 * prevent this (can't use node_mgr_lock, since we don't
2142 * want to block other NODE functions). However, the node may
2143 * still exit on its own, before this message is sent. */
2144 mutex_lock(&hnode_mgr->node_mgr_lock);
2145 state = node_get_state(hnode);
2146 if (state == NODE_TERMINATING || state == NODE_DONE)
2147 status = -EBADR;
2149 /* end of sync_enter_cs */
2150 mutex_unlock(&hnode_mgr->node_mgr_lock);
2152 if (status)
2153 goto func_end;
2155 /* assign pmsg values to new msg */
2156 new_msg = *pmsg;
2157 /* Now, check if message contains a SM buffer descriptor */
2158 if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
2159 /* Translate GPP Va to DSP physical buf Ptr. */
2160 tmp_buf = cmm_xlator_translate(hnode->xlator,
2161 (void *)new_msg.dw_arg1,
2162 CMM_VA2DSPPA);
2163 if (tmp_buf != NULL) {
2164 /* got translation, convert to MAUs in msg */
2165 if (hnode->hnode_mgr->udsp_word_size != 0) {
2166 new_msg.dw_arg1 =
2167 (u32) tmp_buf /
2168 hnode->hnode_mgr->udsp_word_size;
2169 /* MAUs */
2170 new_msg.dw_arg2 /= hnode->hnode_mgr->
2171 udsp_word_size;
2172 } else {
2173 pr_err("%s: udsp_word_size is zero!\n",
2174 __func__);
2175 status = -EPERM; /* bad DSPWordSize */
2177 } else { /* failed to translate buffer address */
2178 status = -ESRCH;
2181 if (!status) {
2182 intf_fxns = hnode_mgr->intf_fxns;
2183 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
2184 &new_msg, utimeout);
2186 func_end:
2187 dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
2188 "status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
2189 return status;
2193 * ======== node_register_notify ========
2194 * Purpose:
2195 * Register to be notified on specific events for this node.
2197 int node_register_notify(struct node_object *hnode, u32 event_mask,
2198 u32 notify_type,
2199 struct dsp_notification *hnotification)
2201 struct bridge_drv_interface *intf_fxns;
2202 int status = 0;
2204 DBC_REQUIRE(refs > 0);
2205 DBC_REQUIRE(hnotification != NULL);
2207 if (!hnode) {
2208 status = -EFAULT;
2209 } else {
2210 /* Check if event mask is a valid node related event */
2211 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2212 status = -EINVAL;
2214 /* Check if notify type is valid */
2215 if (notify_type != DSP_SIGNALEVENT)
2216 status = -EINVAL;
2218 /* Only one Notification can be registered at a
2219 * time - Limitation */
2220 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2221 status = -EINVAL;
2223 if (!status) {
2224 if (event_mask == DSP_NODESTATECHANGE) {
2225 status = ntfy_register(hnode->ntfy_obj, hnotification,
2226 event_mask & DSP_NODESTATECHANGE,
2227 notify_type);
2228 } else {
2229 /* Send Message part of event mask to msg_ctrl */
2230 intf_fxns = hnode->hnode_mgr->intf_fxns;
2231 status = (*intf_fxns->pfn_msg_register_notify)
2232 (hnode->msg_queue_obj,
2233 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2234 hnotification);
2238 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2239 "hnotification: %p status 0x%x\n", __func__, hnode,
2240 event_mask, notify_type, hnotification, status);
2241 return status;
2245 * ======== node_run ========
2246 * Purpose:
2247 * Start execution of a node's execute phase, or resume execution of a node
2248 * that has been suspended (via NODE_NodePause()) on the DSP. Load the
2249 * node's execute function if necessary.
2251 int node_run(struct node_object *hnode)
2253 struct node_object *pnode = (struct node_object *)hnode;
2254 struct node_mgr *hnode_mgr;
2255 enum node_type node_type;
2256 enum node_state state;
2257 u32 ul_execute_fxn;
2258 u32 ul_fxn_addr;
2259 int status = 0;
2260 u32 proc_id;
2261 struct bridge_drv_interface *intf_fxns;
2262 struct dsp_processorstate proc_state;
2263 struct proc_object *hprocessor;
2265 DBC_REQUIRE(refs > 0);
2267 if (!hnode) {
2268 status = -EFAULT;
2269 goto func_end;
2271 hprocessor = hnode->hprocessor;
2272 status = proc_get_state(hprocessor, &proc_state,
2273 sizeof(struct dsp_processorstate));
2274 if (status)
2275 goto func_end;
2276 /* If processor is in error state then don't attempt to run the node */
2277 if (proc_state.proc_state == PROC_ERROR) {
2278 status = -EPERM;
2279 goto func_end;
2281 node_type = node_get_type(hnode);
2282 if (node_type == NODE_DEVICE)
2283 status = -EPERM;
2284 if (status)
2285 goto func_end;
2287 hnode_mgr = hnode->hnode_mgr;
2288 if (!hnode_mgr) {
2289 status = -EFAULT;
2290 goto func_end;
2292 intf_fxns = hnode_mgr->intf_fxns;
2293 /* Enter critical section */
2294 mutex_lock(&hnode_mgr->node_mgr_lock);
2296 state = node_get_state(hnode);
2297 if (state != NODE_CREATED && state != NODE_PAUSED)
2298 status = -EBADR;
2300 if (!status)
2301 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2303 if (status)
2304 goto func_cont1;
2306 if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
2307 goto func_cont1;
2309 if (state == NODE_CREATED) {
2310 /* If node's execute function is not loaded, load it */
2311 if (!(hnode->loaded) && hnode->phase_split) {
2312 status =
2313 hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
2314 NLDR_EXECUTE);
2315 if (!status) {
2316 hnode->loaded = true;
2317 } else {
2318 pr_err("%s: fail - load execute code: 0x%x\n",
2319 __func__, status);
2322 if (!status) {
2323 /* Get address of node's execute function */
2324 if (proc_id == IVA_UNIT)
2325 ul_execute_fxn = (u32) hnode->node_env;
2326 else {
2327 status = get_fxn_address(hnode, &ul_execute_fxn,
2328 EXECUTEPHASE);
2331 if (!status) {
2332 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
2333 status =
2334 disp_node_run(hnode_mgr->disp_obj, hnode,
2335 ul_fxn_addr, ul_execute_fxn,
2336 hnode->node_env);
2338 } else if (state == NODE_PAUSED) {
2339 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
2340 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2341 ul_fxn_addr, hnode->node_env,
2342 NODE_GET_PRIORITY(hnode));
2343 } else {
2344 /* We should never get here */
2345 DBC_ASSERT(false);
2347 func_cont1:
2348 /* Update node state. */
2349 if (status >= 0)
2350 NODE_SET_STATE(hnode, NODE_RUNNING);
2351 else /* Set state back to previous value */
2352 NODE_SET_STATE(hnode, state);
2353 /*End of sync_enter_cs */
2354 /* Exit critical section */
2355 mutex_unlock(&hnode_mgr->node_mgr_lock);
2356 if (status >= 0) {
2357 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
2358 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2360 func_end:
2361 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2362 return status;
2366 * ======== node_terminate ========
2367 * Purpose:
2368 * Signal a node running on the DSP that it should exit its execute phase
2369 * function.
2371 int node_terminate(struct node_object *hnode, int *pstatus)
2373 struct node_object *pnode = (struct node_object *)hnode;
2374 struct node_mgr *hnode_mgr = NULL;
2375 enum node_type node_type;
2376 struct bridge_drv_interface *intf_fxns;
2377 enum node_state state;
2378 struct dsp_msg msg, killmsg;
2379 int status = 0;
2380 u32 proc_id, kill_time_out;
2381 struct deh_mgr *hdeh_mgr;
2382 struct dsp_processorstate proc_state;
2384 DBC_REQUIRE(refs > 0);
2385 DBC_REQUIRE(pstatus != NULL);
2387 if (!hnode || !hnode->hnode_mgr) {
2388 status = -EFAULT;
2389 goto func_end;
2391 if (pnode->hprocessor == NULL) {
2392 status = -EFAULT;
2393 goto func_end;
2395 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2397 if (!status) {
2398 hnode_mgr = hnode->hnode_mgr;
2399 node_type = node_get_type(hnode);
2400 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2401 status = -EPERM;
2403 if (!status) {
2404 /* Check node state */
2405 mutex_lock(&hnode_mgr->node_mgr_lock);
2406 state = node_get_state(hnode);
2407 if (state != NODE_RUNNING) {
2408 status = -EBADR;
2409 /* Set the exit status if node terminated on
2410 * its own. */
2411 if (state == NODE_DONE)
2412 *pstatus = hnode->exit_status;
2414 } else {
2415 NODE_SET_STATE(hnode, NODE_TERMINATING);
2417 /* end of sync_enter_cs */
2418 mutex_unlock(&hnode_mgr->node_mgr_lock);
2420 if (!status) {
2422 * Send exit message. Do not change state to NODE_DONE
2423 * here. That will be done in callback.
2425 status = proc_get_state(pnode->hprocessor, &proc_state,
2426 sizeof(struct dsp_processorstate));
2427 if (status)
2428 goto func_cont;
2429 /* If processor is in error state then don't attempt to send
2430 * A kill task command */
2431 if (proc_state.proc_state == PROC_ERROR) {
2432 status = -EPERM;
2433 goto func_cont;
2436 msg.dw_cmd = RMS_EXIT;
2437 msg.dw_arg1 = hnode->node_env;
2438 killmsg.dw_cmd = RMS_KILLTASK;
2439 killmsg.dw_arg1 = hnode->node_env;
2440 intf_fxns = hnode_mgr->intf_fxns;
2442 if (hnode->utimeout > MAXTIMEOUT)
2443 kill_time_out = MAXTIMEOUT;
2444 else
2445 kill_time_out = (hnode->utimeout) * 2;
2447 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
2448 hnode->utimeout);
2449 if (status)
2450 goto func_cont;
2453 * Wait on synchronization object that will be
2454 * posted in the callback on receiving RMS_EXIT
2455 * message, or by node_delete. Check for valid hnode,
2456 * in case posted by node_delete().
2458 status = sync_wait_on_event(hnode->sync_done,
2459 kill_time_out / 2);
2460 if (status != ETIME)
2461 goto func_cont;
2463 status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
2464 &killmsg, hnode->utimeout);
2465 if (status)
2466 goto func_cont;
2467 status = sync_wait_on_event(hnode->sync_done,
2468 kill_time_out / 2);
2469 if (status) {
2471 * Here it goes the part of the simulation of
2472 * the DSP exception.
2474 dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
2475 if (!hdeh_mgr)
2476 goto func_cont;
2478 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
2481 func_cont:
2482 if (!status) {
2483 /* Enter CS before getting exit status, in case node was
2484 * deleted. */
2485 mutex_lock(&hnode_mgr->node_mgr_lock);
2486 /* Make sure node wasn't deleted while we blocked */
2487 if (!hnode) {
2488 status = -EPERM;
2489 } else {
2490 *pstatus = hnode->exit_status;
2491 dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
2492 __func__, hnode, hnode->node_env, status);
2494 mutex_unlock(&hnode_mgr->node_mgr_lock);
2495 } /*End of sync_enter_cs */
2496 func_end:
2497 return status;
2501 * ======== delete_node ========
2502 * Purpose:
2503 * Free GPP resources allocated in node_allocate() or node_connect().
2505 static void delete_node(struct node_object *hnode,
2506 struct process_context *pr_ctxt)
2508 struct node_mgr *hnode_mgr;
2509 struct cmm_xlatorobject *xlator;
2510 struct bridge_drv_interface *intf_fxns;
2511 u32 i;
2512 enum node_type node_type;
2513 struct stream_chnl stream;
2514 struct node_msgargs node_msg_args;
2515 struct node_taskargs task_arg_obj;
2516 #ifdef DSP_DMM_DEBUG
2517 struct dmm_object *dmm_mgr;
2518 struct proc_object *p_proc_object =
2519 (struct proc_object *)hnode->hprocessor;
2520 #endif
2521 int status;
2522 if (!hnode)
2523 goto func_end;
2524 hnode_mgr = hnode->hnode_mgr;
2525 if (!hnode_mgr)
2526 goto func_end;
2527 xlator = hnode->xlator;
2528 node_type = node_get_type(hnode);
2529 if (node_type != NODE_DEVICE) {
2530 node_msg_args = hnode->create_args.asa.node_msg_args;
2531 kfree(node_msg_args.pdata);
2533 /* Free msg_ctrl queue */
2534 if (hnode->msg_queue_obj) {
2535 intf_fxns = hnode_mgr->intf_fxns;
2536 (*intf_fxns->pfn_msg_delete_queue) (hnode->
2537 msg_queue_obj);
2538 hnode->msg_queue_obj = NULL;
2541 kfree(hnode->sync_done);
2543 /* Free all stream info */
2544 if (hnode->inputs) {
2545 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2546 stream = hnode->inputs[i];
2547 free_stream(hnode_mgr, stream);
2549 kfree(hnode->inputs);
2550 hnode->inputs = NULL;
2552 if (hnode->outputs) {
2553 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2554 stream = hnode->outputs[i];
2555 free_stream(hnode_mgr, stream);
2557 kfree(hnode->outputs);
2558 hnode->outputs = NULL;
2560 task_arg_obj = hnode->create_args.asa.task_arg_obj;
2561 if (task_arg_obj.strm_in_def) {
2562 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2563 kfree(task_arg_obj.strm_in_def[i].sz_device);
2564 task_arg_obj.strm_in_def[i].sz_device = NULL;
2566 kfree(task_arg_obj.strm_in_def);
2567 task_arg_obj.strm_in_def = NULL;
2569 if (task_arg_obj.strm_out_def) {
2570 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2571 kfree(task_arg_obj.strm_out_def[i].sz_device);
2572 task_arg_obj.strm_out_def[i].sz_device = NULL;
2574 kfree(task_arg_obj.strm_out_def);
2575 task_arg_obj.strm_out_def = NULL;
2577 if (task_arg_obj.udsp_heap_res_addr) {
2578 status = proc_un_map(hnode->hprocessor, (void *)
2579 task_arg_obj.udsp_heap_addr,
2580 pr_ctxt);
2582 status = proc_un_reserve_memory(hnode->hprocessor,
2583 (void *)
2584 task_arg_obj.
2585 udsp_heap_res_addr,
2586 pr_ctxt);
2587 #ifdef DSP_DMM_DEBUG
2588 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2589 if (dmm_mgr)
2590 dmm_mem_map_dump(dmm_mgr);
2591 else
2592 status = DSP_EHANDLE;
2593 #endif
2596 if (node_type != NODE_MESSAGE) {
2597 kfree(hnode->stream_connect);
2598 hnode->stream_connect = NULL;
2600 kfree(hnode->pstr_dev_name);
2601 hnode->pstr_dev_name = NULL;
2603 if (hnode->ntfy_obj) {
2604 ntfy_delete(hnode->ntfy_obj);
2605 kfree(hnode->ntfy_obj);
2606 hnode->ntfy_obj = NULL;
2609 /* These were allocated in dcd_get_object_def (via node_allocate) */
2610 kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
2611 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;
2613 kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
2614 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;
2616 kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
2617 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;
2619 kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
2620 hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;
2622 /* Free all SM address translator resources */
2623 if (xlator) {
2624 (void)cmm_xlator_delete(xlator, true); /* force free */
2625 xlator = NULL;
2628 kfree(hnode->nldr_node_obj);
2629 hnode->nldr_node_obj = NULL;
2630 hnode->hnode_mgr = NULL;
2631 kfree(hnode);
2632 hnode = NULL;
2633 func_end:
2634 return;
2638 * ======== delete_node_mgr ========
2639 * Purpose:
2640 * Frees the node manager.
2642 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2644 struct node_object *hnode;
2646 if (hnode_mgr) {
2647 /* Free resources */
2648 if (hnode_mgr->hdcd_mgr)
2649 dcd_destroy_manager(hnode_mgr->hdcd_mgr);
2651 /* Remove any elements remaining in lists */
2652 if (hnode_mgr->node_list) {
2653 while ((hnode = (struct node_object *)
2654 lst_get_head(hnode_mgr->node_list)))
2655 delete_node(hnode, NULL);
2657 DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
2658 kfree(hnode_mgr->node_list);
2660 mutex_destroy(&hnode_mgr->node_mgr_lock);
2661 if (hnode_mgr->ntfy_obj) {
2662 ntfy_delete(hnode_mgr->ntfy_obj);
2663 kfree(hnode_mgr->ntfy_obj);
2666 if (hnode_mgr->pipe_map)
2667 gb_delete(hnode_mgr->pipe_map);
2669 if (hnode_mgr->pipe_done_map)
2670 gb_delete(hnode_mgr->pipe_done_map);
2672 if (hnode_mgr->chnl_map)
2673 gb_delete(hnode_mgr->chnl_map);
2675 if (hnode_mgr->dma_chnl_map)
2676 gb_delete(hnode_mgr->dma_chnl_map);
2678 if (hnode_mgr->zc_chnl_map)
2679 gb_delete(hnode_mgr->zc_chnl_map);
2681 if (hnode_mgr->disp_obj)
2682 disp_delete(hnode_mgr->disp_obj);
2684 if (hnode_mgr->strm_mgr_obj)
2685 strm_delete(hnode_mgr->strm_mgr_obj);
2687 /* Delete the loader */
2688 if (hnode_mgr->nldr_obj)
2689 hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);
2691 if (hnode_mgr->loader_init)
2692 hnode_mgr->nldr_fxns.pfn_exit();
2694 kfree(hnode_mgr);
2699 * ======== fill_stream_connect ========
2700 * Purpose:
2701 * Fills stream information.
2703 static void fill_stream_connect(struct node_object *node1,
2704 struct node_object *node2,
2705 u32 stream1, u32 stream2)
2707 u32 strm_index;
2708 struct dsp_streamconnect *strm1 = NULL;
2709 struct dsp_streamconnect *strm2 = NULL;
2710 enum node_type node1_type = NODE_TASK;
2711 enum node_type node2_type = NODE_TASK;
2713 node1_type = node_get_type(node1);
2714 node2_type = node_get_type(node2);
2715 if (node1 != (struct node_object *)DSP_HGPPNODE) {
2717 if (node1_type != NODE_DEVICE) {
2718 strm_index = node1->num_inputs +
2719 node1->num_outputs - 1;
2720 strm1 = &(node1->stream_connect[strm_index]);
2721 strm1->cb_struct = sizeof(struct dsp_streamconnect);
2722 strm1->this_node_stream_index = stream1;
2725 if (node2 != (struct node_object *)DSP_HGPPNODE) {
2726 /* NODE == > NODE */
2727 if (node1_type != NODE_DEVICE) {
2728 strm1->connected_node = node2;
2729 strm1->ui_connected_node_id = node2->node_uuid;
2730 strm1->connected_node_stream_index = stream2;
2731 strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
2733 if (node2_type != NODE_DEVICE) {
2734 strm_index = node2->num_inputs +
2735 node2->num_outputs - 1;
2736 strm2 = &(node2->stream_connect[strm_index]);
2737 strm2->cb_struct =
2738 sizeof(struct dsp_streamconnect);
2739 strm2->this_node_stream_index = stream2;
2740 strm2->connected_node = node1;
2741 strm2->ui_connected_node_id = node1->node_uuid;
2742 strm2->connected_node_stream_index = stream1;
2743 strm2->connect_type = CONNECTTYPE_NODEINPUT;
2745 } else if (node1_type != NODE_DEVICE)
2746 strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
2747 } else {
2748 /* GPP == > NODE */
2749 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
2750 strm_index = node2->num_inputs + node2->num_outputs - 1;
2751 strm2 = &(node2->stream_connect[strm_index]);
2752 strm2->cb_struct = sizeof(struct dsp_streamconnect);
2753 strm2->this_node_stream_index = stream2;
2754 strm2->connect_type = CONNECTTYPE_GPPINPUT;
2759 * ======== fill_stream_def ========
2760 * Purpose:
2761 * Fills Stream attributes.
2763 static void fill_stream_def(struct node_object *hnode,
2764 struct node_strmdef *pstrm_def,
2765 struct dsp_strmattr *pattrs)
2767 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2769 if (pattrs != NULL) {
2770 pstrm_def->num_bufs = pattrs->num_bufs;
2771 pstrm_def->buf_size =
2772 pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
2773 pstrm_def->seg_id = pattrs->seg_id;
2774 pstrm_def->buf_alignment = pattrs->buf_alignment;
2775 pstrm_def->utimeout = pattrs->utimeout;
2776 } else {
2777 pstrm_def->num_bufs = DEFAULTNBUFS;
2778 pstrm_def->buf_size =
2779 DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
2780 pstrm_def->seg_id = DEFAULTSEGID;
2781 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2782 pstrm_def->utimeout = DEFAULTTIMEOUT;
2787 * ======== free_stream ========
2788 * Purpose:
2789 * Updates the channel mask and frees the pipe id.
2791 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2793 /* Free up the pipe id unless other node has not yet been deleted. */
2794 if (stream.type == NODECONNECT) {
2795 if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
2796 /* The other node has already been deleted */
2797 gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
2798 gb_clear(hnode_mgr->pipe_map, stream.dev_id);
2799 } else {
2800 /* The other node has not been deleted yet */
2801 gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
2803 } else if (stream.type == HOSTCONNECT) {
2804 if (stream.dev_id < hnode_mgr->ul_num_chnls) {
2805 gb_clear(hnode_mgr->chnl_map, stream.dev_id);
2806 } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
2807 /* dsp-dma */
2808 gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
2809 (1 * hnode_mgr->ul_num_chnls));
2810 } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
2811 /* zero-copy */
2812 gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
2813 (2 * hnode_mgr->ul_num_chnls));
2819 * ======== get_fxn_address ========
2820 * Purpose:
2821 * Retrieves the address for create, execute or delete phase for a node.
2823 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2824 u32 phase)
2826 char *pstr_fxn_name = NULL;
2827 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2828 int status = 0;
2829 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2830 node_get_type(hnode) == NODE_DAISSOCKET ||
2831 node_get_type(hnode) == NODE_MESSAGE);
2833 switch (phase) {
2834 case CREATEPHASE:
2835 pstr_fxn_name =
2836 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
2837 break;
2838 case EXECUTEPHASE:
2839 pstr_fxn_name =
2840 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
2841 break;
2842 case DELETEPHASE:
2843 pstr_fxn_name =
2844 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
2845 break;
2846 default:
2847 /* Should never get here */
2848 DBC_ASSERT(false);
2849 break;
2852 status =
2853 hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
2854 pstr_fxn_name, fxn_addr);
2856 return status;
2860 * ======== get_node_info ========
2861 * Purpose:
2862 * Retrieves the node information.
2864 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2866 u32 i;
2868 DBC_REQUIRE(hnode);
2869 DBC_REQUIRE(node_info != NULL);
2871 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2872 node_info->nb_node_database_props =
2873 hnode->dcd_props.obj_data.node_obj.ndb_props;
2874 node_info->execution_priority = hnode->prio;
2875 node_info->device_owner = hnode->device_owner;
2876 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2877 node_info->node_env = hnode->node_env;
2879 node_info->ns_execution_state = node_get_state(hnode);
2881 /* Copy stream connect data */
2882 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2883 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2888 * ======== get_node_props ========
2889 * Purpose:
2890 * Retrieve node properties.
2892 static int get_node_props(struct dcd_manager *hdcd_mgr,
2893 struct node_object *hnode,
2894 const struct dsp_uuid *node_uuid,
2895 struct dcd_genericobj *dcd_prop)
2897 u32 len;
2898 struct node_msgargs *pmsg_args;
2899 struct node_taskargs *task_arg_obj;
2900 enum node_type node_type = NODE_TASK;
2901 struct dsp_ndbprops *pndb_props =
2902 &(dcd_prop->obj_data.node_obj.ndb_props);
2903 int status = 0;
2904 char sz_uuid[MAXUUIDLEN];
2906 status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2907 DSP_DCDNODETYPE, dcd_prop);
2909 if (!status) {
2910 hnode->ntype = node_type = pndb_props->ntype;
2912 /* Create UUID value to set in registry. */
2913 uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
2914 MAXUUIDLEN);
2915 dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2917 /* Fill in message args that come from NDB */
2918 if (node_type != NODE_DEVICE) {
2919 pmsg_args = &(hnode->create_args.asa.node_msg_args);
2920 pmsg_args->seg_id =
2921 dcd_prop->obj_data.node_obj.msg_segid;
2922 pmsg_args->notify_type =
2923 dcd_prop->obj_data.node_obj.msg_notify_type;
2924 pmsg_args->max_msgs = pndb_props->message_depth;
2925 dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2926 pmsg_args->max_msgs);
2927 } else {
2928 /* Copy device name */
2929 DBC_REQUIRE(pndb_props->ac_name);
2930 len = strlen(pndb_props->ac_name);
2931 DBC_ASSERT(len < MAXDEVNAMELEN);
2932 hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
2933 if (hnode->pstr_dev_name == NULL) {
2934 status = -ENOMEM;
2935 } else {
2936 strncpy(hnode->pstr_dev_name,
2937 pndb_props->ac_name, len);
2941 if (!status) {
2942 /* Fill in create args that come from NDB */
2943 if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2944 task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2945 task_arg_obj->prio = pndb_props->prio;
2946 task_arg_obj->stack_size = pndb_props->stack_size;
2947 task_arg_obj->sys_stack_size =
2948 pndb_props->sys_stack_size;
2949 task_arg_obj->stack_seg = pndb_props->stack_seg;
2950 dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2951 "0x%x words System Stack Size: 0x%x words "
2952 "Stack Segment: 0x%x profile count : 0x%x\n",
2953 task_arg_obj->prio, task_arg_obj->stack_size,
2954 task_arg_obj->sys_stack_size,
2955 task_arg_obj->stack_seg,
2956 pndb_props->count_profiles);
2960 return status;
2964 * ======== get_proc_props ========
2965 * Purpose:
2966 * Retrieve the processor properties.
2968 static int get_proc_props(struct node_mgr *hnode_mgr,
2969 struct dev_object *hdev_obj)
2971 struct cfg_hostres *host_res;
2972 struct bridge_dev_context *pbridge_context;
2973 int status = 0;
2975 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2976 if (!pbridge_context)
2977 status = -EFAULT;
2979 if (!status) {
2980 host_res = pbridge_context->resources;
2981 if (!host_res)
2982 return -EPERM;
2983 hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
2984 hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
2985 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
2988 * PROC will add an API to get dsp_processorinfo.
2989 * Fill in default values for now.
2991 /* TODO -- Instead of hard coding, take from registry */
2992 hnode_mgr->proc_family = 6000;
2993 hnode_mgr->proc_type = 6410;
2994 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2995 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2996 hnode_mgr->udsp_word_size = DSPWORDSIZE;
2997 hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
2998 hnode_mgr->udsp_mau_size = 1;
3001 return status;
3005 * ======== node_get_uuid_props ========
3006 * Purpose:
3007 * Fetch Node UUID properties from DCD/DOF file.
3009 int node_get_uuid_props(void *hprocessor,
3010 const struct dsp_uuid *node_uuid,
3011 struct dsp_ndbprops *node_props)
3013 struct node_mgr *hnode_mgr = NULL;
3014 struct dev_object *hdev_obj;
3015 int status = 0;
3016 struct dcd_nodeprops dcd_node_props;
3017 struct dsp_processorstate proc_state;
3019 DBC_REQUIRE(refs > 0);
3020 DBC_REQUIRE(hprocessor != NULL);
3021 DBC_REQUIRE(node_uuid != NULL);
3023 if (hprocessor == NULL || node_uuid == NULL) {
3024 status = -EFAULT;
3025 goto func_end;
3027 status = proc_get_state(hprocessor, &proc_state,
3028 sizeof(struct dsp_processorstate));
3029 if (status)
3030 goto func_end;
3031 /* If processor is in error state then don't attempt
3032 to send the message */
3033 if (proc_state.proc_state == PROC_ERROR) {
3034 status = -EPERM;
3035 goto func_end;
3038 status = proc_get_dev_object(hprocessor, &hdev_obj);
3039 if (hdev_obj) {
3040 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
3041 if (hnode_mgr == NULL) {
3042 status = -EFAULT;
3043 goto func_end;
3048 * Enter the critical section. This is needed because
3049 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3050 * which needs to be protected in order to not corrupt the zlib manager
3051 * (COD).
3053 mutex_lock(&hnode_mgr->node_mgr_lock);
3055 dcd_node_props.pstr_create_phase_fxn = NULL;
3056 dcd_node_props.pstr_execute_phase_fxn = NULL;
3057 dcd_node_props.pstr_delete_phase_fxn = NULL;
3058 dcd_node_props.pstr_i_alg_name = NULL;
3060 status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
3061 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
3062 (struct dcd_genericobj *)&dcd_node_props);
3064 if (!status) {
3065 *node_props = dcd_node_props.ndb_props;
3066 kfree(dcd_node_props.pstr_create_phase_fxn);
3068 kfree(dcd_node_props.pstr_execute_phase_fxn);
3070 kfree(dcd_node_props.pstr_delete_phase_fxn);
3072 kfree(dcd_node_props.pstr_i_alg_name);
3074 /* Leave the critical section, we're done. */
3075 mutex_unlock(&hnode_mgr->node_mgr_lock);
3076 func_end:
3077 return status;
3081 * ======== get_rms_fxns ========
3082 * Purpose:
3083 * Retrieve the RMS functions.
3085 static int get_rms_fxns(struct node_mgr *hnode_mgr)
3087 s32 i;
3088 struct dev_object *dev_obj = hnode_mgr->hdev_obj;
3089 int status = 0;
3091 static char *psz_fxns[NUMRMSFXNS] = {
3092 "RMS_queryServer", /* RMSQUERYSERVER */
3093 "RMS_configureServer", /* RMSCONFIGURESERVER */
3094 "RMS_createNode", /* RMSCREATENODE */
3095 "RMS_executeNode", /* RMSEXECUTENODE */
3096 "RMS_deleteNode", /* RMSDELETENODE */
3097 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3098 "RMS_readMemory", /* RMSREADMEMORY */
3099 "RMS_writeMemory", /* RMSWRITEMEMORY */
3100 "RMS_copy", /* RMSCOPY */
3103 for (i = 0; i < NUMRMSFXNS; i++) {
3104 status = dev_get_symbol(dev_obj, psz_fxns[i],
3105 &(hnode_mgr->ul_fxn_addrs[i]));
3106 if (status) {
3107 if (status == -ESPIPE) {
3109 * May be loaded dynamically (in the future),
3110 * but return an error for now.
3112 dev_dbg(bridge, "%s: RMS function: %s currently"
3113 " not loaded\n", __func__, psz_fxns[i]);
3114 } else {
3115 dev_dbg(bridge, "%s: Symbol not found: %s "
3116 "status = 0x%x\n", __func__,
3117 psz_fxns[i], status);
3118 break;
3123 return status;
3127 * ======== ovly ========
3128 * Purpose:
3129 * Called during overlay.Sends command to RMS to copy a block of data.
3131 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3132 u32 ul_num_bytes, u32 mem_space)
3134 struct node_object *hnode = (struct node_object *)priv_ref;
3135 struct node_mgr *hnode_mgr;
3136 u32 ul_bytes = 0;
3137 u32 ul_size;
3138 u32 ul_timeout;
3139 int status = 0;
3140 struct bridge_dev_context *hbridge_context;
3141 /* Function interface to Bridge driver*/
3142 struct bridge_drv_interface *intf_fxns;
3144 DBC_REQUIRE(hnode);
3146 hnode_mgr = hnode->hnode_mgr;
3148 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
3149 ul_timeout = hnode->utimeout;
3151 /* Call new MemCopy function */
3152 intf_fxns = hnode_mgr->intf_fxns;
3153 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3154 if (!status) {
3155 status =
3156 (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
3157 dsp_run_addr, dsp_load_addr,
3158 ul_num_bytes, (u32) mem_space);
3159 if (!status)
3160 ul_bytes = ul_num_bytes;
3161 else
3162 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3163 __func__, status);
3164 } else {
3165 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3166 __func__, status);
3169 return ul_bytes;
3173 * ======== mem_write ========
3175 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3176 u32 ul_num_bytes, u32 mem_space)
3178 struct node_object *hnode = (struct node_object *)priv_ref;
3179 struct node_mgr *hnode_mgr;
3180 u16 mem_sect_type;
3181 u32 ul_timeout;
3182 int status = 0;
3183 struct bridge_dev_context *hbridge_context;
3184 /* Function interface to Bridge driver */
3185 struct bridge_drv_interface *intf_fxns;
3187 DBC_REQUIRE(hnode);
3188 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3190 hnode_mgr = hnode->hnode_mgr;
3192 ul_timeout = hnode->utimeout;
3193 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3195 /* Call new MemWrite function */
3196 intf_fxns = hnode_mgr->intf_fxns;
3197 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3198 status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
3199 dsp_add, ul_num_bytes, mem_sect_type);
3201 return ul_num_bytes;
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
 *  ======== node_find_addr ========
 *  Purpose:
 *      Ask each node's loader object to resolve sym_addr to a symbol, for
 *      backtrace support. Returns 0 on the first match, -ENOENT otherwise.
 */
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		   u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *node_obj;
	int status = -ENOENT;
	u32 i;

	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
		 (unsigned int) node_mgr,
		 sym_addr, offset_range,
		 (unsigned int) sym_addr_output, sym_name);

	/* Walk the manager's node list, one entry per registered node */
	node_obj = (struct node_object *)(node_mgr->node_list->head.next);

	for (i = 0; i < node_mgr->num_nodes; i++) {
		status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
					offset_range, sym_addr_output,
					sym_name);
		if (!status)
			break;

		node_obj = (struct node_object *)(node_obj->list_elem.next);
	}

	return status;
}
#endif