4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <dspbridge/host_os.h>
21 #include <dspbridge/std.h>
22 #include <dspbridge/dbdefs.h>
24 #include <dspbridge/dbc.h>
26 /* Platform manager */
27 #include <dspbridge/cod.h>
28 #include <dspbridge/dev.h>
30 /* Resource manager */
31 #include <dspbridge/dbll.h>
32 #include <dspbridge/dbdcd.h>
33 #include <dspbridge/rmm.h>
34 #include <dspbridge/uuidutil.h>
36 #include <dspbridge/nldr.h>
/* Name of section containing dynamic load mem segment info (in base image) */
#define DYNMEMSECT ".dspbridge_mem"

/* Name of section containing dependent library information */
#define DEPLIBSECT ".dspbridge_deplibs"
44 /* Max depth of recursion for loading node's dependent libraries */
47 /* Max number of persistent libraries kept by a node */
51 * Defines for extracting packed dynamic load memory requirements from two
53 * These defines must match node.cdb and dynm.cdb
54 * Format of data/code mask is:
55 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
 * cccccc = preferred/required dynamic mem segid for create phase data/code
 * dddddd = preferred/required dynamic mem segid for delete phase data/code
 * eeeeee = preferred/req. dynamic mem segid for execute phase data/code
61 * f = flag indicating if memory is preferred or required:
62 * f = 1 if required, f = 0 if preferred.
64 * The 6 bits of the segid are interpreted as follows:
66 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
67 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
68 * If the 6th bit (bit 5) is set, segid has the following interpretation:
69 * segid = 32 - Any internal memory segment can be used.
70 * segid = 33 - Any external memory segment can be used.
71 * segid = 63 - Any memory segment can be used (in this case the
72 * required/preferred flag is irrelevant).
/* Maximum allowed dynamic loading memory segments */

#define MAXSEGID	3	/* Largest possible (real) segid */
#define MEMINTERNALID	32	/* Segid meaning use internal mem */
#define MEMEXTERNALID	33	/* Segid meaning use external mem */
#define NULLID		63	/* Segid meaning no memory req/pref */
#define FLAGBIT		7	/* 7th bit is pref./req. flag */
#define SEGMASK		0x3f	/* Bits 0 - 5 */

#define CREATEBIT	0	/* Create segid starts at bit 0 */
#define DELETEBIT	8	/* Delete segid starts at bit 8 */
#define EXECUTEBIT	16	/* Execute segid starts at bit 16 */

/*
 * Masks that define memory type. Must match defines in dynm.cdb.
 */
#define DYNM_CODEDATA	(DYNM_CODE | DYNM_DATA)
#define DYNM_INTERNAL	0x8
#define DYNM_EXTERNAL	0x10

/*
 * Defines for packing memory requirement/preference flags for code and
 * data of each of the node's phases into one mask.
 * The bit is set if the segid is required for loading code/data of the
 * given phase. The bit is not set, if the segid is preferred only.
 *
 * These defines are also used as indices into a segid array for the node.
 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
 * create phase data is required or preferred to be loaded into.
 */
#define CREATEDATAFLAGBIT	0
#define CREATECODEFLAGBIT	1
#define EXECUTEDATAFLAGBIT	2
#define EXECUTECODEFLAGBIT	3
#define DELETEDATAFLAGBIT	4
#define DELETECODEFLAGBIT	5

/*
 * True if 'segid' maps to internal (resp. external) DSP memory: either a
 * real segment (0..MAXSEGID) whose seg_table entry carries the matching
 * DYNM_* type bit, or the catch-all MEMINTERNALID/MEMEXTERNALID segid.
 * Macro arguments are fully parenthesized (PRE01-C); note that 'segid'
 * is evaluated more than once, so avoid arguments with side effects.
 */
#define IS_INTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
	((nldr_obj)->seg_table[(segid)] & DYNM_INTERNAL)) || \
	(segid) == MEMINTERNALID)

#define IS_EXTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
	((nldr_obj)->seg_table[(segid)] & DYNM_EXTERNAL)) || \
	(segid) == MEMEXTERNALID)
/*
 * Byte-order helpers: reverse the byte order of a 32-bit / 16-bit value.
 * Arguments are evaluated multiple times; pass side-effect-free values.
 */
#define SWAPLONG(x) ((((x) >> 24) & 0xFF) | (((x) >> 8) & 0xFF00L) | \
	(((x) << 8) & 0xFF0000L) | (((x) << 24) & 0xFF000000))

#define SWAPWORD(x) ((((x) >> 8) & 0xFF) | (((x) << 8) & 0xFF00))
/*
 * These names may be embedded in overlay sections to identify in which
 * node phase the section should be overlaid; add_ovly_info() matches
 * section names against them to pick the per-phase section list.
 */
#define PCREATE "create"
#define PDELETE "delete"
#define PEXECUTE "execute"
/*
 * Compare two dsp_uuid structures field by field.
 * The trailing six bytes (uc_data6) are raw UUID bytes, not a C string:
 * they may legitimately contain 0x00, so they must be compared with
 * memcmp(); strncmp() would stop at the first zero byte and report
 * false matches.
 */
#define IS_EQUAL_UUID(uuid1, uuid2) (\
	((uuid1).ul_data1 == (uuid2).ul_data1) && \
	((uuid1).us_data2 == (uuid2).us_data2) && \
	((uuid1).us_data3 == (uuid2).us_data3) && \
	((uuid1).uc_data4 == (uuid2).uc_data4) && \
	((uuid1).uc_data5 == (uuid2).uc_data5) && \
	(memcmp((void *)(uuid1).uc_data6, (void *)(uuid2).uc_data6, 6) == 0))
146 * ======== mem_seg_info ========
147 * Format of dynamic loading memory segment info in coff file.
148 * Must match dynm.h55.
150 struct mem_seg_info
{
151 u32 segid
; /* Dynamic loading memory segment number */
154 u32 type
; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
158 * ======== lib_node ========
159 * For maintaining a tree of library dependencies.
162 struct dbll_library_obj
*lib
; /* The library */
163 u16 dep_libs
; /* Number of dependent libraries */
164 struct lib_node
*dep_libs_tree
; /* Dependent libraries of lib */
168 * ======== ovly_sect ========
169 * Information needed to overlay a section.
172 struct ovly_sect
*next_sect
;
173 u32 sect_load_addr
; /* Load address of section */
174 u32 sect_run_addr
; /* Run address of section */
175 u32 size
; /* Size of section */
176 u16 page
; /* DBL_CODE, DBL_DATA */
180 * ======== ovly_node ========
181 * For maintaining a list of overlay nodes, with sections that need to be
182 * overlayed for each of the nodes phases.
185 struct dsp_uuid uuid
;
187 struct ovly_sect
*create_sects_list
;
188 struct ovly_sect
*delete_sects_list
;
189 struct ovly_sect
*execute_sects_list
;
190 struct ovly_sect
*other_sects_list
;
202 * ======== nldr_object ========
203 * Overlay loader object.
206 struct dev_object
*hdev_obj
; /* Device object */
207 struct dcd_manager
*hdcd_mgr
; /* Proc/Node data manager */
208 struct dbll_tar_obj
*dbll
; /* The DBL loader */
209 struct dbll_library_obj
*base_lib
; /* Base image library */
210 struct rmm_target_obj
*rmm
; /* Remote memory manager for DSP */
211 struct dbll_fxns ldr_fxns
; /* Loader function table */
212 struct dbll_attrs ldr_attrs
; /* attrs to pass to loader functions */
213 nldr_ovlyfxn ovly_fxn
; /* "write" for overlay nodes */
214 nldr_writefxn write_fxn
; /* "write" for dynamic nodes */
215 struct ovly_node
*ovly_table
; /* Table of overlay nodes */
216 u16 ovly_nodes
; /* Number of overlay nodes in base */
217 u16 ovly_nid
; /* Index for tracking overlay nodes */
218 u16 dload_segs
; /* Number of dynamic load mem segs */
219 u32
*seg_table
; /* memtypes of dynamic memory segs
222 u16 us_dsp_mau_size
; /* Size of DSP MAU */
223 u16 us_dsp_word_size
; /* Size of DSP word */
227 * ======== nldr_nodeobject ========
228 * Dynamic node object. This object is created when a node is allocated.
230 struct nldr_nodeobject
{
231 struct nldr_object
*nldr_obj
; /* Dynamic loader handle */
232 void *priv_ref
; /* Handle to pass to dbl_write_fxn */
233 struct dsp_uuid uuid
; /* Node's UUID */
234 bool dynamic
; /* Dynamically loaded node? */
235 bool overlay
; /* Overlay node? */
236 bool *pf_phase_split
; /* Multiple phase libraries? */
237 struct lib_node root
; /* Library containing node phase */
238 struct lib_node create_lib
; /* Library with create phase lib */
239 struct lib_node execute_lib
; /* Library with execute phase lib */
240 struct lib_node delete_lib
; /* Library with delete phase lib */
241 /* libs remain loaded until Delete */
242 struct lib_node pers_lib_table
[MAXLIBS
];
243 s32 pers_libs
; /* Number of persistent libraries */
244 /* Path in lib dependency tree */
245 struct dbll_library_obj
*lib_path
[MAXDEPTH
+ 1];
246 enum nldr_phase phase
; /* Node phase currently being loaded */
249 * Dynamic loading memory segments for data and code of each phase.
251 u16 seg_id
[MAXFLAGS
];
254 * Mask indicating whether each mem segment specified in seg_id[]
255 * is preferred or required.
257 * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
258 * then it is required to load execute phase data into the memory
259 * specified by seg_id[EXECUTEDATAFLAGBIT].
261 u32 code_data_flag_mask
;
264 /* Dynamic loader function table */
265 static struct dbll_fxns ldr_fxns
= {
266 (dbll_close_fxn
) dbll_close
,
267 (dbll_create_fxn
) dbll_create
,
268 (dbll_delete_fxn
) dbll_delete
,
269 (dbll_exit_fxn
) dbll_exit
,
270 (dbll_get_attrs_fxn
) dbll_get_attrs
,
271 (dbll_get_addr_fxn
) dbll_get_addr
,
272 (dbll_get_c_addr_fxn
) dbll_get_c_addr
,
273 (dbll_get_sect_fxn
) dbll_get_sect
,
274 (dbll_init_fxn
) dbll_init
,
275 (dbll_load_fxn
) dbll_load
,
276 (dbll_load_sect_fxn
) dbll_load_sect
,
277 (dbll_open_fxn
) dbll_open
,
278 (dbll_read_sect_fxn
) dbll_read_sect
,
279 (dbll_set_attrs_fxn
) dbll_set_attrs
,
280 (dbll_unload_fxn
) dbll_unload
,
281 (dbll_unload_sect_fxn
) dbll_unload_sect
,
284 static u32 refs
; /* module reference count */
286 static int add_ovly_info(void *handle
, struct dbll_sect_info
*sect_info
,
287 u32 addr
, u32 bytes
);
288 static int add_ovly_node(struct dsp_uuid
*uuid_obj
,
289 enum dsp_dcdobjtype obj_type
, IN
void *handle
);
290 static int add_ovly_sect(struct nldr_object
*nldr_obj
,
291 struct ovly_sect
**pList
,
292 struct dbll_sect_info
*pSectInfo
,
293 bool *exists
, u32 addr
, u32 bytes
);
294 static s32
fake_ovly_write(void *handle
, u32 dsp_address
, void *buf
, u32 bytes
,
296 static void free_sects(struct nldr_object
*nldr_obj
,
297 struct ovly_sect
*phase_sects
, u16 alloc_num
);
298 static bool get_symbol_value(void *handle
, void *parg
, void *rmm_handle
,
299 char *symName
, struct dbll_sym_val
**sym
);
300 static int load_lib(struct nldr_nodeobject
*nldr_node_obj
,
301 struct lib_node
*root
, struct dsp_uuid uuid
,
303 struct dbll_library_obj
**lib_path
,
304 enum nldr_phase phase
, u16 depth
);
305 static int load_ovly(struct nldr_nodeobject
*nldr_node_obj
,
306 enum nldr_phase phase
);
307 static int remote_alloc(void **pRef
, u16 mem_sect_type
, u32 size
,
308 u32 align
, u32
*dsp_address
,
309 OPTIONAL s32 segmentId
,
310 OPTIONAL s32 req
, bool reserve
);
311 static int remote_free(void **pRef
, u16 space
, u32 dsp_address
, u32 size
,
314 static void unload_lib(struct nldr_nodeobject
*nldr_node_obj
,
315 struct lib_node
*root
);
316 static void unload_ovly(struct nldr_nodeobject
*nldr_node_obj
,
317 enum nldr_phase phase
);
318 static bool find_in_persistent_lib_array(struct nldr_nodeobject
*nldr_node_obj
,
319 struct dbll_library_obj
*lib
);
320 static u32
find_lcm(u32 a
, u32 b
);
321 static u32
find_gcf(u32 a
, u32 b
);
324 * ======== nldr_allocate ========
326 int nldr_allocate(struct nldr_object
*nldr_obj
, void *priv_ref
,
327 IN CONST
struct dcd_nodeprops
*node_props
,
328 OUT
struct nldr_nodeobject
**phNldrNode
,
329 IN
bool *pf_phase_split
)
331 struct nldr_nodeobject
*nldr_node_obj
= NULL
;
334 DBC_REQUIRE(refs
> 0);
335 DBC_REQUIRE(node_props
!= NULL
);
336 DBC_REQUIRE(phNldrNode
!= NULL
);
337 DBC_REQUIRE(nldr_obj
);
339 /* Initialize handle in case of failure */
341 /* Allocate node object */
342 nldr_node_obj
= kzalloc(sizeof(struct nldr_nodeobject
), GFP_KERNEL
);
344 if (nldr_node_obj
== NULL
) {
347 nldr_node_obj
->pf_phase_split
= pf_phase_split
;
348 nldr_node_obj
->pers_libs
= 0;
349 nldr_node_obj
->nldr_obj
= nldr_obj
;
350 nldr_node_obj
->priv_ref
= priv_ref
;
351 /* Save node's UUID. */
352 nldr_node_obj
->uuid
= node_props
->ndb_props
.ui_node_id
;
354 * Determine if node is a dynamically loaded node from
357 if (node_props
->us_load_type
== NLDR_DYNAMICLOAD
) {
359 nldr_node_obj
->dynamic
= true;
361 * Extract memory requirements from ndb_props masks
364 nldr_node_obj
->seg_id
[CREATEDATAFLAGBIT
] = (u16
)
365 (node_props
->ul_data_mem_seg_mask
>> CREATEBIT
) &
367 nldr_node_obj
->code_data_flag_mask
|=
368 ((node_props
->ul_data_mem_seg_mask
>>
369 (CREATEBIT
+ FLAGBIT
)) & 1) << CREATEDATAFLAGBIT
;
370 nldr_node_obj
->seg_id
[CREATECODEFLAGBIT
] = (u16
)
371 (node_props
->ul_code_mem_seg_mask
>>
372 CREATEBIT
) & SEGMASK
;
373 nldr_node_obj
->code_data_flag_mask
|=
374 ((node_props
->ul_code_mem_seg_mask
>>
375 (CREATEBIT
+ FLAGBIT
)) & 1) << CREATECODEFLAGBIT
;
377 nldr_node_obj
->seg_id
[EXECUTEDATAFLAGBIT
] = (u16
)
378 (node_props
->ul_data_mem_seg_mask
>>
379 EXECUTEBIT
) & SEGMASK
;
380 nldr_node_obj
->code_data_flag_mask
|=
381 ((node_props
->ul_data_mem_seg_mask
>>
382 (EXECUTEBIT
+ FLAGBIT
)) & 1) <<
384 nldr_node_obj
->seg_id
[EXECUTECODEFLAGBIT
] = (u16
)
385 (node_props
->ul_code_mem_seg_mask
>>
386 EXECUTEBIT
) & SEGMASK
;
387 nldr_node_obj
->code_data_flag_mask
|=
388 ((node_props
->ul_code_mem_seg_mask
>>
389 (EXECUTEBIT
+ FLAGBIT
)) & 1) <<
392 nldr_node_obj
->seg_id
[DELETEDATAFLAGBIT
] = (u16
)
393 (node_props
->ul_data_mem_seg_mask
>> DELETEBIT
) &
395 nldr_node_obj
->code_data_flag_mask
|=
396 ((node_props
->ul_data_mem_seg_mask
>>
397 (DELETEBIT
+ FLAGBIT
)) & 1) << DELETEDATAFLAGBIT
;
398 nldr_node_obj
->seg_id
[DELETECODEFLAGBIT
] = (u16
)
399 (node_props
->ul_code_mem_seg_mask
>>
400 DELETEBIT
) & SEGMASK
;
401 nldr_node_obj
->code_data_flag_mask
|=
402 ((node_props
->ul_code_mem_seg_mask
>>
403 (DELETEBIT
+ FLAGBIT
)) & 1) << DELETECODEFLAGBIT
;
405 /* Non-dynamically loaded nodes are part of the
407 nldr_node_obj
->root
.lib
= nldr_obj
->base_lib
;
408 /* Check for overlay node */
409 if (node_props
->us_load_type
== NLDR_OVLYLOAD
)
410 nldr_node_obj
->overlay
= true;
413 *phNldrNode
= (struct nldr_nodeobject
*)nldr_node_obj
;
415 /* Cleanup on failure */
416 if (DSP_FAILED(status
) && nldr_node_obj
)
417 kfree(nldr_node_obj
);
419 DBC_ENSURE((DSP_SUCCEEDED(status
) && *phNldrNode
)
420 || (DSP_FAILED(status
) && *phNldrNode
== NULL
));
425 * ======== nldr_create ========
427 int nldr_create(OUT
struct nldr_object
**phNldr
,
428 struct dev_object
*hdev_obj
,
429 IN CONST
struct nldr_attrs
*pattrs
)
431 struct cod_manager
*cod_mgr
; /* COD manager */
432 char *psz_coff_buf
= NULL
;
433 char sz_zl_file
[COD_MAXPATHLENGTH
];
434 struct nldr_object
*nldr_obj
= NULL
;
435 struct dbll_attrs save_attrs
;
436 struct dbll_attrs new_attrs
;
440 struct mem_seg_info
*mem_info_obj
;
443 struct rmm_segment
*rmm_segs
= NULL
;
446 DBC_REQUIRE(refs
> 0);
447 DBC_REQUIRE(phNldr
!= NULL
);
448 DBC_REQUIRE(hdev_obj
!= NULL
);
449 DBC_REQUIRE(pattrs
!= NULL
);
450 DBC_REQUIRE(pattrs
->pfn_ovly
!= NULL
);
451 DBC_REQUIRE(pattrs
->pfn_write
!= NULL
);
453 /* Allocate dynamic loader object */
454 nldr_obj
= kzalloc(sizeof(struct nldr_object
), GFP_KERNEL
);
456 nldr_obj
->hdev_obj
= hdev_obj
;
457 /* warning, lazy status checking alert! */
458 dev_get_cod_mgr(hdev_obj
, &cod_mgr
);
460 status
= cod_get_loader(cod_mgr
, &nldr_obj
->dbll
);
461 DBC_ASSERT(DSP_SUCCEEDED(status
));
462 status
= cod_get_base_lib(cod_mgr
, &nldr_obj
->base_lib
);
463 DBC_ASSERT(DSP_SUCCEEDED(status
));
465 cod_get_base_name(cod_mgr
, sz_zl_file
,
467 DBC_ASSERT(DSP_SUCCEEDED(status
));
470 /* end lazy status checking */
471 nldr_obj
->us_dsp_mau_size
= pattrs
->us_dsp_mau_size
;
472 nldr_obj
->us_dsp_word_size
= pattrs
->us_dsp_word_size
;
473 nldr_obj
->ldr_fxns
= ldr_fxns
;
474 if (!(nldr_obj
->ldr_fxns
.init_fxn()))
480 /* Create the DCD Manager */
481 if (DSP_SUCCEEDED(status
))
482 status
= dcd_create_manager(NULL
, &nldr_obj
->hdcd_mgr
);
484 /* Get dynamic loading memory sections from base lib */
485 if (DSP_SUCCEEDED(status
)) {
487 nldr_obj
->ldr_fxns
.get_sect_fxn(nldr_obj
->base_lib
,
488 DYNMEMSECT
, &ul_addr
,
490 if (DSP_SUCCEEDED(status
)) {
492 kzalloc(ul_len
* nldr_obj
->us_dsp_mau_size
,
497 /* Ok to not have dynamic loading memory */
500 dev_dbg(bridge
, "%s: failed - no dynamic loading mem "
501 "segments: 0x%x\n", __func__
, status
);
504 if (DSP_SUCCEEDED(status
) && ul_len
> 0) {
505 /* Read section containing dynamic load mem segments */
507 nldr_obj
->ldr_fxns
.read_sect_fxn(nldr_obj
->base_lib
,
508 DYNMEMSECT
, psz_coff_buf
,
511 if (DSP_SUCCEEDED(status
) && ul_len
> 0) {
512 /* Parse memory segment data */
513 dload_segs
= (u16
) (*((u32
*) psz_coff_buf
));
514 if (dload_segs
> MAXMEMSEGS
)
517 /* Parse dynamic load memory segments */
518 if (DSP_SUCCEEDED(status
) && dload_segs
> 0) {
519 rmm_segs
= kzalloc(sizeof(struct rmm_segment
) * dload_segs
,
521 nldr_obj
->seg_table
=
522 kzalloc(sizeof(u32
) * dload_segs
, GFP_KERNEL
);
523 if (rmm_segs
== NULL
|| nldr_obj
->seg_table
== NULL
) {
526 nldr_obj
->dload_segs
= dload_segs
;
527 mem_info_obj
= (struct mem_seg_info
*)(psz_coff_buf
+
529 for (i
= 0; i
< dload_segs
; i
++) {
530 rmm_segs
[i
].base
= (mem_info_obj
+ i
)->base
;
531 rmm_segs
[i
].length
= (mem_info_obj
+ i
)->len
;
532 rmm_segs
[i
].space
= 0;
533 nldr_obj
->seg_table
[i
] =
534 (mem_info_obj
+ i
)->type
;
536 "(proc) DLL MEMSEGMENT: %d, "
537 "Base: 0x%x, Length: 0x%x\n", i
,
538 rmm_segs
[i
].base
, rmm_segs
[i
].length
);
542 /* Create Remote memory manager */
543 if (DSP_SUCCEEDED(status
))
544 status
= rmm_create(&nldr_obj
->rmm
, rmm_segs
, dload_segs
);
546 if (DSP_SUCCEEDED(status
)) {
547 /* set the alloc, free, write functions for loader */
548 nldr_obj
->ldr_fxns
.get_attrs_fxn(nldr_obj
->dbll
, &save_attrs
);
549 new_attrs
= save_attrs
;
550 new_attrs
.alloc
= (dbll_alloc_fxn
) remote_alloc
;
551 new_attrs
.free
= (dbll_free_fxn
) remote_free
;
552 new_attrs
.sym_lookup
= (dbll_sym_lookup
) get_symbol_value
;
553 new_attrs
.sym_handle
= nldr_obj
;
554 new_attrs
.write
= (dbll_write_fxn
) pattrs
->pfn_write
;
555 nldr_obj
->ovly_fxn
= pattrs
->pfn_ovly
;
556 nldr_obj
->write_fxn
= pattrs
->pfn_write
;
557 nldr_obj
->ldr_attrs
= new_attrs
;
563 /* Get overlay nodes */
564 if (DSP_SUCCEEDED(status
)) {
566 cod_get_base_name(cod_mgr
, sz_zl_file
, COD_MAXPATHLENGTH
);
568 DBC_ASSERT(DSP_SUCCEEDED(status
));
569 /* First count number of overlay nodes */
571 dcd_get_objects(nldr_obj
->hdcd_mgr
, sz_zl_file
,
572 add_ovly_node
, (void *)nldr_obj
);
573 /* Now build table of overlay nodes */
574 if (DSP_SUCCEEDED(status
) && nldr_obj
->ovly_nodes
> 0) {
575 /* Allocate table for overlay nodes */
576 nldr_obj
->ovly_table
=
577 kzalloc(sizeof(struct ovly_node
) *
578 nldr_obj
->ovly_nodes
, GFP_KERNEL
);
579 /* Put overlay nodes in the table */
580 nldr_obj
->ovly_nid
= 0;
581 status
= dcd_get_objects(nldr_obj
->hdcd_mgr
, sz_zl_file
,
586 /* Do a fake reload of the base image to get overlay section info */
587 if (DSP_SUCCEEDED(status
) && nldr_obj
->ovly_nodes
> 0) {
588 save_attrs
.write
= fake_ovly_write
;
589 save_attrs
.log_write
= add_ovly_info
;
590 save_attrs
.log_write_handle
= nldr_obj
;
591 flags
= DBLL_CODE
| DBLL_DATA
| DBLL_SYMB
;
592 status
= nldr_obj
->ldr_fxns
.load_fxn(nldr_obj
->base_lib
, flags
,
593 &save_attrs
, &ul_entry
);
595 if (DSP_SUCCEEDED(status
)) {
596 *phNldr
= (struct nldr_object
*)nldr_obj
;
599 nldr_delete((struct nldr_object
*)nldr_obj
);
603 /* FIXME:Temp. Fix. Must be removed */
604 DBC_ENSURE((DSP_SUCCEEDED(status
) && *phNldr
)
605 || (DSP_FAILED(status
) && (*phNldr
== NULL
)));
610 * ======== nldr_delete ========
612 void nldr_delete(struct nldr_object
*nldr_obj
)
614 struct ovly_sect
*ovly_section
;
615 struct ovly_sect
*next
;
617 DBC_REQUIRE(refs
> 0);
618 DBC_REQUIRE(nldr_obj
);
620 nldr_obj
->ldr_fxns
.exit_fxn();
622 rmm_delete(nldr_obj
->rmm
);
624 kfree(nldr_obj
->seg_table
);
626 if (nldr_obj
->hdcd_mgr
)
627 dcd_destroy_manager(nldr_obj
->hdcd_mgr
);
629 /* Free overlay node information */
630 if (nldr_obj
->ovly_table
) {
631 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
633 nldr_obj
->ovly_table
[i
].create_sects_list
;
634 while (ovly_section
) {
635 next
= ovly_section
->next_sect
;
640 nldr_obj
->ovly_table
[i
].delete_sects_list
;
641 while (ovly_section
) {
642 next
= ovly_section
->next_sect
;
647 nldr_obj
->ovly_table
[i
].execute_sects_list
;
648 while (ovly_section
) {
649 next
= ovly_section
->next_sect
;
653 ovly_section
= nldr_obj
->ovly_table
[i
].other_sects_list
;
654 while (ovly_section
) {
655 next
= ovly_section
->next_sect
;
660 kfree(nldr_obj
->ovly_table
);
666 * ======== nldr_exit ========
667 * Discontinue usage of NLDR module.
671 DBC_REQUIRE(refs
> 0);
678 DBC_ENSURE(refs
>= 0);
682 * ======== nldr_get_fxn_addr ========
684 int nldr_get_fxn_addr(struct nldr_nodeobject
*nldr_node_obj
,
685 char *pstrFxn
, u32
* pulAddr
)
687 struct dbll_sym_val
*dbll_sym
;
688 struct nldr_object
*nldr_obj
;
690 bool status1
= false;
692 struct lib_node root
= { NULL
, 0, NULL
};
693 DBC_REQUIRE(refs
> 0);
694 DBC_REQUIRE(nldr_node_obj
);
695 DBC_REQUIRE(pulAddr
!= NULL
);
696 DBC_REQUIRE(pstrFxn
!= NULL
);
698 nldr_obj
= nldr_node_obj
->nldr_obj
;
699 /* Called from node_create(), node_delete(), or node_run(). */
700 if (nldr_node_obj
->dynamic
&& *nldr_node_obj
->pf_phase_split
) {
701 switch (nldr_node_obj
->phase
) {
703 root
= nldr_node_obj
->create_lib
;
706 root
= nldr_node_obj
->execute_lib
;
709 root
= nldr_node_obj
->delete_lib
;
716 /* for Overlay nodes or non-split Dynamic nodes */
717 root
= nldr_node_obj
->root
;
720 nldr_obj
->ldr_fxns
.get_c_addr_fxn(root
.lib
, pstrFxn
, &dbll_sym
);
723 nldr_obj
->ldr_fxns
.get_addr_fxn(root
.lib
, pstrFxn
,
726 /* If symbol not found, check dependent libraries */
728 for (i
= 0; i
< root
.dep_libs
; i
++) {
730 nldr_obj
->ldr_fxns
.get_addr_fxn(root
.dep_libs_tree
736 get_c_addr_fxn(root
.dep_libs_tree
[i
].lib
,
745 /* Check persistent libraries */
747 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
750 get_addr_fxn(nldr_node_obj
->pers_lib_table
[i
].lib
,
755 get_c_addr_fxn(nldr_node_obj
->pers_lib_table
756 [i
].lib
, pstrFxn
, &dbll_sym
);
766 *pulAddr
= dbll_sym
->value
;
774 * ======== nldr_get_rmm_manager ========
775 * Given a NLDR object, retrieve RMM Manager Handle
777 int nldr_get_rmm_manager(struct nldr_object
*nldr
,
778 OUT
struct rmm_target_obj
**phRmmMgr
)
781 struct nldr_object
*nldr_obj
= nldr
;
782 DBC_REQUIRE(phRmmMgr
!= NULL
);
785 *phRmmMgr
= nldr_obj
->rmm
;
791 DBC_ENSURE(DSP_SUCCEEDED(status
) || ((phRmmMgr
!= NULL
) &&
792 (*phRmmMgr
== NULL
)));
798 * ======== nldr_init ========
799 * Initialize the NLDR module.
803 DBC_REQUIRE(refs
>= 0);
810 DBC_ENSURE(refs
> 0);
815 * ======== nldr_load ========
817 int nldr_load(struct nldr_nodeobject
*nldr_node_obj
,
818 enum nldr_phase phase
)
820 struct nldr_object
*nldr_obj
;
821 struct dsp_uuid lib_uuid
;
824 DBC_REQUIRE(refs
> 0);
825 DBC_REQUIRE(nldr_node_obj
);
827 nldr_obj
= nldr_node_obj
->nldr_obj
;
829 if (nldr_node_obj
->dynamic
) {
830 nldr_node_obj
->phase
= phase
;
832 lib_uuid
= nldr_node_obj
->uuid
;
834 /* At this point, we may not know if node is split into
835 * different libraries. So we'll go ahead and load the
836 * library, and then save the pointer to the appropriate
837 * location after we know. */
840 load_lib(nldr_node_obj
, &nldr_node_obj
->root
, lib_uuid
,
841 false, nldr_node_obj
->lib_path
, phase
, 0);
843 if (DSP_SUCCEEDED(status
)) {
844 if (*nldr_node_obj
->pf_phase_split
) {
847 nldr_node_obj
->create_lib
=
852 nldr_node_obj
->execute_lib
=
857 nldr_node_obj
->delete_lib
=
868 if (nldr_node_obj
->overlay
)
869 status
= load_ovly(nldr_node_obj
, phase
);
877 * ======== nldr_unload ========
879 int nldr_unload(struct nldr_nodeobject
*nldr_node_obj
,
880 enum nldr_phase phase
)
883 struct lib_node
*root_lib
= NULL
;
886 DBC_REQUIRE(refs
> 0);
887 DBC_REQUIRE(nldr_node_obj
);
889 if (nldr_node_obj
!= NULL
) {
890 if (nldr_node_obj
->dynamic
) {
891 if (*nldr_node_obj
->pf_phase_split
) {
894 root_lib
= &nldr_node_obj
->create_lib
;
897 root_lib
= &nldr_node_obj
->execute_lib
;
900 root_lib
= &nldr_node_obj
->delete_lib
;
901 /* Unload persistent libraries */
903 i
< nldr_node_obj
->pers_libs
;
905 unload_lib(nldr_node_obj
,
909 nldr_node_obj
->pers_libs
= 0;
916 /* Unload main library */
917 root_lib
= &nldr_node_obj
->root
;
920 unload_lib(nldr_node_obj
, root_lib
);
922 if (nldr_node_obj
->overlay
)
923 unload_ovly(nldr_node_obj
, phase
);
931 * ======== add_ovly_info ========
933 static int add_ovly_info(void *handle
, struct dbll_sect_info
*sect_info
,
937 char *sect_name
= (char *)sect_info
->name
;
938 bool sect_exists
= false;
942 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
945 /* Is this an overlay section (load address != run address)? */
946 if (sect_info
->sect_load_addr
== sect_info
->sect_run_addr
)
949 /* Find the node it belongs to */
950 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
951 node_name
= nldr_obj
->ovly_table
[i
].node_name
;
952 DBC_REQUIRE(node_name
);
953 if (strncmp(node_name
, sect_name
+ 1, strlen(node_name
)) == 0) {
958 if (!(i
< nldr_obj
->ovly_nodes
))
961 /* Determine which phase this section belongs to */
962 for (pch
= sect_name
+ 1; *pch
&& *pch
!= seps
; pch
++)
966 pch
++; /* Skip over the ':' */
967 if (strncmp(pch
, PCREATE
, strlen(PCREATE
)) == 0) {
969 add_ovly_sect(nldr_obj
,
971 ovly_table
[i
].create_sects_list
,
972 sect_info
, §_exists
, addr
, bytes
);
973 if (DSP_SUCCEEDED(status
) && !sect_exists
)
974 nldr_obj
->ovly_table
[i
].create_sects
++;
976 } else if (strncmp(pch
, PDELETE
, strlen(PDELETE
)) == 0) {
978 add_ovly_sect(nldr_obj
,
980 ovly_table
[i
].delete_sects_list
,
981 sect_info
, §_exists
, addr
, bytes
);
982 if (DSP_SUCCEEDED(status
) && !sect_exists
)
983 nldr_obj
->ovly_table
[i
].delete_sects
++;
985 } else if (strncmp(pch
, PEXECUTE
, strlen(PEXECUTE
)) == 0) {
987 add_ovly_sect(nldr_obj
,
989 ovly_table
[i
].execute_sects_list
,
990 sect_info
, §_exists
, addr
, bytes
);
991 if (DSP_SUCCEEDED(status
) && !sect_exists
)
992 nldr_obj
->ovly_table
[i
].execute_sects
++;
/* Put in "other" sections */
997 add_ovly_sect(nldr_obj
,
999 ovly_table
[i
].other_sects_list
,
1000 sect_info
, §_exists
, addr
, bytes
);
1001 if (DSP_SUCCEEDED(status
) && !sect_exists
)
1002 nldr_obj
->ovly_table
[i
].other_sects
++;
1011 * ======== add_ovly_node =========
1012 * Callback function passed to dcd_get_objects.
1014 static int add_ovly_node(struct dsp_uuid
*uuid_obj
,
1015 enum dsp_dcdobjtype obj_type
, IN
void *handle
)
1017 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
1018 char *node_name
= NULL
;
1021 struct dcd_genericobj obj_def
;
1024 if (obj_type
!= DSP_DCDNODETYPE
)
1028 dcd_get_object_def(nldr_obj
->hdcd_mgr
, uuid_obj
, obj_type
,
1030 if (DSP_FAILED(status
))
1033 /* If overlay node, add to the list */
1034 if (obj_def
.obj_data
.node_obj
.us_load_type
== NLDR_OVLYLOAD
) {
1035 if (nldr_obj
->ovly_table
== NULL
) {
1036 nldr_obj
->ovly_nodes
++;
1038 /* Add node to table */
1039 nldr_obj
->ovly_table
[nldr_obj
->ovly_nid
].uuid
=
1041 DBC_REQUIRE(obj_def
.obj_data
.node_obj
.ndb_props
.
1044 strlen(obj_def
.obj_data
.node_obj
.ndb_props
.ac_name
);
1045 node_name
= obj_def
.obj_data
.node_obj
.ndb_props
.ac_name
;
1046 pbuf
= kzalloc(len
+ 1, GFP_KERNEL
);
1050 strncpy(pbuf
, node_name
, len
);
1051 nldr_obj
->ovly_table
[nldr_obj
->ovly_nid
].
1053 nldr_obj
->ovly_nid
++;
1057 /* These were allocated in dcd_get_object_def */
1058 kfree(obj_def
.obj_data
.node_obj
.pstr_create_phase_fxn
);
1060 kfree(obj_def
.obj_data
.node_obj
.pstr_execute_phase_fxn
);
1062 kfree(obj_def
.obj_data
.node_obj
.pstr_delete_phase_fxn
);
1064 kfree(obj_def
.obj_data
.node_obj
.pstr_i_alg_name
);
1071 * ======== add_ovly_sect ========
/*
 * Appends a new overlay-section record to *pList unless a section with
 * the same load address is already present (presumably *exists is set
 * in an elided branch -- TODO confirm against the original source).
 * NOTE(review): extraction-garbled text; some statements are elided.
 */
1073 static int add_ovly_sect(struct nldr_object
*nldr_obj
,
1074 struct ovly_sect
**pList
,
1075 struct dbll_sect_info
*pSectInfo
,
1076 bool *exists
, u32 addr
, u32 bytes
)
1078 struct ovly_sect
*new_sect
= NULL
;
1079 struct ovly_sect
*last_sect
;
1080 struct ovly_sect
*ovly_section
;
/* Walk the list looking for a duplicate; remember the tail for appending. */
1083 ovly_section
= last_sect
= *pList
;
1085 while (ovly_section
) {
1087 * Make sure section has not already been added. Multiple
1088 * 'write' calls may be made to load the section.
1090 if (ovly_section
->sect_load_addr
== addr
) {
1095 last_sect
= ovly_section
;
1096 ovly_section
= ovly_section
->next_sect
;
/* Not found in the list: allocate and populate a fresh record. */
1099 if (!ovly_section
) {
1101 new_sect
= kzalloc(sizeof(struct ovly_sect
), GFP_KERNEL
);
1102 if (new_sect
== NULL
) {
1105 new_sect
->sect_load_addr
= addr
;
/* Run address is offset by how far 'addr' lies into the section. */
1106 new_sect
->sect_run_addr
= pSectInfo
->sect_run_addr
+
1107 (addr
- pSectInfo
->sect_load_addr
);
1108 new_sect
->size
= bytes
;
1109 new_sect
->page
= pSectInfo
->type
;
1112 /* Add to the list */
1113 if (DSP_SUCCEEDED(status
)) {
1114 if (*pList
== NULL
) {
1115 /* First in the list */
1118 last_sect
->next_sect
= new_sect
;
1127 * ======== fake_ovly_write ========
/*
 * Overlay-write stub: only the signature is visible here; the body was
 * elided by extraction. NOTE(review): presumably reports 'bytes' as
 * written without performing a real transfer -- TODO confirm against
 * the original source.
 */
1129 static s32
fake_ovly_write(void *handle
, u32 dsp_address
, void *buf
, u32 bytes
,
1136 * ======== free_sects ========
/*
 * Releases ('deallocates') up to alloc_num overlay sections back to the
 * remote memory manager by walking the phase_sects list.
 * NOTE(review): extraction-garbled text; the loop counter increment and
 * the function close are elided.
 */
1138 static void free_sects(struct nldr_object
*nldr_obj
,
1139 struct ovly_sect
*phase_sects
, u16 alloc_num
)
1141 struct ovly_sect
*ovly_section
= phase_sects
;
1145 while (ovly_section
&& i
< alloc_num
) {
1147 /* segid - page not supported yet */
1148 /* Reserved memory */
/* Free at the section's run address; 'true' matches the reserve flag
 * used when the section was allocated via rmm_alloc(). */
1150 rmm_free(nldr_obj
->rmm
, 0, ovly_section
->sect_run_addr
,
1151 ovly_section
->size
, true);
1153 ovly_section
= ovly_section
->next_sect
;
1159 * ======== get_symbol_value ========
1160 * Find symbol in library's base image. If not there, check dependent
/*
 * Symbol-resolution callback: looks 'name' up first in the base image,
 * then in the root library itself, then in the root's direct dependent
 * libraries, and finally in the node's persistent libraries, using both
 * the plain and the C-symbol lookup functions of the loader.
 * NOTE(review): extraction-garbled text; the short-circuit checks
 * between lookup stages are elided.
 */
1163 static bool get_symbol_value(void *handle
, void *parg
, void *rmm_handle
,
1164 char *name
, struct dbll_sym_val
**sym
)
1166 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
1167 struct nldr_nodeobject
*nldr_node_obj
=
1168 (struct nldr_nodeobject
*)rmm_handle
;
1169 struct lib_node
*root
= (struct lib_node
*)parg
;
1171 bool status
= false;
1173 /* check the base image */
1174 status
= nldr_obj
->ldr_fxns
.get_addr_fxn(nldr_obj
->base_lib
, name
, sym
);
/* Retry with the C-symbol variant of the lookup. */
1177 nldr_obj
->ldr_fxns
.get_c_addr_fxn(nldr_obj
->base_lib
, name
,
1181 * Check in root lib itself. If the library consists of
1182 * multiple object files linked together, some symbols in the
1183 * library may need to be resolved.
1186 status
= nldr_obj
->ldr_fxns
.get_addr_fxn(root
->lib
, name
, sym
);
1189 nldr_obj
->ldr_fxns
.get_c_addr_fxn(root
->lib
, name
,
1195 * Check in root lib's dependent libraries, but not dependent
1196 * libraries' dependents.
1199 for (i
= 0; i
< root
->dep_libs
; i
++) {
1201 nldr_obj
->ldr_fxns
.get_addr_fxn(root
->dep_libs_tree
1202 [i
].lib
, name
, sym
);
1206 get_c_addr_fxn(root
->dep_libs_tree
[i
].lib
,
1216 * Check in persistent libraries
1219 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
1222 get_addr_fxn(nldr_node_obj
->pers_lib_table
[i
].lib
,
1225 status
= nldr_obj
->ldr_fxns
.get_c_addr_fxn
1226 (nldr_node_obj
->pers_lib_table
[i
].lib
, name
,
1240 * ======== load_lib ========
1241 * Recursively load library and all its dependent libraries. The library
1242 * we're loading is specified by a uuid.
/*
 * Recursively loads the library identified by 'uuid' together with all
 * of its dependent libraries: resolves the file name via the DCD,
 * opens it without symbols, rejects circular dependencies against
 * lib_path[0..depth), fetches and recurses into dependent-library
 * UUIDs, then loads the root library with symbol-resolution attributes.
 * On failure it unwinds: unloads loaded dependents and closes the root.
 * NOTE(review): extraction-garbled text; many statements (error gotos,
 * status assignments, closing braces) are elided, so the comments below
 * only describe what the visible fragments establish.
 */
1244 static int load_lib(struct nldr_nodeobject
*nldr_node_obj
,
1245 struct lib_node
*root
, struct dsp_uuid uuid
,
1246 bool rootPersistent
,
1247 struct dbll_library_obj
**lib_path
,
1248 enum nldr_phase phase
, u16 depth
)
1250 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1251 u16 nd_libs
= 0; /* Number of dependent libraries */
1252 u16 np_libs
= 0; /* Number of persistent libraries */
1253 u16 nd_libs_loaded
= 0; /* Number of dep. libraries loaded */
1256 u32 dw_buf_size
= NLDR_MAXPATHLENGTH
;
1257 dbll_flags flags
= DBLL_SYMB
| DBLL_CODE
| DBLL_DATA
| DBLL_DYNAMIC
;
1258 struct dbll_attrs new_attrs
;
1259 char *psz_file_name
= NULL
;
1260 struct dsp_uuid
*dep_lib_uui_ds
= NULL
;
1261 bool *persistent_dep_libs
= NULL
;
1263 bool lib_status
= false;
1264 struct lib_node
*dep_lib
;
/* Guard against runaway recursion through the dependency tree. */
1266 if (depth
> MAXDEPTH
) {
1271 /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
1272 psz_file_name
= kzalloc(DBLL_MAXPATHLENGTH
, GFP_KERNEL
)
;
1273 if (psz_file_name
== NULL
)
1276 if (DSP_SUCCEEDED(status
)) {
1277 /* Get the name of the library */
1280 dcd_get_library_name(nldr_node_obj
->nldr_obj
->
1281 hdcd_mgr
, &uuid
, psz_file_name
,
1282 &dw_buf_size
, phase
,
1283 nldr_node_obj
->pf_phase_split
);
1285 /* Dependent libraries are registered with a phase */
1287 dcd_get_library_name(nldr_node_obj
->nldr_obj
->
1288 hdcd_mgr
, &uuid
, psz_file_name
,
1289 &dw_buf_size
, NLDR_NOPHASE
,
1293 if (DSP_SUCCEEDED(status
)) {
1294 /* Open the library, don't load symbols */
1296 nldr_obj
->ldr_fxns
.open_fxn(nldr_obj
->dbll
, psz_file_name
,
1297 DBLL_NOLOAD
, &root
->lib
);
1299 /* Done with file name */
1300 kfree(psz_file_name
);
1302 /* Check to see if library not already loaded */
1303 if (DSP_SUCCEEDED(status
) && rootPersistent
) {
1305 find_in_persistent_lib_array(nldr_node_obj
, root
->lib
);
/* Presumably closed here because it is already tracked as persistent
 * -- TODO confirm against the original source. */
1308 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1312 if (DSP_SUCCEEDED(status
)) {
1313 /* Check for circular dependencies. */
1314 for (i
= 0; i
< depth
; i
++) {
1315 if (root
->lib
== lib_path
[i
]) {
1316 /* This condition could be checked by a
1317 * tool at build time. */
1322 if (DSP_SUCCEEDED(status
)) {
1323 /* Add library to current path in dependency tree */
1324 lib_path
[depth
] = root
->lib
;
1326 /* Get number of dependent libraries */
1328 dcd_get_num_dep_libs(nldr_node_obj
->nldr_obj
->hdcd_mgr
,
1329 &uuid
, &nd_libs
, &np_libs
, phase
);
1331 DBC_ASSERT(nd_libs
>= np_libs
);
1332 if (DSP_SUCCEEDED(status
)) {
1333 if (!(*nldr_node_obj
->pf_phase_split
))
1336 /* nd_libs = #of dependent libraries */
1337 root
->dep_libs
= nd_libs
- np_libs
;
1339 dep_lib_uui_ds
= kzalloc(sizeof(struct dsp_uuid
) *
1340 nd_libs
, GFP_KERNEL
);
1341 persistent_dep_libs
=
1342 kzalloc(sizeof(bool) * nd_libs
, GFP_KERNEL
);
1343 if (!dep_lib_uui_ds
|| !persistent_dep_libs
)
1346 if (root
->dep_libs
> 0) {
1347 /* Allocate arrays for dependent lib UUIDs,
1349 root
->dep_libs_tree
= kzalloc
1350 (sizeof(struct lib_node
) *
1351 (root
->dep_libs
), GFP_KERNEL
);
1352 if (!(root
->dep_libs_tree
))
1357 if (DSP_SUCCEEDED(status
)) {
1358 /* Get the dependent library UUIDs */
1360 dcd_get_dep_libs(nldr_node_obj
->
1361 nldr_obj
->hdcd_mgr
, &uuid
,
1362 nd_libs
, dep_lib_uui_ds
,
1363 persistent_dep_libs
,
1370 * Recursively load dependent libraries.
1372 if (DSP_SUCCEEDED(status
)) {
1373 for (i
= 0; i
< nd_libs
; i
++) {
1374 /* If root library is NOT persistent, and dep library
1375 * is, then record it. If root library IS persistent,
1376 * the deplib is already included */
1377 if (!rootPersistent
&& persistent_dep_libs
[i
] &&
1378 *nldr_node_obj
->pf_phase_split
) {
1379 if ((nldr_node_obj
->pers_libs
) >= MAXLIBS
) {
1384 /* Allocate library outside of phase */
1386 &nldr_node_obj
->pers_lib_table
1387 [nldr_node_obj
->pers_libs
];
1390 persistent_dep_libs
[i
] = true;
1392 /* Allocate library within phase */
1393 dep_lib
= &root
->dep_libs_tree
[nd_libs_loaded
];
/* Recurse one level deeper for this dependent library. */
1396 status
= load_lib(nldr_node_obj
, dep_lib
,
1398 persistent_dep_libs
[i
], lib_path
,
1401 if (DSP_SUCCEEDED(status
)) {
1402 if ((status
!= 0) &&
1403 !rootPersistent
&& persistent_dep_libs
[i
] &&
1404 *nldr_node_obj
->pf_phase_split
) {
1405 (nldr_node_obj
->pers_libs
)++;
1407 if (!persistent_dep_libs
[i
] ||
1408 !(*nldr_node_obj
->pf_phase_split
)) {
1418 /* Now we can load the root library */
1419 if (DSP_SUCCEEDED(status
)) {
1420 new_attrs
= nldr_obj
->ldr_attrs
;
1421 new_attrs
.sym_arg
= root
;
1422 new_attrs
.rmm_handle
= nldr_node_obj
;
1423 new_attrs
.input_params
= nldr_node_obj
->priv_ref
;
1424 new_attrs
.base_image
= false;
1427 nldr_obj
->ldr_fxns
.load_fxn(root
->lib
, flags
, &new_attrs
,
1432 * In case of failure, unload any dependent libraries that
1433 * were loaded, and close the root library.
1434 * (Persistent libraries are unloaded from the very top)
1436 if (DSP_FAILED(status
)) {
1437 if (phase
!= NLDR_EXECUTE
) {
1438 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++)
1439 unload_lib(nldr_node_obj
,
1440 &nldr_node_obj
->pers_lib_table
[i
]);
1442 nldr_node_obj
->pers_libs
= 0;
1444 for (i
= 0; i
< nd_libs_loaded
; i
++)
1445 unload_lib(nldr_node_obj
, &root
->dep_libs_tree
[i
]);
1448 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1452 /* Going up one node in the dependency tree */
1455 kfree(dep_lib_uui_ds
);
1456 dep_lib_uui_ds
= NULL
;
1458 kfree(persistent_dep_libs
);
1459 persistent_dep_libs
= NULL
;
1465 * ======== load_ovly ========
/*
 * Loads the overlay sections of the node's given phase: finds the node
 * in ovly_table by uuid, picks the per-phase section list and reference
 * count, 'allocates' run addresses from the remote memory manager, and
 * copies the sections in via nldr_obj->ovly_fxn. 'Other' sections are
 * handled alongside the create phase (other_ref). On failure the
 * allocated sections are released with free_sects().
 * NOTE(review): extraction-garbled text; case labels, error branches and
 * reference-count updates are elided.
 */
1467 static int load_ovly(struct nldr_nodeobject
*nldr_node_obj
,
1468 enum nldr_phase phase
)
1470 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1471 struct ovly_node
*po_node
= NULL
;
1472 struct ovly_sect
*phase_sects
= NULL
;
1473 struct ovly_sect
*other_sects_list
= NULL
;
1476 u16 other_alloc
= 0;
1477 u16
*ref_count
= NULL
;
1478 u16
*other_ref
= NULL
;
1480 struct ovly_sect
*ovly_section
;
1483 /* Find the node in the table */
1484 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
1486 (nldr_node_obj
->uuid
, nldr_obj
->ovly_table
[i
].uuid
)) {
1488 po_node
= &(nldr_obj
->ovly_table
[i
]);
1493 DBC_ASSERT(i
< nldr_obj
->ovly_nodes
);
/* Phase selection: create also covers the 'other' sections. */
1502 ref_count
= &(po_node
->create_ref
);
1503 other_ref
= &(po_node
->other_ref
);
1504 phase_sects
= po_node
->create_sects_list
;
1505 other_sects_list
= po_node
->other_sects_list
;
1509 ref_count
= &(po_node
->execute_ref
);
1510 phase_sects
= po_node
->execute_sects_list
;
1514 ref_count
= &(po_node
->delete_ref
);
1515 phase_sects
= po_node
->delete_sects_list
;
1523 if (ref_count
== NULL
)
/* Already loaded once (nonzero ref count): skip the allocate/load. */
1526 if (*ref_count
!= 0)
1529 /* 'Allocate' memory for overlay sections of this phase */
1530 ovly_section
= phase_sects
;
1531 while (ovly_section
) {
1532 /* allocate *//* page not supported yet */
1533 /* reserve *//* align */
1534 status
= rmm_alloc(nldr_obj
->rmm
, 0, ovly_section
->size
, 0,
1535 &(ovly_section
->sect_run_addr
), true);
1536 if (DSP_SUCCEEDED(status
)) {
1537 ovly_section
= ovly_section
->next_sect
;
1543 if (other_ref
&& *other_ref
== 0) {
1544 /* 'Allocate' memory for other overlay sections
1546 if (DSP_SUCCEEDED(status
)) {
1547 ovly_section
= other_sects_list
;
1548 while (ovly_section
) {
1549 /* page not supported *//* align */
1552 rmm_alloc(nldr_obj
->rmm
, 0,
1553 ovly_section
->size
, 0,
1554 &(ovly_section
->sect_run_addr
),
1556 if (DSP_SUCCEEDED(status
)) {
1557 ovly_section
= ovly_section
->next_sect
;
1565 if (*ref_count
== 0) {
1566 if (DSP_SUCCEEDED(status
)) {
1567 /* Load sections for this phase */
1568 ovly_section
= phase_sects
;
1569 while (ovly_section
&& DSP_SUCCEEDED(status
)) {
/* Copy the section to the DSP through the overlay-write callback;
 * a short write (bytes != size) is treated as an error below. */
1571 (*nldr_obj
->ovly_fxn
) (nldr_node_obj
->
1578 ovly_section
->page
);
1579 if (bytes
!= ovly_section
->size
)
1582 ovly_section
= ovly_section
->next_sect
;
1586 if (other_ref
&& *other_ref
== 0) {
1587 if (DSP_SUCCEEDED(status
)) {
1588 /* Load other sections (create phase) */
1589 ovly_section
= other_sects_list
;
1590 while (ovly_section
&& DSP_SUCCEEDED(status
)) {
1592 (*nldr_obj
->ovly_fxn
) (nldr_node_obj
->
1599 ovly_section
->page
);
1600 if (bytes
!= ovly_section
->size
)
1603 ovly_section
= ovly_section
->next_sect
;
1607 if (DSP_FAILED(status
)) {
1608 /* 'Deallocate' memory */
1609 free_sects(nldr_obj
, phase_sects
, alloc_num
);
1610 free_sects(nldr_obj
, other_sects_list
, other_alloc
);
1613 if (DSP_SUCCEEDED(status
) && (ref_count
!= NULL
)) {
1624 * ======== remote_alloc ========
/*
 * Remote-memory allocation callback used while loading: converts the
 * byte size to DSP words, widens 'align' to the DSP cache line via
 * find_lcm(), derives the preferred segment (segid) from the node's
 * phase and code/data flags, then tries, in order: the preferred
 * dynamic-load segment, any internal/external segment matching
 * mem_sect_type, and finally -- only when no hard memory requirement is
 * set -- any segment at all. The chosen segment id is stored back into
 * the rmm_addr object for the matching remote_free().
 * NOTE(review): extraction-garbled text; several declarations (segid,
 * word_size, mem_sect_type), case labels and loop bodies are elided.
 */
1626 static int remote_alloc(void **pRef
, u16 space
, u32 size
,
1627 u32 align
, u32
*dsp_address
,
1628 OPTIONAL s32 segmentId
, OPTIONAL s32 req
,
1631 struct nldr_nodeobject
*hnode
= (struct nldr_nodeobject
*)pRef
;
1632 struct nldr_object
*nldr_obj
;
1633 struct rmm_target_obj
*rmm
;
1634 u16 mem_phase_bit
= MAXFLAGS
;
1639 struct rmm_addr
*rmm_addr_obj
= (struct rmm_addr
*)dsp_address
;
1640 bool mem_load_req
= false;
1641 int status
= -ENOMEM
; /* Set to fail */
1643 DBC_REQUIRE(space
== DBLL_CODE
|| space
== DBLL_DATA
||
1645 nldr_obj
= hnode
->nldr_obj
;
1646 rmm
= nldr_obj
->rmm
;
1647 /* Convert size to DSP words */
1649 (size
+ nldr_obj
->us_dsp_word_size
-
1650 1) / nldr_obj
->us_dsp_word_size
;
1651 /* Modify memory 'align' to account for DSP cache line size */
1652 align
= find_lcm(GEM_CACHE_LINE_SIZE
, align
);
1653 dev_dbg(bridge
, "%s: memory align to 0x%x\n", __func__
, align
);
1654 if (segmentId
!= -1) {
1655 rmm_addr_obj
->segid
= segmentId
;
/* Select which packed flag bit applies for this node phase. */
1659 switch (hnode
->phase
) {
1661 mem_phase_bit
= CREATEDATAFLAGBIT
;
1664 mem_phase_bit
= DELETEDATAFLAGBIT
;
1667 mem_phase_bit
= EXECUTEDATAFLAGBIT
;
1673 if (space
== DBLL_CODE
)
1676 if (mem_phase_bit
< MAXFLAGS
)
1677 segid
= hnode
->seg_id
[mem_phase_bit
];
1679 /* Determine if there is a memory loading requirement */
1680 if ((hnode
->code_data_flag_mask
>> mem_phase_bit
) & 0x1)
1681 mem_load_req
= true;
1684 mem_sect_type
= (space
== DBLL_CODE
) ? DYNM_CODE
: DYNM_DATA
;
1686 /* Find an appropriate segment based on space */
1687 if (segid
== NULLID
) {
1688 /* No memory requirements or preferences */
1689 DBC_ASSERT(!mem_load_req
);
1692 if (segid
<= MAXSEGID
) {
1693 DBC_ASSERT(segid
< nldr_obj
->dload_segs
);
1694 /* Attempt to allocate from segid first. */
1695 rmm_addr_obj
->segid
= segid
;
1697 rmm_alloc(rmm
, segid
, word_size
, align
, dsp_address
, false);
1698 if (DSP_FAILED(status
)) {
1699 dev_dbg(bridge
, "%s: Unable allocate from segment %d\n",
1703 /* segid > MAXSEGID ==> Internal or external memory */
1704 DBC_ASSERT(segid
== MEMINTERNALID
|| segid
== MEMEXTERNALID
);
1705 /* Check for any internal or external memory segment,
1706 * depending on segid. */
1707 mem_sect_type
|= segid
== MEMINTERNALID
?
1708 DYNM_INTERNAL
: DYNM_EXTERNAL
;
1709 for (i
= 0; i
< nldr_obj
->dload_segs
; i
++) {
1710 if ((nldr_obj
->seg_table
[i
] & mem_sect_type
) !=
1714 status
= rmm_alloc(rmm
, i
, word_size
, align
,
1715 dsp_address
, false);
1716 if (DSP_SUCCEEDED(status
)) {
1717 /* Save segid for freeing later */
1718 rmm_addr_obj
->segid
= i
;
1724 /* Haven't found memory yet, attempt to find any segment that works */
1725 if (status
== -ENOMEM
&& !mem_load_req
) {
1726 dev_dbg(bridge
, "%s: Preferred segment unavailable, trying "
1727 "another\n", __func__
);
1728 for (i
= 0; i
< nldr_obj
->dload_segs
; i
++) {
1729 /* All bits of mem_sect_type must be set */
1730 if ((nldr_obj
->seg_table
[i
] & mem_sect_type
) !=
1734 status
= rmm_alloc(rmm
, i
, word_size
, align
,
1735 dsp_address
, false);
1736 if (DSP_SUCCEEDED(status
)) {
1738 rmm_addr_obj
->segid
= i
;
/*
 * Remote-memory free callback: converts the byte size to DSP words
 * (rounding up) and returns the block to the remote memory manager.
 * NOTE(review): extraction-garbled text; the word_size declaration and
 * the success/return statements are elided.
 */
1747 static int remote_free(void **pRef
, u16 space
, u32 dsp_address
,
1748 u32 size
, bool reserve
)
1750 struct nldr_object
*nldr_obj
= (struct nldr_object
*)pRef
;
1751 struct rmm_target_obj
*rmm
;
1753 int status
= -ENOMEM
; /* Set to fail */
1755 DBC_REQUIRE(nldr_obj
);
1757 rmm
= nldr_obj
->rmm
;
1759 /* Convert size to DSP words */
1761 (size
+ nldr_obj
->us_dsp_word_size
-
1762 1) / nldr_obj
->us_dsp_word_size
;
/* 'space' doubles as the segment id here, mirroring remote_alloc(). */
1764 if (rmm_free(rmm
, space
, dsp_address
, word_size
, reserve
))
1771 * ======== unload_lib ========
/*
 * Recursively unloads 'root': first unloads each dependent library in
 * dep_libs_tree, then unloads and closes the root library itself with
 * the same attrs shape used by load_lib(), and finally frees the
 * dependent-library array.
 * NOTE(review): extraction-garbled text; some statements are elided.
 */
1773 static void unload_lib(struct nldr_nodeobject
*nldr_node_obj
,
1774 struct lib_node
*root
)
1776 struct dbll_attrs new_attrs
;
1777 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1780 DBC_ASSERT(root
!= NULL
);
1782 /* Unload dependent libraries */
1783 for (i
= 0; i
< root
->dep_libs
; i
++)
1784 unload_lib(nldr_node_obj
, &root
->dep_libs_tree
[i
]);
/* Rebuild the attrs the loader expects for symbol/memory callbacks. */
1788 new_attrs
= nldr_obj
->ldr_attrs
;
1789 new_attrs
.rmm_handle
= nldr_obj
->rmm
;
1790 new_attrs
.input_params
= nldr_node_obj
->priv_ref
;
1791 new_attrs
.base_image
= false;
1792 new_attrs
.sym_arg
= root
;
1795 /* Unload the root library */
1796 nldr_obj
->ldr_fxns
.unload_fxn(root
->lib
, &new_attrs
);
1797 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1800 /* Free dependent library list */
1801 kfree(root
->dep_libs_tree
);
1802 root
->dep_libs_tree
= NULL
;
1806 * ======== unload_ovly ========
/*
 * Releases the overlay sections for the node's given phase: finds the
 * node in ovly_table by uuid, selects the per-phase section list,
 * reference count and allocation count, and when the reference count
 * drops to zero returns the sections via free_sects(). The 'other'
 * sections are released together with the delete phase.
 * NOTE(review): extraction-garbled text; case labels and the actual
 * reference-count decrements are elided.
 */
1808 static void unload_ovly(struct nldr_nodeobject
*nldr_node_obj
,
1809 enum nldr_phase phase
)
1811 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1812 struct ovly_node
*po_node
= NULL
;
1813 struct ovly_sect
*phase_sects
= NULL
;
1814 struct ovly_sect
*other_sects_list
= NULL
;
1817 u16 other_alloc
= 0;
1818 u16
*ref_count
= NULL
;
1819 u16
*other_ref
= NULL
;
1821 /* Find the node in the table */
1822 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
1824 (nldr_node_obj
->uuid
, nldr_obj
->ovly_table
[i
].uuid
)) {
1826 po_node
= &(nldr_obj
->ovly_table
[i
]);
1831 DBC_ASSERT(i
< nldr_obj
->ovly_nodes
);
1834 /* TODO: Should we print warning here? */
/* Per-phase selection of section list, ref count and section count. */
1839 ref_count
= &(po_node
->create_ref
);
1840 phase_sects
= po_node
->create_sects_list
;
1841 alloc_num
= po_node
->create_sects
;
1844 ref_count
= &(po_node
->execute_ref
);
1845 phase_sects
= po_node
->execute_sects_list
;
1846 alloc_num
= po_node
->execute_sects
;
1849 ref_count
= &(po_node
->delete_ref
);
1850 other_ref
= &(po_node
->other_ref
);
1851 phase_sects
= po_node
->delete_sects_list
;
1852 /* 'Other' overlay sections are unloaded in the delete phase */
1853 other_sects_list
= po_node
->other_sects_list
;
1854 alloc_num
= po_node
->delete_sects
;
1855 other_alloc
= po_node
->other_sects
;
1861 DBC_ASSERT(ref_count
&& (*ref_count
> 0));
1862 if (ref_count
&& (*ref_count
> 0)) {
1865 DBC_ASSERT(*other_ref
> 0);
1870 if (ref_count
&& *ref_count
== 0) {
1871 /* 'Deallocate' memory */
1872 free_sects(nldr_obj
, phase_sects
, alloc_num
);
1874 if (other_ref
&& *other_ref
== 0)
1875 free_sects(nldr_obj
, other_sects_list
, other_alloc
);
1879 * ======== find_in_persistent_lib_array ========
/*
 * Linear search of the node's persistent-library table for 'lib'.
 * NOTE(review): extraction-garbled text; the return statements are
 * elided -- presumably returns true on a match, false otherwise.
 */
1881 static bool find_in_persistent_lib_array(struct nldr_nodeobject
*nldr_node_obj
,
1882 struct dbll_library_obj
*lib
)
1886 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
1887 if (lib
== nldr_node_obj
->pers_lib_table
[i
].lib
)
1896 * ================ Find LCM (Least Common Multiplier ===
1898 static u32
find_lcm(u32 a
, u32 b
)
1902 ret
= a
* b
/ find_gcf(a
, b
);
1908 * ================ Find GCF (Greatest Common Factor ) ===
1910 static u32
find_gcf(u32 a
, u32 b
)
1914 /* Get the GCF (Greatest common factor between the numbers,
1915 * using Euclidian Algo */
1916 while ((c
= (a
% b
))) {
1923 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1925 * nldr_find_addr() - Find the closest symbol to the given address based on
1926 * dynamic node object.
1928 * @nldr_node: Dynamic node object
1929 * @sym_addr: Given address to find the dsp symbol
1930 * @offset_range: offset range to look for dsp symbol
1931 * @offset_output: Symbol Output address
1932 * @sym_name: String with the dsp symbol
1934 * This function finds the node library for a given address and
1935 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1937 int nldr_find_addr(struct nldr_nodeobject
*nldr_node
, u32 sym_addr
,
1938 u32 offset_range
, void *offset_output
, char *sym_name
)
1941 bool status1
= false;
1943 struct lib_node root
= { NULL
, 0, NULL
};
1944 DBC_REQUIRE(refs
> 0);
1945 DBC_REQUIRE(offset_output
!= NULL
);
1946 DBC_REQUIRE(sym_name
!= NULL
);
1947 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__
, (u32
) nldr_node
,
1948 sym_addr
, offset_range
, (u32
) offset_output
, sym_name
);
1950 if (nldr_node
->dynamic
&& *nldr_node
->pf_phase_split
) {
1951 switch (nldr_node
->phase
) {
1953 root
= nldr_node
->create_lib
;
1956 root
= nldr_node
->execute_lib
;
1959 root
= nldr_node
->delete_lib
;
1966 /* for Overlay nodes or non-split Dynamic nodes */
1967 root
= nldr_node
->root
;
1970 status1
= dbll_find_dsp_symbol(root
.lib
, sym_addr
,
1971 offset_range
, offset_output
, sym_name
);
1973 /* If symbol not found, check dependent libraries */
1975 for (i
= 0; i
< root
.dep_libs
; i
++) {
1976 status1
= dbll_find_dsp_symbol(
1977 root
.dep_libs_tree
[i
].lib
, sym_addr
,
1978 offset_range
, offset_output
, sym_name
);
1983 /* Check persistent libraries */
1985 for (i
= 0; i
< nldr_node
->pers_libs
; i
++) {
1986 status1
= dbll_find_dsp_symbol(
1987 nldr_node
->pers_lib_table
[i
].lib
, sym_addr
,
1988 offset_range
, offset_output
, sym_name
);
1995 pr_debug("%s: Address 0x%x not found in range %d.\n",
1996 __func__
, sym_addr
, offset_range
);