staging: ti dspbridge: remove unnecessary check for NULL pointer in cmm.c
[linux-2.6/kvm.git] / drivers / staging / tidspbridge / pmgr / cmm.c
blob 874ed646d6d261957407453034b08004e140dfc7
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
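/*
 * Typical call flow for this module (illustrative sketch only, not part
 * of the driver; the segment base/size variables are hypothetical):
 *
 *	struct cmm_object *cmm_mgr;
 *	u32 seg_id;
 *	void *buf_pa;
 *
 *	cmm_create(&cmm_mgr, hdev_obj, NULL);		// default mgr attrs
 *	cmm_register_gppsm_seg(cmm_mgr, gpp_pa, size, dsp_offset,
 *			       CMM_ADDTODSPPA, dsp_base, dsp_size,
 *			       &seg_id, gpp_va);	// one SM segment
 *	buf_pa = cmm_calloc_buf(cmm_mgr, 0x100, NULL, NULL);
 *	cmm_free_buf(cmm_mgr, buf_pa, seg_id);
 *	cmm_un_register_gppsm_seg(cmm_mgr, CMM_ALLSEGMENTS);
 *	cmm_destroy(cmm_mgr, false);
 */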
#include <linux/types.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)	(pnode->dw_pa + pnode->ul_size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x)+(y))
#define GPPPA2DSPPA(base, x, y)	((x)-(y))

/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator. (not used)
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 ul_sm_size;		/* Size of SM block in bytes */
	unsigned int dw_vm_base;	/* Start of VM block. (Dev driver
					 * context for 'sma') */
	u32 dw_dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dw_dsp_base;	/* DSP virt base byte address */
	u32 ul_dsp_size;	/* DSP seg size in bytes */
	struct cmm_object *hcmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct lst_list *free_list_head;
	/* node list of memory in use */
	struct lst_list *in_use_list_head;
};
struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator associated */
	struct cmm_object *hcmm_mgr;
	/*
	 *  Client process virtual base address that corresponds to phys SM
	 *  base address for translator's ul_seg_id.
	 *  Only 1 segment ID currently supported.
	 */
	unsigned int dw_virt_base;	/* virtual base address */
	u32 ul_virt_size;	/* size of virt space in bytes */
	u32 ul_seg_id;		/* Segment Id */
};
/* CMM Mgr */
struct cmm_object {
	/*
	 * Cmm Lock is used to serialize access mem manager for multi-threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct lst_list *node_free_list_head;	/* Free list of memory nodes */
	u32 ul_min_block_size;	/* Min SM block; default 16 bytes */
	u32 dw_page_size;	/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};
/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* ul_seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dw_dsp_bufs */
	0,			/* dw_dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* dw_vm_size */
};
/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 dw_pa;		/* Phys addr */
	u32 dw_va;		/* Virtual address in device process context */
	u32 ul_size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/* ----------------------------------- Globals */
static u32 refs;		/* module reference count */
/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);
/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero its contents, and return the physical
 *      address and optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and put the remainder back on
 *      the freelist if it is large enough. The kept block is placed on
 *      the inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->ul_seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
			/* keep block size a multiple of ul_min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
					     1))
			    + cmm_mgr_obj->ul_min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->ul_size - usize);
			if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
					     pnode->dw_va + usize,
					     (u32) delta_size);
				/* leftovers go free */
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->ul_size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;

			/* put our node on InUse list */
			lst_put_tail(allocator->in_use_list_head,
				     (struct list_head *)pnode);
			buf_pa = (void *)pnode->dw_pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->dw_va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->dw_va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
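/*
 * Worked example of the size rounding above (illustrative only, assuming
 * the default ul_min_block_size of 16 bytes): a request of usize = 100
 * becomes ((100 - 1) & ~15) + 16 = 96 + 16 = 112 bytes. If the first
 * sufficiently large free block is 4096 bytes, 112 bytes are kept on the
 * inUse list and the remaining 3984 bytes go back on the freelist as a
 * new node.
 *
 *	void *pa, *va;
 *
 *	pa = cmm_calloc_buf(cmm_mgr, 100, NULL, &va);	// 112 bytes of SM
 *	if (pa)
 *		cmm_free_buf(cmm_mgr, pa, 1);		// seg id 1 (default)
 */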
/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;
	struct util_sysinfo sys_info;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (cmm_obj != NULL) {
		if (mgr_attrts == NULL)
			mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

		/* 4 bytes minimum */
		DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
		/* save away smallest block allocation for this cmm mgr */
		cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
		/* save away the systems memory page size */
		sys_info.dw_page_size = PAGE_SIZE;
		sys_info.dw_allocation_granularity = PAGE_SIZE;
		sys_info.dw_number_of_processors = 1;
		if (DSP_SUCCEEDED(status)) {
			cmm_obj->dw_page_size = sys_info.dw_page_size;
		} else {
			cmm_obj->dw_page_size = 0;
			status = -EPERM;
		}
		/* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by
		 * MEM_ALLOC_OBJECT */
		if (DSP_SUCCEEDED(status)) {
			/* create node free list */
			cmm_obj->node_free_list_head =
			    kzalloc(sizeof(struct lst_list),
				    GFP_KERNEL);
			if (cmm_obj->node_free_list_head == NULL)
				status = -ENOMEM;
			else
				INIT_LIST_HEAD(&cmm_obj->
					       node_free_list_head->head);
		}
		if (DSP_SUCCEEDED(status))
			mutex_init(&cmm_obj->cmm_lock);

		if (DSP_SUCCEEDED(status))
			*ph_cmm_mgr = cmm_obj;
		else
			cmm_destroy(cmm_obj, true);
	} else {
		status = -ENOMEM;
	}
	return status;
}
/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *pnode;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (DSP_SUCCEEDED(status)) {
			if (temp_info.ul_total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	if (cmm_mgr_obj->node_free_list_head != NULL) {
		/* Free the free nodes */
		while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
			pnode = (struct cmm_mnode *)
			    lst_get_head(cmm_mgr_obj->node_free_list_head);
			kfree(pnode);
		}
		/* delete NodeFreeList list */
		kfree(cmm_mgr_obj->node_free_list_head);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (DSP_SUCCEEDED(status)) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}
/*
 *  ======== cmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
		 u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->ul_seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}
	/* get the allocator for this segment id */
	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (allocator != NULL) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		mnode_obj =
		    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
		while (mnode_obj) {
			if ((u32) buf_pa == mnode_obj->dw_pa) {
				/* Found it */
				lst_remove_elem(allocator->in_use_list_head,
						(struct list_head *)mnode_obj);
				/* back to freelist */
				add_to_free_list(allocator, mnode_obj);
				status = 0;	/* all right! */
				break;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->in_use_list_head,
				     (struct list_head *)mnode_obj);
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return status;
}
/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (DSP_SUCCEEDED(status))
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *mnode_obj = NULL;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->ul_total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (altr != NULL) {
			cmm_info_obj->ul_num_gppsm_segs++;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
			    altr->shm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
			    altr->ul_dsp_size + altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
			    altr->shm_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
			    altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
			    altr->dw_dsp_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
			    altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
			    altr->dw_vm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
			mnode_obj = (struct cmm_mnode *)
			    lst_first(altr->in_use_list_head);
			/* Count inUse blocks */
			while (mnode_obj) {
				cmm_info_obj->ul_total_in_use_cnt++;
				cmm_info_obj->seg_info[ul_seg -
						       1].ul_in_use_cnt++;
				/* next node. */
				mnode_obj = (struct cmm_mnode *)
				    lst_next(altr->in_use_list_head,
					     (struct list_head *)mnode_obj);
			}
		}
	}			/* end for */
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
/*
 *  ======== cmm_init ========
 *  Purpose:
 *      Initializes private state of CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
		ul_dsp_size, gpp_base_va);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		/* get a slot number */
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block */
	if (DSP_SUCCEEDED(status)) {
		if (ul_size < cmm_mgr_obj->ul_min_block_size) {
			status = -EINVAL;
			goto func_end;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* create, zero, and tag an SM allocator object */
		psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	}
	if (psma != NULL) {
		psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
		psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
		psma->dw_vm_base = gpp_base_va;
		psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
		psma->c_factor = c_factor;
		psma->dw_dsp_base = dw_dsp_base;
		psma->ul_dsp_size = ul_dsp_size;
		if (psma->dw_vm_base == 0) {
			status = -EPERM;
			goto func_end;
		}
		if (DSP_SUCCEEDED(status)) {
			/* return the actual segment identifier */
			*sgmt_id = (u32) slot_seg + 1;
			/* create memory free list */
			psma->free_list_head = kzalloc(sizeof(struct lst_list),
						       GFP_KERNEL);
			if (psma->free_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->free_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* create memory in-use list */
			psma->in_use_list_head = kzalloc(sizeof(struct
							 lst_list), GFP_KERNEL);
			if (psma->in_use_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->in_use_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* Get a mem node for this hunk-o-memory */
			new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
					    psma->dw_vm_base, ul_size);
			/* Place node on the SM allocator's free list */
			if (new_node) {
				lst_put_tail(psma->free_list_head,
					     (struct list_head *)new_node);
			} else {
				status = -ENOMEM;
				goto func_end;
			}
		}
		if (DSP_FAILED(status)) {
			/* Cleanup allocator */
			un_register_gppsm_seg(psma);
		}
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	if (DSP_SUCCEEDED(status))
		cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
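/*
 * Illustrative sketch of how dsp_addr_offset and c_factor registered above
 * are used by cmm_xlator_translate() further below (values hypothetical;
 * this assumes CMM_ADDTODSPPA/CMM_SUBFROMDSPPA select a +1/-1 conversion
 * factor):
 *
 *	With dsp_addr_offset = 0x20000000 and c_factor = CMM_SUBFROMDSPPA,
 *	a DSP PA of 0xA0001000 translates as
 *
 *	GPP PA = DSPPA2GPPPA(base, 0xA0001000, 0x20000000 * -1)
 *	       = 0xA0001000 - 0x20000000 = 0x80001000
 *
 *	and GPPPA2DSPPA() adds the same offset back for the reverse mapping.
 */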
/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (hcmm_mgr) {
		if (ul_seg_id == CMM_ALLSEGMENTS)
			ul_id = 1;

		if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
			while (ul_id <= CMM_MAXGPPSEGS) {
				mutex_lock(&cmm_mgr_obj->cmm_lock);
				/* slot = seg_id-1 */
				psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
				if (psma != NULL) {
					un_register_gppsm_seg(psma);
					/* Set alctr ptr to NULL for future
					 * reuse */
					cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
								      1] = NULL;
				} else if (ul_seg_id != CMM_ALLSEGMENTS) {
					status = -EPERM;
				}
				mutex_unlock(&cmm_mgr_obj->cmm_lock);
				if (ul_seg_id != CMM_ALLSEGMENTS)
					break;

				ul_id++;
			}	/* end while */
		} else {
			status = -EINVAL;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 *  Note:
 *      This routine is always called within cmm lock crit sect.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_mnode *next_node = NULL;

	DBC_REQUIRE(psma != NULL);
	if (psma->free_list_head != NULL) {
		/* free nodes on free list */
		mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->free_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->free_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->free_list_head);	/* delete freelist */
		/* free nodes on InUse list */
		mnode_obj =
		    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->in_use_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->in_use_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->in_use_list_head);	/* delete InUse list */
	}
	if ((void *)psma->dw_vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);

	/* Free allocator itself */
	kfree(psma);
}
/*
 *  ======== get_slot ========
 *  Purpose:
 *      An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}

	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}
/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* Check cmm mgr's node freelist */
	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	} else {
		/* surely a valid element */
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);
	}
	if (pnode) {
		lst_init_elem((struct list_head *)pnode);	/* set self */
		pnode->dw_pa = dw_pa;	/* Physical addr of start of block */
		pnode->dw_va = dw_va;	/* Virtual addr of start of block */
		pnode->ul_size = ul_size;	/* Size of block */
	}
	return pnode;
}
/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}
/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	if (allocator) {
		struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
		    lst_first(allocator->free_list_head);
		while (mnode_obj) {
			if (usize <= (u32) mnode_obj->ul_size) {
				lst_remove_elem(allocator->free_list_head,
						(struct list_head *)mnode_obj);
				return mnode_obj;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		}
	}
	return NULL;
}
/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode)
{
	struct cmm_mnode *node_prev = NULL;
	struct cmm_mnode *node_next = NULL;
	struct cmm_mnode *mnode_obj;
	u32 dw_this_pa;
	u32 dw_next_pa;

	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(allocator != NULL);
	dw_this_pa = pnode->dw_pa;
	dw_next_pa = NEXT_PA(pnode);
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (dw_this_pa == NEXT_PA(mnode_obj)) {
			/* found the block ahead of this one */
			node_prev = mnode_obj;
		} else if (dw_next_pa == mnode_obj->dw_pa) {
			node_next = mnode_obj;
		}
		if ((node_prev == NULL) || (node_next == NULL)) {
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		} else {
			/* got 'em */
			break;
		}
	}			/* while */
	if (node_prev != NULL) {
		/* combine with previous block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_prev);
		/* grow node to hold both */
		pnode->ul_size += node_prev->ul_size;
		pnode->dw_pa = node_prev->dw_pa;
		pnode->dw_va = node_prev->dw_va;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_prev);
	}
	if (node_next != NULL) {
		/* combine with next block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_next);
		/* grow the node */
		pnode->ul_size += node_next->ul_size;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_next);
	}
	/* Now, let's add to freelist in increasing size order */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (pnode->ul_size <= mnode_obj->ul_size)
			break;

		/* next node. */
		mnode_obj =
		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
						 (struct list_head *)mnode_obj);
	}
	/* if mnode_obj is NULL then add our pnode to the end of the freelist */
	if (mnode_obj == NULL) {
		lst_put_tail(allocator->free_list_head,
			     (struct list_head *)pnode);
	} else {
		/* insert our node before the current traversed node */
		lst_insert_before(allocator->free_list_head,
				  (struct list_head *)pnode,
				  (struct list_head *)mnode_obj);
	}
}
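/*
 * Worked example of the coalescing above (illustrative only): suppose the
 * freelist already holds a 0x100-byte block at PA 0x80001000 and a
 * 0x200-byte block at PA 0x80001200, and a 0x100-byte block at PA
 * 0x80001100 is freed. NEXT_PA() of the first block equals 0x80001100 and
 * NEXT_PA() of the freed block equals 0x80001200, so both neighbours are
 * removed and merged, and a single 0x400-byte node at PA 0x80001000 is
 * re-inserted in size order.
 */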
/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds: 1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	struct cmm_allocator *allocator = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];

	return allocator;
}
/*
 * The CMM_Xlator[xxx] routines below are used by Node and Stream
 * to perform SM address translation to the client process address space.
 * A "translator" object is created by a node/stream for each SM seg used.
 */
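/*
 * Typical translator usage by a node/stream (illustrative sketch only;
 * client_va_base, vm_size and the buffer size are hypothetical):
 *
 *	struct cmm_xlatorobject *xlator;
 *	u8 *client_va_base;	// client-process VM base mapped elsewhere
 *	void *va = NULL, *pa;
 *
 *	cmm_xlator_create(&xlator, cmm_mgr, NULL);	// default attrs
 *	cmm_xlator_info(xlator, &client_va_base, vm_size, 1, true);
 *	pa = cmm_xlator_alloc_buf(xlator, &va, 0x1000);	// va gets client VA
 *	cmm_xlator_free_buf(xlator, va);
 *	cmm_xlator_delete(xlator, false);
 */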
/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->hcmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
	} else {
		status = -ENOMEM;
	}
	if (DSP_SUCCEEDED(status))
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}
/*
 *  ======== cmm_xlator_delete ========
 *  Purpose:
 *      Free the Xlator resources.
 *      VM gets freed later.
 */
int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;

	DBC_REQUIRE(refs > 0);

	kfree(xlator_obj);

	return 0;
}
/*
 *  ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		attrs.ul_seg_id = xlator_obj->ul_seg_id;
		*(volatile u32 *)va_buf = 0;
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator(node/strm) process Virtual
			 * address */
			*(volatile u32 **)va_buf =
			    (u32 *) cmm_xlator_translate(xlator,
							 pbuf, CMM_PA2VA);
		}
	}
	return pbuf;
}
/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
					      xlator_obj->ul_seg_id);
			if (DSP_FAILED(status)) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}
/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set translator's virtual address range */
			xlator_obj->dw_virt_base = (u32) *paddr;
			xlator_obj->ul_virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->dw_virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
/*
 *  ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->ul_seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
							   allocator->
							   ul_dsp_size);
			dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->dw_virt_base +
			      xlator_obj->ul_virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->ul_dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}