/*
 * dmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Dynamic Memory Manager (DMM) module manages the DSP virtual address
 * space that can be directly mapped to any MPU buffer or memory region.
 *
 * Notes:
 *   Region: Generic memory entity having a start address and a size
 *   Chunk: Reserved region
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/dmm.h>
/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
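
/*
 * Illustrative arithmetic, assuming a hypothetical dyn_mem_map_beg of
 * 0x20000000: &virtual_mapping_table[3] corresponds to DSP virtual
 * address 0x20000000 + 3 * PG_SIZE4K = 0x20003000, and
 * DMM_ADDR_TO_INDEX(0x20003000) yields index 3 again; the two macros
 * are inverses at 4 KB page granularity.
 */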
/* DMM Mgr */
struct dmm_object {
	/* Dmm Lock is used to serialize access to the memory manager
	 * across multiple threads. */
	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
};

/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */
struct map_page {
	u32 region_size:15;
	u32 mapped_size:15;
	u32 reserved:1;
	u32 mapped:1;
};

/*  Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region;		/* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg;	/* The beginning of dynamic memory mapping */
static u32 table_size;		/* The size of virt and phys pages tables */
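
/*
 * Layout note: region_size and mapped_size are 15-bit page counts, so a
 * single region or mapping tops out at (2^15 - 1) pages, just under
 * 128 MB at 4 KB per page. Each table entry describes one 4 KB page at
 * dyn_mem_map_beg + index * PG_SIZE4K, and only the first entry of a
 * region carries its size.
 */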
/*  ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
/*  ======== dmm_create_tables ========
 *  Purpose:
 *      Create a table to hold the information on the physical pages of a
 *      buffer passed by the user, and a table to hold the information on
 *      the virtual memory that is reserved for the DSP.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/*  Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL)
			status = -ENOMEM;
		else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
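
/*
 * A minimal setup sketch, compiled out; the window base and size below
 * are hypothetical (a 64 MB window keeps table_size within the 15-bit
 * region_size field):
 */
#if 0
static int example_dmm_setup(struct dmm_object *dmm_mgr)
{
	/* Manage a 64 MB DSP dynamic-mapping window at 0x20000000 */
	return dmm_create_tables(dmm_mgr, 0x20000000, 0x04000000);
}
#endif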
/*
 *  ======== dmm_create ========
 *  Purpose:
 *      Create a dynamic memory manager object.
 */
int dmm_create(struct dmm_object **dmm_manager,
	       struct dev_object *hdev_obj,
	       const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);

	*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}
/*
 *  ======== dmm_destroy ========
 *  Purpose:
 *      Release the dynamic memory manager resources.
 */
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else
		status = -EFAULT;

	return status;
}
/*
 *  ======== dmm_delete_tables ========
 *  Purpose:
 *      Delete DMM Tables.
 */
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	DBC_REQUIRE(refs > 0);
	/* Delete all DMM tables */
	if (dmm_mgr) {
		vfree(virtual_mapping_table);
		/* Clear the stale pointer so a second delete (e.g. from
		 * dmm_destroy() after an earlier explicit delete) does
		 * not vfree() a dangling address. */
		virtual_mapping_table = NULL;
	} else
		status = -EFAULT;
	return status;
}
/*
 *  ======== dmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void dmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== dmm_get_handle ========
 *  Purpose:
 *      Return the dynamic memory manager object for this device.
 *      This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}
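
/*
 * A typical client flow, sketched under the assumption of a valid
 * processor handle from proc_attach(); compiled out, and the helper
 * name and sizes are hypothetical:
 */
#if 0
static int example_reserve_and_map(void *hprocessor)
{
	struct dmm_object *dmm_mgr;
	u32 dsp_addr = 0;
	int status;

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	/* Reserve 64 KB of DSP virtual address space... */
	status = dmm_reserve_memory(dmm_mgr, 64 * 1024, &dsp_addr);
	if (status)
		return status;

	/* ...then record the mapping of the whole block */
	return dmm_map_memory(dmm_mgr, dsp_addr, 64 * 1024);
}
#endif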
/*
 *  ======== dmm_init ========
 *  Purpose:
 *      Initializes private state of DMM module.
 */
bool dmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	virtual_mapping_table = NULL;
	table_size = 0;

	return ret;
}
/*
 *  ======== dmm_map_memory ========
 *  Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this block
 *      will be mapped in the DSP/IVA's address space. DMM returns an error if
 *      a mapping overlaps another one. This function stores the info that
 *      will be required later while unmapping the block.
 */
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the Reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = (struct map_page *)get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else
		status = -ENOENT;
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}
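
/*
 * Note that only the first map_page entry of the chunk records the
 * mapping: get_region() turns 'addr' into a table index, and the
 * mapped/mapped_size fields stored there are what dmm_un_map_memory()
 * later uses to recover the block's size.
 */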
/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
		       u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/* DSP chunk of given size is available. */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/* Mark the remainder of the free region */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* get_free_region() returns a first-fit chunk, but we only
		 * use what was requested. */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting address */
		*prsv_addr = rsv_addr;
	} else
		/* DSP chunk of given size is not available */
		status = -ENOMEM;

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}
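
/*
 * Worked example of the split above, with illustrative numbers:
 * reserving 12 KB (3 pages) out of an 8-page free region turns the
 * head entry into a 3-page reserved chunk (region_size = 3) and stamps
 * node[3] as a new 5-page free region; free_region and free_size were
 * already advanced by get_free_region().
 */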
/*
 *  ======== dmm_un_map_memory ========
 *  Purpose:
 *      Remove the mapped block from the reserved chunk.
 */
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the region */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}
/*
 *  ======== dmm_un_reserve_memory ========
 *  Purpose:
 *      Free a chunk of reserved DSP/IVA address space.
 */
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	int status = 0;
	u32 chunk_size;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Remove mapping from the page tables. */
				chunk_size = chunk[i].mapped_size;
				/* Clear the mapping flags */
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else
				i++;
		}

		/* Clear the flags (mark the region 'free') */
		chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table. */
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}
/*
 *  ======== get_region ========
 *  Purpose:
 *      Returns the region containing the specified memory address
 */
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;

	if (virtual_mapping_table != NULL) {
		/* find page mapped by this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);
	return curr_region;
}
/*
 *  ======== get_free_region ========
 *  Purpose:
 *      Returns the requested free region
 */
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* Find the largest free region
		 * (coalesce during the traversal) */
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce, if possible */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}
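
/*
 * Coalescing sketch, with illustrative sizes: if entry i heads a 2-page
 * free region and entry i + 2 heads a 4-page free region, the walk
 * above folds them into one 6-page region at i before weighing it as
 * the largest-free candidate. The walk only runs when the cached
 * free_size cannot satisfy the request.
 */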
/*
 *  ======== get_mapped_region ========
 *  Purpose:
 *      Returns the requested mapped region
 */
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;
	return curr_region;
}
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);

	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (curr_node->reserved) {
				/* printk("RESERVED size = 0x%x, "
				   "Map size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K),
				   (curr_node->mapped == false) ? 0 :
				   (curr_node->mapped_size * PG_SIZE4K));
				 */
			} else {
				/* printk("UNRESERVED size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K));
				 */
				freemem += (curr_node->region_size *
					    PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       ((table_size * PG_SIZE4K) - freemem) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       (bigsize * PG_SIZE4K / (1024 * 1024)));

	return 0;
}
#endif