slc90e66 update
[linux-2.6/history.git] / sound / core / memory.c
blob49767bfc0e540c83c6b2e9f57ad0925c5ae86c57
1 /*
2 * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
3 * Memory allocation routines.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define __NO_VERSION__
23 #include <sound/driver.h>
24 #include <asm/io.h>
25 #include <asm/uaccess.h>
26 #include <linux/init.h>
27 #include <linux/slab.h>
28 #include <linux/time.h>
29 #include <linux/pci.h>
30 #include <sound/core.h>
31 #include <sound/info.h>
34 * memory allocation helpers and debug routines
37 #ifdef CONFIG_SND_DEBUG_MEMORY
39 struct snd_alloc_track {
40 unsigned long magic;
41 void *caller;
42 size_t size;
43 struct list_head list;
44 long data[0];
47 #define snd_alloc_track_entry(obj) (struct snd_alloc_track *)((char*)obj - (unsigned long)((struct snd_alloc_track *)0)->data)
/* Running totals (bytes/pages) of outstanding allocations, for leak reporting. */
static long snd_alloc_pages;
static long snd_alloc_kmalloc;
static long snd_alloc_vmalloc;
/* Lists of all currently outstanding tracked kmalloc/vmalloc blocks. */
static LIST_HEAD(snd_alloc_kmalloc_list);
static LIST_HEAD(snd_alloc_vmalloc_list);
/* Each lock protects the correspondingly named list above. */
static spinlock_t snd_alloc_kmalloc_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t snd_alloc_vmalloc_lock = SPIN_LOCK_UNLOCKED;
/* Magic values stored in snd_alloc_track.magic to detect bad/double frees. */
#define KMALLOC_MAGIC 0x87654321
#define VMALLOC_MAGIC 0x87654320
/* /proc "meminfo" entry registered by snd_memory_info_init(). */
static snd_info_entry_t *snd_memory_info_entry;
60 void snd_memory_init(void)
62 snd_alloc_pages = 0;
63 snd_alloc_kmalloc = 0;
64 snd_alloc_vmalloc = 0;
67 void snd_memory_done(void)
69 struct list_head *head;
70 struct snd_alloc_track *t;
71 if (snd_alloc_pages > 0)
72 snd_printk(KERN_ERR "Not freed snd_alloc_pages = %li\n", snd_alloc_pages);
73 if (snd_alloc_kmalloc > 0)
74 snd_printk(KERN_ERR "Not freed snd_alloc_kmalloc = %li\n", snd_alloc_kmalloc);
75 if (snd_alloc_vmalloc > 0)
76 snd_printk(KERN_ERR "Not freed snd_alloc_vmalloc = %li\n", snd_alloc_vmalloc);
77 for (head = snd_alloc_kmalloc_list.prev;
78 head != &snd_alloc_kmalloc_list; head = head->prev) {
79 t = list_entry(head, struct snd_alloc_track, list);
80 if (t->magic != KMALLOC_MAGIC) {
81 snd_printk(KERN_ERR "Corrupted kmalloc\n");
82 break;
84 snd_printk(KERN_ERR "kmalloc(%ld) from %p not freed\n", (long) t->size, t->caller);
86 for (head = snd_alloc_vmalloc_list.prev;
87 head != &snd_alloc_vmalloc_list; head = head->prev) {
88 t = list_entry(head, struct snd_alloc_track, list);
89 if (t->magic != VMALLOC_MAGIC) {
90 snd_printk(KERN_ERR "Corrupted vmalloc\n");
91 break;
93 snd_printk(KERN_ERR "vmalloc(%ld) from %p not freed\n", (long) t->size, t->caller);
97 void *__snd_kmalloc(size_t size, int flags, void *caller)
99 unsigned long cpu_flags;
100 struct snd_alloc_track *t;
101 void *ptr;
103 ptr = snd_wrapper_kmalloc(size + sizeof(struct snd_alloc_track), flags);
104 if (ptr != NULL) {
105 t = (struct snd_alloc_track *)ptr;
106 t->magic = KMALLOC_MAGIC;
107 t->caller = caller;
108 spin_lock_irqsave(&snd_alloc_kmalloc_lock, cpu_flags);
109 list_add_tail(&t->list, &snd_alloc_kmalloc_list);
110 spin_unlock_irqrestore(&snd_alloc_kmalloc_lock, cpu_flags);
111 t->size = size;
112 snd_alloc_kmalloc += size;
113 ptr = t->data;
115 return ptr;
118 #define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0));
119 void *snd_hidden_kmalloc(size_t size, int flags)
121 return _snd_kmalloc(size, flags);
124 void snd_hidden_kfree(const void *obj)
126 unsigned long flags;
127 struct snd_alloc_track *t;
128 if (obj == NULL) {
129 snd_printk(KERN_WARNING "null kfree (called from %p)\n", __builtin_return_address(0));
130 return;
132 t = snd_alloc_track_entry(obj);
133 if (t->magic != KMALLOC_MAGIC) {
134 snd_printk(KERN_WARNING "bad kfree (called from %p)\n", __builtin_return_address(0));
135 return;
137 spin_lock_irqsave(&snd_alloc_kmalloc_lock, flags);
138 list_del(&t->list);
139 spin_unlock_irqrestore(&snd_alloc_kmalloc_lock, flags);
140 t->magic = 0;
141 snd_alloc_kmalloc -= t->size;
142 obj = t;
143 snd_wrapper_kfree(obj);
146 void *_snd_magic_kcalloc(unsigned long magic, size_t size, int flags)
148 unsigned long *ptr;
149 ptr = _snd_kmalloc(size + sizeof(unsigned long), flags);
150 if (ptr) {
151 *ptr++ = magic;
152 memset(ptr, 0, size);
154 return ptr;
157 void *_snd_magic_kmalloc(unsigned long magic, size_t size, int flags)
159 unsigned long *ptr;
160 ptr = _snd_kmalloc(size + sizeof(unsigned long), flags);
161 if (ptr)
162 *ptr++ = magic;
163 return ptr;
166 void snd_magic_kfree(void *_ptr)
168 unsigned long *ptr = _ptr;
169 if (ptr == NULL) {
170 snd_printk(KERN_WARNING "null snd_magic_kfree (called from %p)\n", __builtin_return_address(0));
171 return;
173 *--ptr = 0;
175 struct snd_alloc_track *t;
176 t = snd_alloc_track_entry(ptr);
177 if (t->magic != KMALLOC_MAGIC) {
178 snd_printk(KERN_ERR "bad snd_magic_kfree (called from %p)\n", __builtin_return_address(0));
179 return;
182 snd_hidden_kfree(ptr);
183 return;
186 void *snd_hidden_vmalloc(unsigned long size)
188 void *ptr;
189 ptr = snd_wrapper_vmalloc(size + sizeof(struct snd_alloc_track));
190 if (ptr) {
191 struct snd_alloc_track *t = (struct snd_alloc_track *)ptr;
192 t->magic = VMALLOC_MAGIC;
193 t->caller = __builtin_return_address(0);
194 spin_lock(&snd_alloc_vmalloc_lock);
195 list_add_tail(&t->list, &snd_alloc_vmalloc_list);
196 spin_unlock(&snd_alloc_vmalloc_lock);
197 t->size = size;
198 snd_alloc_vmalloc += size;
199 ptr = t->data;
201 return ptr;
204 void snd_hidden_vfree(void *obj)
206 struct snd_alloc_track *t;
207 if (obj == NULL) {
208 snd_printk(KERN_WARNING "null vfree (called from %p)\n", __builtin_return_address(0));
209 return;
211 t = snd_alloc_track_entry(obj);
212 if (t->magic != VMALLOC_MAGIC) {
213 snd_printk(KERN_ERR "bad vfree (called from %p)\n", __builtin_return_address(0));
214 return;
216 spin_lock(&snd_alloc_vmalloc_lock);
217 list_del(&t->list);
218 spin_unlock(&snd_alloc_vmalloc_lock);
219 t->magic = 0;
220 snd_alloc_vmalloc -= t->size;
221 obj = t;
222 snd_wrapper_vfree(obj);
225 static void snd_memory_info_read(snd_info_entry_t *entry, snd_info_buffer_t * buffer)
227 long pages = snd_alloc_pages >> (PAGE_SHIFT-12);
228 snd_iprintf(buffer, "pages : %li bytes (%li pages per %likB)\n", pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
229 snd_iprintf(buffer, "kmalloc: %li bytes\n", snd_alloc_kmalloc);
230 snd_iprintf(buffer, "vmalloc: %li bytes\n", snd_alloc_vmalloc);
233 int __init snd_memory_info_init(void)
235 snd_info_entry_t *entry;
237 entry = snd_info_create_module_entry(THIS_MODULE, "meminfo", NULL);
238 if (entry) {
239 entry->content = SNDRV_INFO_CONTENT_TEXT;
240 entry->c.text.read_size = 256;
241 entry->c.text.read = snd_memory_info_read;
242 if (snd_info_register(entry) < 0) {
243 snd_info_free_entry(entry);
244 entry = NULL;
247 snd_memory_info_entry = entry;
248 return 0;
251 int __exit snd_memory_info_done(void)
253 if (snd_memory_info_entry)
254 snd_info_unregister(snd_memory_info_entry);
255 return 0;
258 #else
260 #define _snd_kmalloc kmalloc
262 #endif /* CONFIG_SND_DEBUG_MEMORY */
266 void *snd_malloc_pages(unsigned long size, unsigned int dma_flags)
268 int pg;
269 void *res;
271 snd_assert(size > 0, return NULL);
272 snd_assert(dma_flags != 0, return NULL);
273 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
274 if ((res = (void *) __get_free_pages(dma_flags, pg)) != NULL) {
275 struct page *page = virt_to_page(res);
276 struct page *last_page = page + (1 << pg);
277 while (page < last_page)
278 SetPageReserved(page++);
279 #ifdef CONFIG_SND_DEBUG_MEMORY
280 snd_alloc_pages += 1 << pg;
281 #endif
283 return res;
286 void *snd_malloc_pages_fallback(unsigned long size, unsigned int dma_flags, unsigned long *res_size)
288 void *res;
290 snd_assert(size > 0, return NULL);
291 snd_assert(res_size != NULL, return NULL);
292 do {
293 if ((res = snd_malloc_pages(size, dma_flags)) != NULL) {
294 *res_size = size;
295 return res;
297 size >>= 1;
298 } while (size >= PAGE_SIZE);
299 return NULL;
302 void snd_free_pages(void *ptr, unsigned long size)
304 int pg;
305 struct page *page, *last_page;
307 if (ptr == NULL)
308 return;
309 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
310 page = virt_to_page(ptr);
311 last_page = page + (1 << pg);
312 while (page < last_page)
313 ClearPageReserved(page++);
314 free_pages((unsigned long) ptr, pg);
315 #ifdef CONFIG_SND_DEBUG_MEMORY
316 snd_alloc_pages -= 1 << pg;
317 #endif
320 #if defined(CONFIG_ISA) && ! defined(CONFIG_PCI)
322 void *snd_malloc_isa_pages(unsigned long size, dma_addr_t *dma_addr)
324 void *dma_area;
325 dma_area = snd_malloc_pages(size, GFP_ATOMIC|GFP_DMA);
326 *dma_addr = dma_area ? isa_virt_to_bus(dma_area) : 0UL;
327 return dma_area;
330 void *snd_malloc_isa_pages_fallback(unsigned long size,
331 dma_addr_t *dma_addr,
332 unsigned long *res_size)
334 void *dma_area;
335 dma_area = snd_malloc_pages_fallback(size, GFP_ATOMIC|GFP_DMA, res_size);
336 *dma_addr = dma_area ? isa_virt_to_bus(dma_area) : 0UL;
337 return dma_area;
340 #endif /* CONFIG_ISA && !CONFIG_PCI */
342 #ifdef CONFIG_PCI
344 void *snd_malloc_pci_pages(struct pci_dev *pci,
345 unsigned long size,
346 dma_addr_t *dma_addr)
348 int pg;
349 void *res;
351 snd_assert(size > 0, return NULL);
352 snd_assert(dma_addr != NULL, return NULL);
353 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
354 res = pci_alloc_consistent(pci, PAGE_SIZE * (1 << pg), dma_addr);
355 if (res != NULL) {
356 struct page *page = virt_to_page(res);
357 struct page *last_page = page + (1 << pg);
358 while (page < last_page)
359 SetPageReserved(page++);
360 #ifdef CONFIG_SND_DEBUG_MEMORY
361 snd_alloc_pages += 1 << pg;
362 #endif
364 return res;
367 void *snd_malloc_pci_pages_fallback(struct pci_dev *pci,
368 unsigned long size,
369 dma_addr_t *dma_addr,
370 unsigned long *res_size)
372 void *res;
374 snd_assert(res_size != NULL, return NULL);
375 do {
376 if ((res = snd_malloc_pci_pages(pci, size, dma_addr)) != NULL) {
377 *res_size = size;
378 return res;
380 size >>= 1;
381 } while (size >= PAGE_SIZE);
382 return NULL;
385 void snd_free_pci_pages(struct pci_dev *pci,
386 unsigned long size,
387 void *ptr,
388 dma_addr_t dma_addr)
390 int pg;
391 struct page *page, *last_page;
393 if (ptr == NULL)
394 return;
395 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
396 page = virt_to_page(ptr);
397 last_page = page + (1 << pg);
398 while (page < last_page)
399 ClearPageReserved(page++);
400 pci_free_consistent(pci, PAGE_SIZE * (1 << pg), ptr, dma_addr);
401 #ifdef CONFIG_SND_DEBUG_MEMORY
402 snd_alloc_pages -= 1 << pg;
403 #endif
406 #endif /* CONFIG_PCI */
408 void *snd_kcalloc(size_t size, int flags)
410 void *ptr;
412 ptr = _snd_kmalloc(size, flags);
413 if (ptr)
414 memset(ptr, 0, size);
415 return ptr;
418 char *snd_kmalloc_strdup(const char *string, int flags)
420 size_t len;
421 char *ptr;
423 if (!string)
424 return NULL;
425 len = strlen(string) + 1;
426 ptr = _snd_kmalloc(len, flags);
427 if (ptr)
428 memcpy(ptr, string, len);
429 return ptr;
432 int copy_to_user_fromio(void *dst, unsigned long src, size_t count)
434 #if defined(__i386_) || defined(CONFIG_SPARC32)
435 return copy_to_user(dst, (const void*)src, count) ? -EFAULT : 0;
436 #else
437 char buf[1024];
438 while (count) {
439 size_t c = count;
440 if (c > sizeof(buf))
441 c = sizeof(buf);
442 memcpy_fromio(buf, src, c);
443 if (copy_to_user(dst, buf, c))
444 return -EFAULT;
445 count -= c;
446 dst += c;
447 src += c;
449 return 0;
450 #endif
453 int copy_from_user_toio(unsigned long dst, const void *src, size_t count)
455 #if defined(__i386_) || defined(CONFIG_SPARC32)
456 return copy_from_user((void*)dst, src, count) ? -EFAULT : 0;
457 #else
458 char buf[1024];
459 while (count) {
460 size_t c = count;
461 if (c > sizeof(buf))
462 c = sizeof(buf);
463 if (copy_from_user(buf, src, c))
464 return -EFAULT;
465 memcpy_toio(dst, buf, c);
466 count -= c;
467 dst += c;
468 src += c;
470 return 0;
471 #endif
474 #ifdef HACK_PCI_ALLOC_CONSISTENT
476 * A dirty hack... when the kernel code is fixed this should be removed.
478 * since pci_alloc_consistent always tries GFP_DMA when the requested
479 * pci memory region is below 32bit, it happens quite often that even
480 * 2 order or pages cannot be allocated.
482 * so in the following, GFP_DMA is used only when the first allocation
483 * doesn't match the requested region.
485 #ifdef __i386__
486 #define get_phys_addr(x) virt_to_phys(x)
487 #else /* ppc */
488 #define get_phys_addr(x) virt_to_bus(x)
489 #endif
490 void *snd_pci_hack_alloc_consistent(struct pci_dev *hwdev, size_t size,
491 dma_addr_t *dma_handle)
493 void *ret;
494 int gfp = GFP_ATOMIC;
496 if (hwdev == NULL)
497 gfp |= GFP_DMA;
498 ret = (void *)__get_free_pages(gfp, get_order(size));
499 if (ret) {
500 if (hwdev && ((get_phys_addr(ret) + size - 1) & ~hwdev->dma_mask)) {
501 free_pages((unsigned long)ret, get_order(size));
502 ret = (void *)__get_free_pages(gfp | GFP_DMA, get_order(size));
505 if (ret) {
506 memset(ret, 0, size);
507 *dma_handle = get_phys_addr(ret);
509 return ret;
511 #endif /* hack */