/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *                   Memory allocation routines.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#define __NO_VERSION__
#include <sound/driver.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/info.h>

/*
 *  memory allocation helpers and debug routines
 */

#ifdef CONFIG_SND_DEBUG_MEMORY

struct snd_alloc_track {
        unsigned long magic;    /* KMALLOC_MAGIC or VMALLOC_MAGIC */
        void *caller;           /* return address of the allocating caller */
        size_t size;            /* requested payload size in bytes */
        struct list_head list;  /* entry in the per-allocator tracking list */
        long data[0];           /* payload handed back to the caller */
};

/* step back from a payload pointer to its hidden tracking header */
#define snd_alloc_track_entry(obj) (struct snd_alloc_track *)((char*)obj - (unsigned long)((struct snd_alloc_track *)0)->data)
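
/*
 * Illustration (not part of the original code): every block handed out by
 * the debug allocators is prefixed with the tracking header above, so the
 * memory layout is:
 *
 *   | magic | caller | size | list | payload ... |
 *   ^ struct snd_alloc_track        ^ pointer returned to the caller
 *
 * A minimal sketch of the round trip, assuming `p` was obtained from
 * __snd_kmalloc():
 *
 *   struct snd_alloc_track *t = snd_alloc_track_entry(p);
 *   // t->data == p; t->magic must still equal KMALLOC_MAGIC, otherwise
 *   // the block is corrupted or was not allocated by these wrappers.
 */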

static long snd_alloc_pages;
static long snd_alloc_kmalloc;
static long snd_alloc_vmalloc;
static LIST_HEAD(snd_alloc_kmalloc_list);
static LIST_HEAD(snd_alloc_vmalloc_list);
static spinlock_t snd_alloc_kmalloc_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t snd_alloc_vmalloc_lock = SPIN_LOCK_UNLOCKED;
#define KMALLOC_MAGIC 0x87654321
#define VMALLOC_MAGIC 0x87654320
static snd_info_entry_t *snd_memory_info_entry;

void snd_memory_init(void)
{
        snd_alloc_pages = 0;
        snd_alloc_kmalloc = 0;
        snd_alloc_vmalloc = 0;
}

void snd_memory_done(void)
{
        struct list_head *head;
        struct snd_alloc_track *t;

        if (snd_alloc_pages > 0)
                snd_printk(KERN_ERR "Not freed snd_alloc_pages = %li\n", snd_alloc_pages);
        if (snd_alloc_kmalloc > 0)
                snd_printk(KERN_ERR "Not freed snd_alloc_kmalloc = %li\n", snd_alloc_kmalloc);
        if (snd_alloc_vmalloc > 0)
                snd_printk(KERN_ERR "Not freed snd_alloc_vmalloc = %li\n", snd_alloc_vmalloc);
        /* walk the kmalloc tracking list backwards and report leaks */
        for (head = snd_alloc_kmalloc_list.prev;
             head != &snd_alloc_kmalloc_list; head = head->prev) {
                t = list_entry(head, struct snd_alloc_track, list);
                if (t->magic != KMALLOC_MAGIC) {
                        snd_printk(KERN_ERR "Corrupted kmalloc\n");
                        break;
                }
                snd_printk(KERN_ERR "kmalloc(%ld) from %p not freed\n", (long) t->size, t->caller);
        }
        /* likewise for the vmalloc tracking list */
        for (head = snd_alloc_vmalloc_list.prev;
             head != &snd_alloc_vmalloc_list; head = head->prev) {
                t = list_entry(head, struct snd_alloc_track, list);
                if (t->magic != VMALLOC_MAGIC) {
                        snd_printk(KERN_ERR "Corrupted vmalloc\n");
                        break;
                }
                snd_printk(KERN_ERR "vmalloc(%ld) from %p not freed\n", (long) t->size, t->caller);
        }
}

void *__snd_kmalloc(size_t size, int flags, void *caller)
{
        unsigned long cpu_flags;
        struct snd_alloc_track *t;
        void *ptr;

        /* allocate room for the tracking header in front of the payload */
        ptr = snd_wrapper_kmalloc(size + sizeof(struct snd_alloc_track), flags);
        if (ptr != NULL) {
                t = (struct snd_alloc_track *)ptr;
                t->magic = KMALLOC_MAGIC;
                t->caller = caller;
                spin_lock_irqsave(&snd_alloc_kmalloc_lock, cpu_flags);
                list_add_tail(&t->list, &snd_alloc_kmalloc_list);
                spin_unlock_irqrestore(&snd_alloc_kmalloc_lock, cpu_flags);
                t->size = size;
                snd_alloc_kmalloc += size;
                ptr = t->data;  /* hand back the payload, not the header */
        }
        return ptr;
}

#define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0))

void *snd_hidden_kmalloc(size_t size, int flags)
{
        return _snd_kmalloc(size, flags);
}
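
/*
 * Usage sketch (illustrative only): with CONFIG_SND_DEBUG_MEMORY enabled,
 * sound/core.h is expected to redirect kmalloc()/kfree() within ALSA code
 * to these hidden wrappers, so an unbalanced allocation such as
 *
 *   void *buf = snd_hidden_kmalloc(64, GFP_KERNEL);
 *   // ... no matching snd_hidden_kfree(buf) ...
 *
 * is reported by snd_memory_done() at module unload as
 * "kmalloc(64) from <caller> not freed".
 */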

void snd_hidden_kfree(const void *obj)
{
        unsigned long flags;
        struct snd_alloc_track *t;

        if (obj == NULL) {
                snd_printk(KERN_WARNING "null kfree (called from %p)\n", __builtin_return_address(0));
                return;
        }
        t = snd_alloc_track_entry(obj);
        if (t->magic != KMALLOC_MAGIC) {
                snd_printk(KERN_WARNING "bad kfree (called from %p)\n", __builtin_return_address(0));
                return;
        }
        spin_lock_irqsave(&snd_alloc_kmalloc_lock, flags);
        list_del(&t->list);
        spin_unlock_irqrestore(&snd_alloc_kmalloc_lock, flags);
        t->magic = 0;
        snd_alloc_kmalloc -= t->size;
        obj = t;
        snd_wrapper_kfree(obj);
}

void *_snd_magic_kcalloc(unsigned long magic, size_t size, int flags)
{
        unsigned long *ptr;

        ptr = _snd_kmalloc(size + sizeof(unsigned long), flags);
        if (ptr != NULL) {
                *ptr++ = magic;         /* store the type magic in front of the payload */
                memset(ptr, 0, size);
        }
        return ptr;
}

void *_snd_magic_kmalloc(unsigned long magic, size_t size, int flags)
{
        unsigned long *ptr;

        ptr = _snd_kmalloc(size + sizeof(unsigned long), flags);
        if (ptr != NULL)
                *ptr++ = magic;
        return ptr;
}

void snd_magic_kfree(void *_ptr)
{
        unsigned long *ptr = _ptr;

        if (ptr == NULL) {
                snd_printk(KERN_WARNING "null snd_magic_kfree (called from %p)\n", __builtin_return_address(0));
                return;
        }
        *--ptr = 0;     /* clear the type magic and step back to the real block */
        {
                struct snd_alloc_track *t;
                t = snd_alloc_track_entry(ptr);
                if (t->magic != KMALLOC_MAGIC) {
                        snd_printk(KERN_ERR "bad snd_magic_kfree (called from %p)\n", __builtin_return_address(0));
                        return;
                }
        }
        snd_hidden_kfree(ptr);
}
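
/*
 * Usage sketch (illustrative; the magic value and type are hypothetical):
 * the magic allocators prepend a per-type magic word so that pointer casts
 * between ALSA object types can be verified at runtime:
 *
 *   mychip_t *chip = _snd_magic_kcalloc(0xa15a1001, sizeof(*chip),
 *                                       GFP_KERNEL);
 *   if (chip == NULL)
 *           return -ENOMEM;
 *   // ...
 *   snd_magic_kfree(chip);  // checks the hidden tracking magic first
 */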

void *snd_hidden_vmalloc(unsigned long size)
{
        void *ptr;

        ptr = snd_wrapper_vmalloc(size + sizeof(struct snd_alloc_track));
        if (ptr) {
                struct snd_alloc_track *t = (struct snd_alloc_track *)ptr;
                t->magic = VMALLOC_MAGIC;
                t->caller = __builtin_return_address(0);
                spin_lock(&snd_alloc_vmalloc_lock);
                list_add_tail(&t->list, &snd_alloc_vmalloc_list);
                spin_unlock(&snd_alloc_vmalloc_lock);
                t->size = size;
                snd_alloc_vmalloc += size;
                ptr = t->data;
        }
        return ptr;
}

void snd_hidden_vfree(void *obj)
{
        struct snd_alloc_track *t;

        if (obj == NULL) {
                snd_printk(KERN_WARNING "null vfree (called from %p)\n", __builtin_return_address(0));
                return;
        }
        t = snd_alloc_track_entry(obj);
        if (t->magic != VMALLOC_MAGIC) {
                snd_printk(KERN_ERR "bad vfree (called from %p)\n", __builtin_return_address(0));
                return;
        }
        spin_lock(&snd_alloc_vmalloc_lock);
        list_del(&t->list);
        spin_unlock(&snd_alloc_vmalloc_lock);
        t->magic = 0;
        snd_alloc_vmalloc -= t->size;
        obj = t;
        snd_wrapper_vfree(obj);
}

static void snd_memory_info_read(snd_info_entry_t *entry, snd_info_buffer_t * buffer)
{
        long pages = snd_alloc_pages >> (PAGE_SHIFT-12);
        snd_iprintf(buffer, "pages  : %li bytes (%li pages per %likB)\n", pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        snd_iprintf(buffer, "kmalloc: %li bytes\n", snd_alloc_kmalloc);
        snd_iprintf(buffer, "vmalloc: %li bytes\n", snd_alloc_vmalloc);
}
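
/*
 * Example of the resulting /proc/asound/meminfo output (numbers are
 * illustrative, assuming 4kB pages):
 *
 *   pages  : 65536 bytes (16 pages per 4kB)
 *   kmalloc: 12288 bytes
 *   vmalloc: 0 bytes
 */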

int __init snd_memory_info_init(void)
{
        snd_info_entry_t *entry;

        entry = snd_info_create_module_entry(THIS_MODULE, "meminfo", NULL);
        if (entry) {
                entry->content = SNDRV_INFO_CONTENT_TEXT;
                entry->c.text.read_size = 256;
                entry->c.text.read = snd_memory_info_read;
                if (snd_info_register(entry) < 0) {
                        snd_info_free_entry(entry);
                        entry = NULL;
                }
        }
        snd_memory_info_entry = entry;
        return 0;
}

int __exit snd_memory_info_done(void)
{
        if (snd_memory_info_entry)
                snd_info_unregister(snd_memory_info_entry);
        return 0;
}

#else

#define _snd_kmalloc kmalloc

#endif /* CONFIG_SND_DEBUG_MEMORY */

void *snd_malloc_pages(unsigned long size, unsigned int dma_flags)
{
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(dma_flags != 0, return NULL);
        /* find the smallest page order that covers the requested size */
        for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
        if ((res = (void *) __get_free_pages(dma_flags, pg)) != NULL) {
                struct page *page = virt_to_page(res);
                struct page *last_page = page + (1 << pg);
                /* mark the pages reserved so they can be mmapped safely */
                while (page < last_page)
                        SetPageReserved(page++);
#ifdef CONFIG_SND_DEBUG_MEMORY
                snd_alloc_pages += 1 << pg;
#endif
        }
        return res;
}

void *snd_malloc_pages_fallback(unsigned long size, unsigned int dma_flags, unsigned long *res_size)
{
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(res_size != NULL, return NULL);
        /* halve the request until an allocation succeeds */
        do {
                if ((res = snd_malloc_pages(size, dma_flags)) != NULL) {
                        *res_size = size;
                        return res;
                }
                size >>= 1;
        } while (size >= PAGE_SIZE);
        return NULL;
}
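
/*
 * Usage sketch (illustrative only): a caller that would like 64kB but can
 * live with less asks for the fallback variant and checks what it got:
 *
 *   unsigned long got;
 *   void *buf = snd_malloc_pages_fallback(64 * 1024, GFP_KERNEL, &got);
 *   if (buf == NULL)
 *           return -ENOMEM;  // not even a single page was available
 *   // `got` holds the granted size: 64k, 32k, ... down to PAGE_SIZE
 */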

void snd_free_pages(void *ptr, unsigned long size)
{
        int pg;
        struct page *page, *last_page;

        if (ptr == NULL)
                return;
        for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
        page = virt_to_page(ptr);
        last_page = page + (1 << pg);
        while (page < last_page)
                ClearPageReserved(page++);
        free_pages((unsigned long) ptr, pg);
#ifdef CONFIG_SND_DEBUG_MEMORY
        snd_alloc_pages -= 1 << pg;
#endif
}

#if defined(CONFIG_ISA) && ! defined(CONFIG_PCI)

void *snd_malloc_isa_pages(unsigned long size, dma_addr_t *dma_addr)
{
        void *dma_area;

        dma_area = snd_malloc_pages(size, GFP_ATOMIC|GFP_DMA);
        *dma_addr = dma_area ? isa_virt_to_bus(dma_area) : 0UL;
        return dma_area;
}

void *snd_malloc_isa_pages_fallback(unsigned long size,
                                    dma_addr_t *dma_addr,
                                    unsigned long *res_size)
{
        void *dma_area;

        dma_area = snd_malloc_pages_fallback(size, GFP_ATOMIC|GFP_DMA, res_size);
        *dma_addr = dma_area ? isa_virt_to_bus(dma_area) : 0UL;
        return dma_area;
}

#endif /* CONFIG_ISA && !CONFIG_PCI */

#ifdef CONFIG_PCI

void *snd_malloc_pci_pages(struct pci_dev *pci,
                           unsigned long size,
                           dma_addr_t *dma_addr)
{
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(dma_addr != NULL, return NULL);
        for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
        res = pci_alloc_consistent(pci, PAGE_SIZE * (1 << pg), dma_addr);
        if (res != NULL) {
                struct page *page = virt_to_page(res);
                struct page *last_page = page + (1 << pg);
                while (page < last_page)
                        SetPageReserved(page++);
#ifdef CONFIG_SND_DEBUG_MEMORY
                snd_alloc_pages += 1 << pg;
#endif
        }
        return res;
}

void *snd_malloc_pci_pages_fallback(struct pci_dev *pci,
                                    unsigned long size,
                                    dma_addr_t *dma_addr,
                                    unsigned long *res_size)
{
        void *res;

        snd_assert(res_size != NULL, return NULL);
        do {
                if ((res = snd_malloc_pci_pages(pci, size, dma_addr)) != NULL) {
                        *res_size = size;
                        return res;
                }
                size >>= 1;
        } while (size >= PAGE_SIZE);
        return NULL;
}

void snd_free_pci_pages(struct pci_dev *pci,
                        unsigned long size,
                        void *ptr,
                        dma_addr_t dma_addr)
{
        int pg;
        struct page *page, *last_page;

        if (ptr == NULL)
                return;
        for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
        page = virt_to_page(ptr);
        last_page = page + (1 << pg);
        while (page < last_page)
                ClearPageReserved(page++);
        pci_free_consistent(pci, PAGE_SIZE * (1 << pg), ptr, dma_addr);
#ifdef CONFIG_SND_DEBUG_MEMORY
        snd_alloc_pages -= 1 << pg;
#endif
}

#endif /* CONFIG_PCI */
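
/*
 * Usage sketch (illustrative only): a PCI driver allocating a consistent
 * DMA buffer and releasing it again, where `pci` is the probed device:
 *
 *   dma_addr_t addr;
 *   unsigned long got;
 *   void *buf = snd_malloc_pci_pages_fallback(pci, 32 * 1024, &addr, &got);
 *   if (buf == NULL)
 *           return -ENOMEM;
 *   // program the hardware with `addr`, access `buf` from the CPU side
 *   snd_free_pci_pages(pci, got, buf, addr);
 */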

void *snd_kcalloc(size_t size, int flags)
{
        void *ptr;

        ptr = _snd_kmalloc(size, flags);
        if (ptr != NULL)
                memset(ptr, 0, size);
        return ptr;
}

char *snd_kmalloc_strdup(const char *string, int flags)
{
        size_t len;
        char *ptr;

        if (!string)
                return NULL;
        len = strlen(string) + 1;
        ptr = _snd_kmalloc(len, flags);
        if (ptr)
                memcpy(ptr, string, len);
        return ptr;
}

int copy_to_user_fromio(void *dst, unsigned long src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
        /* on these platforms I/O memory can be copied like normal memory */
        return copy_to_user(dst, (const void*)src, count) ? -EFAULT : 0;
#else
        char buf[1024];
        while (count) {
                size_t c = count;
                if (c > sizeof(buf))
                        c = sizeof(buf);
                memcpy_fromio(buf, src, c);     /* bounce through a kernel buffer */
                if (copy_to_user(dst, buf, c))
                        return -EFAULT;
                count -= c;
                dst += c;
                src += c;
        }
        return 0;
#endif
}

int copy_from_user_toio(unsigned long dst, const void *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
        return copy_from_user((void*)dst, src, count) ? -EFAULT : 0;
#else
        char buf[1024];
        while (count) {
                size_t c = count;
                if (c > sizeof(buf))
                        c = sizeof(buf);
                if (copy_from_user(buf, src, c))
                        return -EFAULT;
                memcpy_toio(dst, buf, c);
                count -= c;
                dst += c;
                src += c;
        }
        return 0;
#endif
}
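
/*
 * Usage sketch (illustrative; `chip->iobase` is a hypothetical unsigned
 * long holding an ioremapped address): a driver whose sample memory lives
 * in I/O space can service a user-space read without an intermediate
 * driver buffer:
 *
 *   if (copy_to_user_fromio(user_buf, chip->iobase + offset, bytes))
 *           return -EFAULT;
 */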

#ifdef HACK_PCI_ALLOC_CONSISTENT

/*
 * A dirty hack... when the kernel code is fixed this should be removed.
 *
 * since pci_alloc_consistent always tries GFP_DMA when the requested
 * pci memory region is below 32bit, it happens quite often that even
 * two orders of pages cannot be allocated.
 *
 * so in the following, GFP_DMA is used only when the first allocation
 * doesn't match the requested region.
 */
#ifdef CONFIG_X86
#define get_phys_addr(x) virt_to_phys(x)
#else
#define get_phys_addr(x) virt_to_bus(x)
#endif
void *snd_pci_hack_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                    dma_addr_t *dma_handle)
{
        void *ret;
        int gfp = GFP_ATOMIC;

        if (hwdev == NULL)
                gfp |= GFP_DMA;
        /* first try without GFP_DMA ... */
        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret) {
                /* ... and retry from the DMA zone only if the result lies
                 * outside the device's addressable range */
                if (hwdev && ((get_phys_addr(ret) + size - 1) & ~hwdev->dma_mask)) {
                        free_pages((unsigned long)ret, get_order(size));
                        ret = (void *)__get_free_pages(gfp | GFP_DMA, get_order(size));
                }
        }
        if (ret) {
                memset(ret, 0, size);
                *dma_handle = get_phys_addr(ret);
        }
        return ret;
}

#endif /* HACK_PCI_ALLOC_CONSISTENT */