/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of this macro is an Emu page (4096 bytes),
 * not an aligned (PAGE_SIZE) page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
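
/*
 * Illustration (not part of the original source): each 32-bit PTB
 * entry packs the DMA address (shifted left by one) with the Emu page
 * index in the low bits, so for addr == 0x12345000 and page == 3 the
 * stored entry is cpu_to_le32((0x12345000 << 1) | 3).
 */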

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
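
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 an aligned
 * page is exactly one Emu page (UNIT_PAGES == 1) and the conversions
 * above are one-to-one; with PAGE_SIZE == 8192, UNIT_PAGES == 2 and
 * each aligned page covers two consecutive PTB entries, which is why
 * the set_ptb_entry() variants below differ.
 */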

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill a PTB entry corresponding to the page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill a PTB entry corresponding to the page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1-specific part of the memblk */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
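
/*
 * Example (illustrative, PAGE_SIZE == 4096): a block with
 * mem.offset == 0x1800 and mem.size == 0x2000 spans aligned pages
 * 1 through 3, so blk->pages == 3.
 */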

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		snd_assert(blk->mapped_page >= 0, continue);
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
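
/*
 * Example (illustrative): with blocks mapped at aligned pages 0-3 and
 * 10-15, a request for npages == 6 matches the hole at pages 4-9
 * exactly and returns it at once; for other sizes the largest hole
 * wins, with the tail region behind the last block tried last.
 */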

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the number of resulting empty aligned pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
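
/*
 * Example (illustrative): unmapping a block whose previous neighbour
 * ends at aligned page 4 and whose next neighbour starts at page 12
 * merges the surrounding gaps and reports 12 - 4 = 8 empty pages.
 */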

/*
 * search for empty pages of the given size and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}

/*
 * check if the given DMA address is valid as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block on the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	snd_assert(emu, return NULL);
	snd_assert(runtime->dma_bytes > 0 && runtime->dma_bytes < MAXPAGES * EMUPAGESIZE, return NULL);
	hdr = emu->memhdr;
	snd_assert(hdr, return NULL);

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
		if (idx >= sgbuf->pages) {
			printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
			       blk->first_page, blk->last_page, sgbuf->pages);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
#endif
		addr = sgbuf->table[idx].addr;
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	snd_assert(emu && blk, return -EINVAL);
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* compute the range of pages that actually need new allocation,
 * excluding boundary pages shared with neighbouring blocks
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++; /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
					PAGE_SIZE, &dmab) < 0)
			goto __fail;
		if (! is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the pages allocated so far */
	last_page = page - 1;
	for (page = first_page; page <= last_page; page++) {
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = snd_dma_pci_data(emu->pci);
	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];
		dmab.bytes = PAGE_SIZE;
		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}

	return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
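
/*
 * Usage sketch (hypothetical caller, not part of this file): a synth
 * driver loading a sample of `len' bytes from the user pointer `buf'
 * could combine the exported helpers above roughly like this:
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, len);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, len) < 0) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */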