/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * This file contains the implementation of virtual memory management object
 * for card device.
 */
19 #include <linux/slab.h>
22 #include <sound/pcm.h>
/* Number of page-table entries that fit in one device page (one PTE per
 * pointer-sized slot), and the span of device address space one page-table
 * page can map. CT_PAGE_SIZE comes from the driver's header — TODO confirm. */
#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
28 * Find or create vm block based on requested @size.
29 * @size must be page aligned.
31 static struct ct_vm_block
*
32 get_vm_block(struct ct_vm
*vm
, unsigned int size
)
34 struct ct_vm_block
*block
= NULL
, *entry
;
35 struct list_head
*pos
;
37 size
= CT_PAGE_ALIGN(size
);
38 if (size
> vm
->size
) {
39 printk(KERN_ERR
"ctxfi: Fail! No sufficient device virtual "
40 "memory space available!\n");
44 mutex_lock(&vm
->lock
);
45 list_for_each(pos
, &vm
->unused
) {
46 entry
= list_entry(pos
, struct ct_vm_block
, list
);
47 if (entry
->size
>= size
)
48 break; /* found a block that is big enough */
50 if (pos
== &vm
->unused
)
53 if (entry
->size
== size
) {
54 /* Move the vm node from unused list to used list directly */
55 list_move(&entry
->list
, &vm
->used
);
61 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
65 block
->addr
= entry
->addr
;
67 list_add(&block
->list
, &vm
->used
);
73 mutex_unlock(&vm
->lock
);
77 static void put_vm_block(struct ct_vm
*vm
, struct ct_vm_block
*block
)
79 struct ct_vm_block
*entry
, *pre_ent
;
80 struct list_head
*pos
, *pre
;
82 block
->size
= CT_PAGE_ALIGN(block
->size
);
84 mutex_lock(&vm
->lock
);
85 list_del(&block
->list
);
86 vm
->size
+= block
->size
;
88 list_for_each(pos
, &vm
->unused
) {
89 entry
= list_entry(pos
, struct ct_vm_block
, list
);
90 if (entry
->addr
>= (block
->addr
+ block
->size
))
91 break; /* found a position */
93 if (pos
== &vm
->unused
) {
94 list_add_tail(&block
->list
, &vm
->unused
);
97 if ((block
->addr
+ block
->size
) == entry
->addr
) {
98 entry
->addr
= block
->addr
;
99 entry
->size
+= block
->size
;
102 __list_add(&block
->list
, pos
->prev
, pos
);
109 while (pre
!= &vm
->unused
) {
110 entry
= list_entry(pos
, struct ct_vm_block
, list
);
111 pre_ent
= list_entry(pre
, struct ct_vm_block
, list
);
112 if ((pre_ent
->addr
+ pre_ent
->size
) > entry
->addr
)
115 pre_ent
->size
+= entry
->size
;
121 mutex_unlock(&vm
->lock
);
124 /* Map host addr (kmalloced/vmalloced) to device logical addr. */
125 static struct ct_vm_block
*
126 ct_vm_map(struct ct_vm
*vm
, struct snd_pcm_substream
*substream
, int size
)
128 struct ct_vm_block
*block
;
129 unsigned int pte_start
;
133 block
= get_vm_block(vm
, size
);
135 printk(KERN_ERR
"ctxfi: No virtual memory block that is big "
136 "enough to allocate!\n");
140 ptp
= (unsigned long *)vm
->ptp
[0].area
;
141 pte_start
= (block
->addr
>> CT_PAGE_SHIFT
);
142 pages
= block
->size
>> CT_PAGE_SHIFT
;
143 for (i
= 0; i
< pages
; i
++) {
145 addr
= snd_pcm_sgbuf_get_addr(substream
, i
<< CT_PAGE_SHIFT
);
146 ptp
[pte_start
+ i
] = addr
;
/* Release a mapping created by ct_vm_map(): the device virtual range goes
 * back to the free pool.  The stale PTEs are left in place; the caller must
 * ensure the hardware no longer uses them. */
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}
160 * return the host physical addr of the @index-th device
161 * page table page on success, or ~0UL on failure.
162 * The first returned ~0UL indicates the termination.
165 ct_get_ptp_phys(struct ct_vm
*vm
, int index
)
169 addr
= (index
>= CT_PTP_NUM
) ? ~0UL : vm
->ptp
[index
].addr
;
174 int ct_vm_create(struct ct_vm
**rvm
, struct pci_dev
*pci
)
177 struct ct_vm_block
*block
;
182 vm
= kzalloc(sizeof(*vm
), GFP_KERNEL
);
186 mutex_init(&vm
->lock
);
188 /* Allocate page table pages */
189 for (i
= 0; i
< CT_PTP_NUM
; i
++) {
190 err
= snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV
,
191 snd_dma_pci_data(pci
),
192 PAGE_SIZE
, &vm
->ptp
[i
]);
197 /* no page table pages are allocated */
201 vm
->size
= CT_ADDRS_PER_PAGE
* i
;
203 vm
->unmap
= ct_vm_unmap
;
204 vm
->get_ptp_phys
= ct_get_ptp_phys
;
205 INIT_LIST_HEAD(&vm
->unused
);
206 INIT_LIST_HEAD(&vm
->used
);
207 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
210 block
->size
= vm
->size
;
211 list_add(&block
->list
, &vm
->unused
);
218 /* The caller must ensure no mapping pages are being used
219 * by hardware before calling this function */
220 void ct_vm_destroy(struct ct_vm
*vm
)
223 struct list_head
*pos
;
224 struct ct_vm_block
*entry
;
226 /* free used and unused list nodes */
227 while (!list_empty(&vm
->used
)) {
230 entry
= list_entry(pos
, struct ct_vm_block
, list
);
233 while (!list_empty(&vm
->unused
)) {
234 pos
= vm
->unused
.next
;
236 entry
= list_entry(pos
, struct ct_vm_block
, list
);
240 /* free allocated page table pages */
241 for (i
= 0; i
< CT_PTP_NUM
; i
++)
242 snd_dma_free_pages(&vm
->ptp
[i
]);