drm/nouveau: modify vm to accommodate dual page tables for nvc0
drivers/gpu/drm/nouveau/nouveau_vm.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

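/*
 * Each page directory entry in this scheme can reference two page tables
 * at once: obj[0] backs small pages (vm->spg_shift) and obj[1] backs large
 * pages (vm->lpg_shift).  The "big" index used throughout selects between
 * the two based on the page size a given VMA was allocated with.
 */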
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_mm_node *r;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        list_for_each_entry(r, &vram->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vm->map(vma, pgt, vram, pte, len, phys);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                pde++;
                                pte = 0;
                        }
                }
        }

        vm->flush(vm);
}

void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
        nouveau_vm_map_at(vma, 0, vram);
}

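/*
 * Map a scatter-gather list of bus addresses; "list" holds one dma_addr_t
 * per page.  The walk is the same as nouveau_vm_map_at(), except the
 * backend's map_sg() callback is handed the next chunk of the address list
 * each time a page-table boundary is crossed.
 */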
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  dma_addr_t *list)
{
        struct nouveau_vm *vm = vma->vm;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num = length >> vma->node->type;
        u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->map_sg(vma, pgt, pte, list, len);

                num -= len;
                pte += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}

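/*
 * Unmapping walks the same PDE/PTE ranges as mapping, but only needs the
 * backend's unmap() callback and no physical addresses.
 */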
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num = length >> vma->node->type;
        u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}

void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

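/*
 * Drop one reference on the "big"-type page table of each PDE in
 * [fpde, lpde].  When a table's refcount hits zero it is cleared from
 * every page directory sharing this VM and then freed; the mm mutex is
 * dropped around the final nouveau_gpuobj_ref(NULL, ...), mirroring the
 * allocation path in nouveau_vm_map_pgt().
 */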
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->obj[big];
                vpgt->obj[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vm->map_pgt(vpgd->obj, pde, vpgt->obj);
                }

                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
        }
}

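/*
 * Allocate and hook up the page table for PDE "pde".  Each PDE spans
 * 1 << (vm->pgt_bits + 12) bytes of address space, so dividing by the page
 * size (1 << type) gives the PTE count, at 8 bytes per PTE.  On nv50
 * (vm->pgt_bits = 17) that is a 1MiB table for 4KiB pages and a 64KiB
 * table for 64KiB pages.
 */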
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        int big = (type != vm->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&vm->mm->mutex);
        ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&vm->mm->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount[big]++)) {
                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
                return 0;
        }

        vpgt->obj[big] = pgt;
        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vm->map_pgt(vpgd->obj, pde, vpgt->obj);
        }

        return 0;
}

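/*
 * Allocate a chunk of virtual address space and make sure page tables
 * exist for every PDE it touches, bumping their refcounts.  The allocation
 * itself is done in 4KiB units by nouveau_mm_get(), with alignment derived
 * from the requested page size.
 */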
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mm->mutex);
        ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != vm->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nouveau_mm_put(vm->mm, vma->node);
                        mutex_unlock(&vm->mm->mutex);
                        vma->node = NULL;
                        return ret;
                }
        }
        mutex_unlock(&vm->mm->mutex);

        vma->vm = vm;
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}

void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

        mutex_lock(&vm->mm->mutex);
        nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
        nouveau_mm_put(vm->mm, vma->node);
        vma->node = NULL;
        mutex_unlock(&vm->mm->mutex);
}

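/*
 * Create a new address space covering [offset, offset + length), with the
 * allocatable window starting at mm_offset.  Only the NV50 family is wired
 * up at this point; pgt_bits = 29 means each PDE covers 512MiB of virtual
 * address space.
 */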
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
               struct nouveau_vm **pvm)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        u32 block, pgt_bits;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        if (dev_priv->card_type == NV_50) {
                vm->map_pgt = nv50_vm_map_pgt;
                vm->map = nv50_vm_map;
                vm->map_sg = nv50_vm_map_sg;
                vm->unmap = nv50_vm_unmap;
                vm->flush = nv50_vm_flush;
                vm->spg_shift = 12;
                vm->lpg_shift = 16;
                pgt_bits = 29;
        } else {
                kfree(vm);
                return -ENOSYS;
        }

        vm->fpde = offset >> pgt_bits;
        vm->lpde = (offset + length - 1) >> pgt_bits;
        vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->dev = dev;
        vm->refcount = 1;
        vm->pgt_bits = pgt_bits - 12;

        block = (1 << pgt_bits);
        if (length < block)
                block = length;

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                kfree(vm->pgt); /* don't leak the PDE array on failure */
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}

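/*
 * Attach a page directory to this VM: every page table that already exists
 * is written into the new PGD, so all directories sharing the VM stay in
 * sync.
 */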
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&vm->mm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mm->mutex);
        return 0;
}

static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        if (!pgd)
                return;

        mutex_lock(&vm->mm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj != pgd)
                        continue;

                list_del(&vpgd->head);
                nouveau_gpuobj_ref(NULL, &vpgd->obj);
                kfree(vpgd);
        }
        mutex_unlock(&vm->mm->mutex);
}

static void
nouveau_vm_del(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }
        WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

        kfree(vm->pgt);
        kfree(vm);
}

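/*
 * Combined reference/assignment helper: take a reference on "ref" (linking
 * "pgd" into it, if given), store it in *ptr, and drop the reference
 * previously held there, destroying that VM once its refcount reaches zero.
 */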
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm *vm;
        int ret;

        vm = ref;
        if (vm) {
                ret = nouveau_vm_link(vm, pgd);
                if (ret)
                        return ret;

                vm->refcount++;
        }

        vm = *ptr;
        *ptr = ref;

        if (vm) {
                nouveau_vm_unlink(vm, pgd);
                if (--vm->refcount == 0)
                        nouveau_vm_del(vm);
        }

        return 0;
}
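
For orientation, a minimal sketch of how a caller might drive the interface above. This is illustrative only: the channel page directory, the VRAM node, its size, and the NV_MEM_ACCESS_RW access flag are assumed to be provided by the rest of the driver, and real callers embed the nouveau_vma in a longer-lived structure.

/* Illustrative sketch only -- not part of nouveau_vm.c. */
static int example_vm_usage(struct drm_device *dev,
                            struct nouveau_gpuobj *chan_pgd,
                            struct nouveau_vram *vram, u64 size)
{
        struct nouveau_vm *vm = NULL;
        struct nouveau_vm *chan_vm = NULL;
        struct nouveau_vma vma = {};
        int ret;

        /* 1GiB of address space, allocations beginning at 4KiB */
        ret = nouveau_vm_new(dev, 0, 1ULL << 30, 0x1000, &vm);
        if (ret)
                return ret;

        /* second reference on the VM, linking the channel's PGD into it */
        ret = nouveau_vm_ref(vm, &chan_vm, chan_pgd);
        if (ret == 0) {
                /* carve out VA using small (4KiB, shift 12) pages */
                ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
                if (ret == 0) {
                        nouveau_vm_map(&vma, vram); /* point PTEs at VRAM */
                        nouveau_vm_unmap(&vma);     /* ...and clear them */
                        nouveau_vm_put(&vma);       /* release the VA range */
                }
                nouveau_vm_ref(NULL, &chan_vm, chan_pgd); /* drop + unlink */
        }
        nouveau_vm_ref(NULL, &vm, NULL); /* drop the creation reference */
        return ret;
}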